1 /*	$NetBSD: kern_synch.c,v 1.137 2003/09/08 11:14:18 itojun Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *	This product includes software developed by the NetBSD
22  *	Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 /*-
41  * Copyright (c) 1982, 1986, 1990, 1991, 1993
42  *	The Regents of the University of California.  All rights reserved.
43  * (c) UNIX System Laboratories, Inc.
44  * All or some portions of this file are derived from material licensed
45  * to the University of California by American Telephone and Telegraph
46  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
47  * the permission of UNIX System Laboratories, Inc.
48  *
49  * Redistribution and use in source and binary forms, with or without
50  * modification, are permitted provided that the following conditions
51  * are met:
52  * 1. Redistributions of source code must retain the above copyright
53  *    notice, this list of conditions and the following disclaimer.
54  * 2. Redistributions in binary form must reproduce the above copyright
55  *    notice, this list of conditions and the following disclaimer in the
56  *    documentation and/or other materials provided with the distribution.
57  * 3. Neither the name of the University nor the names of its contributors
58  *    may be used to endorse or promote products derived from this software
59  *    without specific prior written permission.
60  *
61  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
62  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
63  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
64  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
65  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
66  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
67  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
68  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
69  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
70  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
71  * SUCH DAMAGE.
72  *
73  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
74  */
75 
76 #include <sys/cdefs.h>
77 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.137 2003/09/08 11:14:18 itojun Exp $");
78 
79 #include "opt_ddb.h"
80 #include "opt_ktrace.h"
81 #include "opt_kstack.h"
82 #include "opt_lockdebug.h"
83 #include "opt_multiprocessor.h"
84 #include "opt_perfctrs.h"
85 
86 #include <sys/param.h>
87 #include <sys/systm.h>
88 #include <sys/callout.h>
89 #include <sys/proc.h>
90 #include <sys/kernel.h>
91 #include <sys/buf.h>
92 #if defined(PERFCTRS)
93 #include <sys/pmc.h>
94 #endif
95 #include <sys/signalvar.h>
96 #include <sys/resourcevar.h>
97 #include <sys/sched.h>
98 #include <sys/sa.h>
99 #include <sys/savar.h>
100 
101 #include <uvm/uvm_extern.h>
102 
103 #ifdef KTRACE
104 #include <sys/ktrace.h>
105 #endif
106 
107 #include <machine/cpu.h>
108 
109 int	lbolt;			/* once a second sleep address */
110 int	rrticks;		/* number of hardclock ticks per roundrobin() */
111 
112 /*
113  * The global scheduler state.
114  */
115 struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
116 __volatile u_int32_t sched_whichqs;	/* bitmap of non-empty queues */
117 struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */
118 
119 struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;
120 
121 void schedcpu(void *);
122 void updatepri(struct lwp *);
123 void endtsleep(void *);
124 
125 __inline void awaken(struct lwp *);
126 
127 struct callout schedcpu_ch = CALLOUT_INITIALIZER;
128 
129 
130 
131 /*
132  * Force switch among equal priority processes every 100ms.
133  * Called from hardclock every hz/10 == rrticks hardclock ticks.
134  */
135 /* ARGSUSED */
136 void
137 roundrobin(struct cpu_info *ci)
138 {
139 	struct schedstate_percpu *spc = &ci->ci_schedstate;
140 
141 	spc->spc_rrticks = rrticks;
142 
143 	if (curlwp != NULL) {
144 		if (spc->spc_flags & SPCF_SEENRR) {
145 			/*
146 			 * The process has already been through a roundrobin
147 			 * without switching and may be hogging the CPU.
148 			 * Indicate that the process should yield.
149 			 */
150 			spc->spc_flags |= SPCF_SHOULDYIELD;
151 		} else
152 			spc->spc_flags |= SPCF_SEENRR;
153 	}
154 	need_resched(curcpu());
155 }
156 
157 /*
158  * Constants for digital decay and forget:
159  *	90% of (p_estcpu) usage in 5 * loadav time
160  *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
161  *          Note that, as ps(1) mentions, this can let percentages
162  *          total over 100% (I've seen 137.9% for 3 processes).
163  *
164  * Note that hardclock updates p_estcpu and p_cpticks independently.
165  *
166  * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
167  * That is, the system wants to compute a value of decay such
168  * that the following for loop:
169  * 	for (i = 0; i < (5 * loadavg); i++)
170  * 		p_estcpu *= decay;
171  * will compute
172  * 	p_estcpu *= 0.1;
173  * for all values of loadavg:
174  *
175  * Mathematically this loop can be expressed by saying:
176  * 	decay ** (5 * loadavg) ~= .1
177  *
178  * The system computes decay as:
179  * 	decay = (2 * loadavg) / (2 * loadavg + 1)
180  *
181  * We wish to prove that the system's computation of decay
182  * will always fulfill the equation:
183  * 	decay ** (5 * loadavg) ~= .1
184  *
185  * If we compute b as:
186  * 	b = 2 * loadavg
187  * then
188  * 	decay = b / (b + 1)
189  *
190  * We now need to prove two things:
191  *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
192  *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
193  *
194  * Facts:
195  *         For x close to zero, exp(x) =~ 1 + x, since
196  *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
197  *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
198  *         For x close to zero, ln(1+x) =~ x, since
199  *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
200  *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
201  *         ln(.1) =~ -2.30
202  *
203  * Proof of (1):
204  *    Solve (factor)**(power) =~ .1 given power (5*loadav):
205  *	solving for factor,
206  *      ln(factor) =~ (-2.30/5*loadav), or
207  *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
208  *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
209  *
210  * Proof of (2):
211  *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
212  *	solving for power,
213  *      power*ln(b/(b+1)) =~ -2.30, or
214  *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
215  *
216  * Actual power values for the implemented algorithm are as follows:
217  *      loadav: 1       2       3       4
218  *      power:  5.68    10.32   14.94   19.55
219  */
220 
221 /* calculations for digital decay to forget 90% of usage in 5*loadav sec */
222 #define	loadfactor(loadav)	(2 * (loadav))
223 #define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
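/*
 * Worked example of the decay above (editorial sketch, not compiled into
 * the kernel): for a load average of 1.0, loadfac == 2*FSCALE, so
 * decay_cpu() multiplies by 2*FSCALE / (2*FSCALE + FSCALE) == 2/3 each
 * second, and (2/3)^5.68 =~ 0.1 -- the "power" value tabulated above.
 * The helper below simply counts the iterations; its name and the use of
 * printf() are for illustration only.
 */
#if 0
static void
decay_cpu_demo(int loadav)
{
	fixpt_t loadfac = loadfactor((fixpt_t)loadav * FSCALE);
	u_int cpu = 255;		/* pretend p_estcpu == 255 */
	u_int target = cpu / 10;	/* 90% forgotten */
	int iters = 0;

	while (cpu > target) {
		cpu = decay_cpu(loadfac, cpu);
		iters++;
	}
	/* For loadav == 1 this reports ~6 iterations (5.68 in the table). */
	printf("loadav %d: %d iterations to decay 90%%\n", loadav, iters);
}
#endif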
224 
225 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
226 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
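/*
 * Editorial note on the 95%/60s figure: schedcpu() multiplies p_pctcpu
 * by ccpu once per second, so after 60 seconds the old contribution has
 * been scaled by exp(-1/20)^60 == exp(-3) =~ 0.0498, i.e. about 95% of
 * the old usage has been forgotten.
 */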
227 
228 /*
229  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
230  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
231  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
232  *
233  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
234  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
235  *
236  * If you don't want to bother with the faster/more-accurate formula, you
237  * can set CCPU_SHIFT to (FSHIFT + 1), which will use a slower/less-accurate
238  * (more general) method of calculating the percentage of CPU used by a process.
239  */
240 #define	CCPU_SHIFT	11
241 
242 /*
243  * Recompute process priorities, every hz ticks.
244  */
245 /* ARGSUSED */
246 void
247 schedcpu(void *arg)
248 {
249 	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
250 	struct lwp *l;
251 	struct proc *p;
252 	int s, minslp;
253 	unsigned int newcpu;
254 	int clkhz;
255 
256 	proclist_lock_read();
257 	LIST_FOREACH(p, &allproc, p_list) {
258 		/*
259 		 * Increment time in/out of memory and sleep time
260 		 * (if sleeping).  We ignore overflow; with 16-bit int's
261 		 * (remember them?) overflow takes 45 days.
262 		 */
263 		minslp = 2;
264 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
265 			l->l_swtime++;
266 			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
267 			    l->l_stat == LSSUSPENDED) {
268 				l->l_slptime++;
269 				minslp = min(minslp, l->l_slptime);
270 			} else
271 				minslp = 0;
272 		}
273 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
274 		/*
275 		 * If the process has slept the entire second,
276 		 * stop recalculating its priority until it wakes up.
277 		 */
278 		if (minslp > 1)
279 			continue;
280 		s = splstatclock();	/* prevent state changes */
281 		/*
282 		 * p_pctcpu is only for ps.
283 		 */
284 		clkhz = stathz != 0 ? stathz : hz;
285 #if	(FSHIFT >= CCPU_SHIFT)
286 		p->p_pctcpu += (clkhz == 100)?
287 			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
288 			100 * (((fixpt_t) p->p_cpticks)
289 				<< (FSHIFT - CCPU_SHIFT)) / clkhz;
290 #else
291 		p->p_pctcpu += ((FSCALE - ccpu) *
292 			(p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
293 #endif
294 		p->p_cpticks = 0;
295 		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
296 		p->p_estcpu = newcpu;
297 		splx(s);	/* Done with the process CPU ticks update */
298 		SCHED_LOCK(s);
299 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
300 			if (l->l_slptime > 1)
301 				continue;
302 			resetpriority(l);
303 			if (l->l_priority >= PUSER) {
304 				if (l->l_stat == LSRUN &&
305 				    (l->l_flag & L_INMEM) &&
306 				    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
307 					remrunqueue(l);
308 					l->l_priority = l->l_usrpri;
309 					setrunqueue(l);
310 				} else
311 					l->l_priority = l->l_usrpri;
312 			}
313 		}
314 		SCHED_UNLOCK(s);
315 	}
316 	proclist_unlock_read();
317 	uvm_meter();
318 	wakeup((caddr_t)&lbolt);
319 	callout_reset(&schedcpu_ch, hz, schedcpu, NULL);
320 }
321 
322 /*
323  * Recalculate the priority of a process after it has slept for a while.
324  * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
325  * least six times the loadfactor will decay p_estcpu to zero.
326  */
327 void
328 updatepri(struct lwp *l)
329 {
330 	struct proc *p = l->l_proc;
331 	unsigned int newcpu;
332 	fixpt_t loadfac;
333 
334 	SCHED_ASSERT_LOCKED();
335 
336 	newcpu = p->p_estcpu;
337 	loadfac = loadfactor(averunnable.ldavg[0]);
338 
339 	if (l->l_slptime > 5 * loadfac)
340 		p->p_estcpu = 0; /* XXX NJWLWP */
341 	else {
342 		l->l_slptime--;	/* the first time was done in schedcpu */
343 		while (newcpu && --l->l_slptime)
344 			newcpu = (int) decay_cpu(loadfac, newcpu);
345 		p->p_estcpu = newcpu;
346 	}
347 	resetpriority(l);
348 }
349 
350 /*
351  * During autoconfiguration or after a panic, a sleep will simply
352  * lower the priority briefly to allow interrupts, then return.
353  * The priority to be used (safepri) is machine-dependent, thus this
354  * value is initialized and maintained in the machine-dependent layers.
355  * This priority will typically be 0, or the lowest priority
356  * that is safe for use on the interrupt stack; it can be made
357  * higher to block network software interrupts after panics.
358  */
359 int safepri;
360 
361 /*
362  * General sleep call.  Suspends the current process until a wakeup is
363  * performed on the specified identifier.  The process will then be made
364  * runnable with the specified priority.  Sleeps at most timo/hz seconds
365  * (0 means no timeout).  If pri includes the PCATCH flag, signals are checked
366  * before and after sleeping; otherwise signals are not checked.  Returns 0 if
367  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
368  * signal needs to be delivered, ERESTART is returned if the current system
369  * call should be restarted if possible, and EINTR is returned if the system
370  * call should be interrupted by the signal.
371  *
372  * The interlock is held until sched_lock is acquired.  The
373  * interlock will be locked again before returning to the caller
374  * unless the PNORELOCK flag is specified, in which case the
375  * interlock will always be unlocked upon return.
376  */
377 int
378 ltsleep(const void *ident, int priority, const char *wmesg, int timo,
379     __volatile struct simplelock *interlock)
380 {
381 	struct lwp *l = curlwp;
382 	struct proc *p = l ? l->l_proc : NULL;
383 	struct slpque *qp;
384 	int sig, s;
385 	int catch = priority & PCATCH;
386 	int relock = (priority & PNORELOCK) == 0;
387 	int exiterr = (priority & PNOEXITERR) == 0;
388 
389 	/*
390 	 * XXXSMP
391 	 * This is probably bogus.  Figure out what the right
392 	 * thing to do here really is.
393 	 * Note that not sleeping if ltsleep is called with curlwp == NULL
394 	 * in the shutdown case is disgusting but partly necessary given
395 	 * how shutdown (barely) works.
396 	 */
397 	if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
398 		/*
399 		 * After a panic, or during autoconfiguration,
400 		 * just give interrupts a chance, then just return;
401 		 * don't run any other procs or panic below,
402 		 * in case this is the idle process and already asleep.
403 		 */
404 		s = splhigh();
405 		splx(safepri);
406 		splx(s);
407 		if (interlock != NULL && relock == 0)
408 			simple_unlock(interlock);
409 		return (0);
410 	}
411 
412 	KASSERT(p != NULL);
413 	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));
414 
415 #ifdef KTRACE
416 	if (KTRPOINT(p, KTR_CSW))
417 		ktrcsw(p, 1, 0);
418 #endif
419 
420 	SCHED_LOCK(s);
421 
422 #ifdef DIAGNOSTIC
423 	if (ident == NULL)
424 		panic("ltsleep: ident == NULL");
425 	if (l->l_stat != LSONPROC)
426 		panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
427 	if (l->l_back != NULL)
428 		panic("ltsleep: l_back != NULL");
429 #endif
430 
431 	l->l_wchan = ident;
432 	l->l_wmesg = wmesg;
433 	l->l_slptime = 0;
434 	l->l_priority = priority & PRIMASK;
435 
436 	qp = SLPQUE(ident);
437 	if (qp->sq_head == 0)
438 		qp->sq_head = l;
439 	else {
440 		*qp->sq_tailp = l;
441 	}
442 	*(qp->sq_tailp = &l->l_forw) = 0;
443 
444 	if (timo)
445 		callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);
446 
447 	/*
448 	 * We can now release the interlock; sched_lock
449 	 * is held, so a thread can't get in to do wakeup() before
450 	 * we do the switch.
451 	 *
452 	 * XXX We leave the code block here, after inserting ourselves
453 	 * on the sleep queue, because we might want a more clever
454 	 * data structure for the sleep queues at some point.
455 	 */
456 	if (interlock != NULL)
457 		simple_unlock(interlock);
458 
459 	/*
460 	 * We put ourselves on the sleep queue and start our timeout
461 	 * before calling CURSIG, as we could stop there, and a wakeup
462 	 * or a SIGCONT (or both) could occur while we were stopped.
463 	 * A SIGCONT would cause us to be marked as LSSLEEP
464 	 * without resuming us, thus we must be ready for sleep
465 	 * when CURSIG is called.  If the wakeup happens while we're
466 	 * stopped, l->l_wchan will be 0 upon return from CURSIG.
467 	 */
468 	if (catch) {
469 		l->l_flag |= L_SINTR;
470 		if (((sig = CURSIG(l)) != 0) ||
471 		    ((p->p_flag & P_WEXIT) && p->p_nlwps > 1)) {
472 			if (l->l_wchan != NULL)
473 				unsleep(l);
474 			l->l_stat = LSONPROC;
475 			SCHED_UNLOCK(s);
476 			goto resume;
477 		}
478 		if (l->l_wchan == NULL) {
479 			catch = 0;
480 			SCHED_UNLOCK(s);
481 			goto resume;
482 		}
483 	} else
484 		sig = 0;
485 	l->l_stat = LSSLEEP;
486 	p->p_nrlwps--;
487 	p->p_stats->p_ru.ru_nvcsw++;
488 	SCHED_ASSERT_LOCKED();
489 	if (l->l_flag & L_SA)
490 		sa_switch(l, SA_UPCALL_BLOCKED);
491 	else
492 		mi_switch(l, NULL);
493 
494 #if	defined(DDB) && !defined(GPROF)
495 	/* handy breakpoint location after process "wakes" */
496 	__asm(".globl bpendtsleep ; bpendtsleep:");
497 #endif
498 	/*
499 	 * p->p_nrlwps is incremented by whoever made us runnable again,
500 	 * either setrunnable() or awaken().
501 	 */
502 
503 	SCHED_ASSERT_UNLOCKED();
504 	splx(s);
505 
506  resume:
507 	KDASSERT(l->l_cpu != NULL);
508 	KDASSERT(l->l_cpu == curcpu());
509 	l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;
510 
511 	l->l_flag &= ~L_SINTR;
512 	if (l->l_flag & L_TIMEOUT) {
513 		l->l_flag &= ~(L_TIMEOUT|L_CANCELLED);
514 		if (sig == 0) {
515 #ifdef KTRACE
516 			if (KTRPOINT(p, KTR_CSW))
517 				ktrcsw(p, 0, 0);
518 #endif
519 			if (relock && interlock != NULL)
520 				simple_lock(interlock);
521 			return (EWOULDBLOCK);
522 		}
523 	} else if (timo)
524 		callout_stop(&l->l_tsleep_ch);
525 
526 	if (catch) {
527 		const int cancelled = l->l_flag & L_CANCELLED;
528 		l->l_flag &= ~L_CANCELLED;
529 		if (sig != 0 || (sig = CURSIG(l)) != 0 || cancelled) {
530 #ifdef KTRACE
531 			if (KTRPOINT(p, KTR_CSW))
532 				ktrcsw(p, 0, 0);
533 #endif
534 			if (relock && interlock != NULL)
535 				simple_lock(interlock);
536 			/*
537 			 * If this sleep was canceled, don't let the syscall
538 			 * restart.
539 			 */
540 			if (cancelled ||
541 			    (SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
542 				return (EINTR);
543 			return (ERESTART);
544 		}
545 	}
546 
547 #ifdef KTRACE
548 	if (KTRPOINT(p, KTR_CSW))
549 		ktrcsw(p, 0, 0);
550 #endif
551 	if (relock && interlock != NULL)
552 		simple_lock(interlock);
553 
554 	/*
555 	 * XXXNJW: this is very much a kludge; revisit.  A better way of
556 	 * preventing looping/hanging syscalls like wait4() and _lwp_wait()
557 	 * from wedging an exiting process would be preferred.
558 	 */
559 	if (catch && ((p->p_flag & P_WEXIT) && p->p_nlwps > 1 && exiterr))
560 		return (EINTR);
561 	return (0);
562 }
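/*
 * Editorial sketch (not part of the kernel) of the interlocked
 * sleep/wakeup pattern ltsleep() is designed for.  The "foo" structure,
 * its simplelock and its f_ready flag are invented for the example.
 */
#if 0
struct foo {
	struct simplelock f_slock;
	int f_ready;
};

static int
foo_wait(struct foo *f)
{
	int error = 0;

	simple_lock(&f->f_slock);
	while (!f->f_ready && error == 0) {
		/*
		 * The interlock is dropped only after we are on the
		 * sleep queue and is reacquired before ltsleep()
		 * returns (no PNORELOCK), so rechecking f_ready in
		 * the loop cannot miss a wakeup.
		 */
		error = ltsleep(&f->f_ready, PWAIT | PCATCH, "foowait",
		    0, &f->f_slock);
	}
	simple_unlock(&f->f_slock);
	return (error);
}

static void
foo_ready(struct foo *f)
{

	simple_lock(&f->f_slock);
	f->f_ready = 1;
	simple_unlock(&f->f_slock);
	wakeup(&f->f_ready);
}
#endif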
563 
564 /*
565  * Implement timeout for tsleep.
566  * If process hasn't been awakened (wchan non-zero),
567  * set timeout flag and undo the sleep.  If proc
568  * is stopped, just unsleep so it will remain stopped.
569  */
570 void
571 endtsleep(void *arg)
572 {
573 	struct lwp *l;
574 	int s;
575 
576 	l = (struct lwp *)arg;
577 	SCHED_LOCK(s);
578 	if (l->l_wchan) {
579 		if (l->l_stat == LSSLEEP)
580 			setrunnable(l);
581 		else
582 			unsleep(l);
583 		l->l_flag |= L_TIMEOUT;
584 	}
585 	SCHED_UNLOCK(s);
586 }
587 
588 /*
589  * Remove a process from its wait queue
590  */
591 void
592 unsleep(struct lwp *l)
593 {
594 	struct slpque *qp;
595 	struct lwp **hp;
596 
597 	SCHED_ASSERT_LOCKED();
598 
599 	if (l->l_wchan) {
600 		hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
601 		while (*hp != l)
602 			hp = &(*hp)->l_forw;
603 		*hp = l->l_forw;
604 		if (qp->sq_tailp == &l->l_forw)
605 			qp->sq_tailp = hp;
606 		l->l_wchan = 0;
607 	}
608 }
609 
610 /*
611  * Optimized-for-wakeup() version of setrunnable().
612  */
613 __inline void
614 awaken(struct lwp *l)
615 {
616 
617 	SCHED_ASSERT_LOCKED();
618 
619 	if (l->l_slptime > 1)
620 		updatepri(l);
621 	l->l_slptime = 0;
622 	l->l_stat = LSRUN;
623 	l->l_proc->p_nrlwps++;
624 	/*
625 	 * Since curpriority is a user priority, p->p_priority
626 	 * is always better than curpriority on the last CPU on
627 	 * which it ran.
628 	 *
629 	 * XXXSMP See affinity comment in resched_proc().
630 	 */
631 	if (l->l_flag & L_INMEM) {
632 		setrunqueue(l);
633 		if (l->l_flag & L_SA)
634 			l->l_proc->p_sa->sa_woken = l;
635 		KASSERT(l->l_cpu != NULL);
636 		need_resched(l->l_cpu);
637 	} else
638 		sched_wakeup(&proc0);
639 }
640 
641 #if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
642 void
643 sched_unlock_idle(void)
644 {
645 
646 	simple_unlock(&sched_lock);
647 }
648 
649 void
650 sched_lock_idle(void)
651 {
652 
653 	simple_lock(&sched_lock);
654 }
655 #endif /* MULTIPROCESSOR || LOCKDEBUG */
656 
657 /*
658  * Make all processes sleeping on the specified identifier runnable.
659  */
660 
661 void
662 wakeup(const void *ident)
663 {
664 	int s;
665 
666 	SCHED_ASSERT_UNLOCKED();
667 
668 	SCHED_LOCK(s);
669 	sched_wakeup(ident);
670 	SCHED_UNLOCK(s);
671 }
672 
673 void
674 sched_wakeup(const void *ident)
675 {
676 	struct slpque *qp;
677 	struct lwp *l, **q;
678 
679 	SCHED_ASSERT_LOCKED();
680 
681 	qp = SLPQUE(ident);
682  restart:
683 	for (q = &qp->sq_head; (l = *q) != NULL; ) {
684 #ifdef DIAGNOSTIC
685 		if (l->l_back || (l->l_stat != LSSLEEP &&
686 		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
687 			panic("wakeup");
688 #endif
689 		if (l->l_wchan == ident) {
690 			l->l_wchan = 0;
691 			*q = l->l_forw;
692 			if (qp->sq_tailp == &l->l_forw)
693 				qp->sq_tailp = q;
694 			if (l->l_stat == LSSLEEP) {
695 				awaken(l);
696 				goto restart;
697 			}
698 		} else
699 			q = &l->l_forw;
700 	}
701 }
702 
703 /*
704  * Make runnable the highest-priority process sleeping on the specified
705  * identifier.
706  */
707 void
708 wakeup_one(const void *ident)
709 {
710 	struct slpque *qp;
711 	struct lwp *l, **q;
712 	struct lwp *best_sleepp, **best_sleepq;
713 	struct lwp *best_stopp, **best_stopq;
714 	int s;
715 
716 	best_sleepp = best_stopp = NULL;
717 	best_sleepq = best_stopq = NULL;
718 
719 	SCHED_LOCK(s);
720 
721 	qp = SLPQUE(ident);
722 
723 	for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
724 #ifdef DIAGNOSTIC
725 		if (l->l_back || (l->l_stat != LSSLEEP &&
726 		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
727 			panic("wakeup_one");
728 #endif
729 		if (l->l_wchan == ident) {
730 			if (l->l_stat == LSSLEEP) {
731 				if (best_sleepp == NULL ||
732 				    l->l_priority < best_sleepp->l_priority) {
733 					best_sleepp = l;
734 					best_sleepq = q;
735 				}
736 			} else {
737 				if (best_stopp == NULL ||
738 				    l->l_priority < best_stopp->l_priority) {
739 					best_stopp = l;
740 					best_stopq = q;
741 				}
742 			}
743 		}
744 	}
745 
746 	/*
747 	 * Consider any LSSLEEP process higher priority than the highest-priority
748 	 * LSSTOP process.
749 	 */
750 	if (best_sleepp != NULL) {
751 		l = best_sleepp;
752 		q = best_sleepq;
753 	} else {
754 		l = best_stopp;
755 		q = best_stopq;
756 	}
757 
758 	if (l != NULL) {
759 		l->l_wchan = NULL;
760 		*q = l->l_forw;
761 		if (qp->sq_tailp == &l->l_forw)
762 			qp->sq_tailp = q;
763 		if (l->l_stat == LSSLEEP)
764 			awaken(l);
765 	}
766 	SCHED_UNLOCK(s);
767 }
768 
769 /*
770  * General yield call.  Puts the current process back on its run queue and
771  * performs a voluntary context switch.  Should only be called when the
772  * current process explicitly requests it (e.g. sched_yield(2) in compat code).
773  */
774 void
775 yield(void)
776 {
777 	struct lwp *l = curlwp;
778 	int s;
779 
780 	SCHED_LOCK(s);
781 	l->l_priority = l->l_usrpri;
782 	l->l_stat = LSRUN;
783 	setrunqueue(l);
784 	l->l_proc->p_stats->p_ru.ru_nvcsw++;
785 	mi_switch(l, NULL);
786 	SCHED_ASSERT_UNLOCKED();
787 	splx(s);
788 }
789 
790 /*
791  * General preemption call.  Puts the current process back on its run queue
792  * and performs an involuntary context switch.  The next process to run is
793  * chosen by the normal selection criteria; a non-zero `more' argument
794  * suppresses the scheduler activations preemption upcall for SA processes.
795  */
796 
797 void
798 preempt(int more)
799 {
800 	struct lwp *l = curlwp;
801 	int r, s;
802 /* XXXUPSXXX Not needed for SMP patch */
803 #if 0
804 	/* XXX Until the preempt() bug is fixed. */
805 	if (more && (l->l_proc->p_flag & P_SA)) {
806 		l->l_cpu->ci_schedstate.spc_flags &= ~SPCF_SWITCHCLEAR;
807 		return;
808 	}
809 #endif
810 
811 	SCHED_LOCK(s);
812 	l->l_priority = l->l_usrpri;
813 	l->l_stat = LSRUN;
814 	setrunqueue(l);
815 	l->l_proc->p_stats->p_ru.ru_nivcsw++;
816 	r = mi_switch(l, NULL);
817 	SCHED_ASSERT_UNLOCKED();
818 	splx(s);
819 	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
820 		sa_preempt(l);
821 }
822 
823 /*
824  * The machine independent parts of context switch.
825  * Must be called at splsched() (no higher!) and with
826  * the sched_lock held.
827  * Switch to "newl" if non-NULL, otherwise let cpu_switch() choose
828  * the next lwp.
829  *
830  * Returns 1 if another process was actually run.
831  */
832 int
833 mi_switch(struct lwp *l, struct lwp *newl)
834 {
835 	struct schedstate_percpu *spc;
836 	struct rlimit *rlim;
837 	long s, u;
838 	struct timeval tv;
839 #if defined(MULTIPROCESSOR)
840 	int hold_count;
841 #endif
842 	struct proc *p = l->l_proc;
843 	int retval;
844 
845 	SCHED_ASSERT_LOCKED();
846 
847 #if defined(MULTIPROCESSOR)
848 	/*
849 	 * Release the kernel_lock, as we are about to yield the CPU.
850 	 * The scheduler lock is still held until cpu_switch()
851 	 * selects a new process and removes it from the run queue.
852 	 */
853 	if (l->l_flag & L_BIGLOCK)
854 		hold_count = spinlock_release_all(&kernel_lock);
855 #endif
856 
857 	KDASSERT(l->l_cpu != NULL);
858 	KDASSERT(l->l_cpu == curcpu());
859 
860 	spc = &l->l_cpu->ci_schedstate;
861 
862 #if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
863 	spinlock_switchcheck();
864 #endif
865 #ifdef LOCKDEBUG
866 	simple_lock_switchcheck();
867 #endif
868 
869 	/*
870 	 * Compute the amount of time during which the current
871 	 * process was running.
872 	 */
873 	microtime(&tv);
874 	u = p->p_rtime.tv_usec +
875 	    (tv.tv_usec - spc->spc_runtime.tv_usec);
876 	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
877 	if (u < 0) {
878 		u += 1000000;
879 		s--;
880 	} else if (u >= 1000000) {
881 		u -= 1000000;
882 		s++;
883 	}
884 	p->p_rtime.tv_usec = u;
885 	p->p_rtime.tv_sec = s;
886 
887 	/*
888 	 * Check if the process exceeds its cpu resource allocation.
889 	 * If over max, kill it.  In any case, if it has run for more
890 	 * than 10 minutes, reduce priority to give others a chance.
891 	 */
892 	rlim = &p->p_rlimit[RLIMIT_CPU];
893 	if (s >= rlim->rlim_cur) {
894 		/*
895 		 * XXXSMP: we're inside the scheduler lock perimeter;
896 		 * use sched_psignal.
897 		 */
898 		if (s >= rlim->rlim_max)
899 			sched_psignal(p, SIGKILL);
900 		else {
901 			sched_psignal(p, SIGXCPU);
902 			if (rlim->rlim_cur < rlim->rlim_max)
903 				rlim->rlim_cur += 5;
904 		}
905 	}
906 	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
907 	    p->p_nice == NZERO) {
908 		p->p_nice = autoniceval + NZERO;
909 		resetpriority(l);
910 	}
911 
912 	/*
913 	 * Process is about to yield the CPU; clear the appropriate
914 	 * scheduling flags.
915 	 */
916 	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
917 
918 #ifdef KSTACK_CHECK_MAGIC
919 	kstack_check_magic(l);
920 #endif
921 
922 	/*
923 	 * If we are using h/w performance counters, save context.
924 	 */
925 #if defined(PERFCTRS)
926 	if (PMC_ENABLED(p))
927 		pmc_save_context(p);
928 #endif
929 
930 	/*
931 	 * Switch to the new current process.  When we
932 	 * run again, we'll return back here.
933 	 */
934 	uvmexp.swtch++;
935 	if (newl == NULL) {
936 		retval = cpu_switch(l, NULL);
937 	} else {
938 		remrunqueue(newl);
939 		cpu_switchto(l, newl);
940 		retval = 0;
941 	}
942 
943 	/*
944 	 * If we are using h/w performance counters, restore context.
945 	 */
946 #if defined(PERFCTRS)
947 	if (PMC_ENABLED(p))
948 		pmc_restore_context(p);
949 #endif
950 
951 	/*
952 	 * Make sure that MD code released the scheduler lock before
953 	 * resuming us.
954 	 */
955 	SCHED_ASSERT_UNLOCKED();
956 
957 	/*
958 	 * We're running again; record our new start time.  We might
959 	 * be running on a new CPU now, so don't use the cached
960 	 * schedstate_percpu pointer.
961 	 */
962 	KDASSERT(l->l_cpu != NULL);
963 	KDASSERT(l->l_cpu == curcpu());
964 	microtime(&l->l_cpu->ci_schedstate.spc_runtime);
965 
966 #if defined(MULTIPROCESSOR)
967 	/*
968 	 * Reacquire the kernel_lock now.  We do this after we've
969 	 * released the scheduler lock to avoid deadlock, and before
970 	 * we reacquire the interlock.
971 	 */
972 	if (l->l_flag & L_BIGLOCK)
973 		spinlock_acquire_count(&kernel_lock, hold_count);
974 #endif
975 
976 	return retval;
977 }
978 
979 /*
980  * Initialize the (doubly-linked) run queues
981  * to be empty.
982  */
983 void
984 rqinit(void)
985 {
986 	int i;
987 
988 	for (i = 0; i < RUNQUE_NQS; i++)
989 		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
990 		    (struct lwp *)&sched_qs[i];
991 }
992 
993 static __inline void
994 resched_proc(struct lwp *l, u_char pri)
995 {
996 	struct cpu_info *ci;
997 
998 	/*
999 	 * XXXSMP
1000 	 * Since l->l_cpu persists across a context switch,
1001 	 * this gives us *very weak* processor affinity, in
1002 	 * that we notify the CPU on which the process last
1003 	 * ran that it should try to switch.
1004 	 *
1005 	 * This does not guarantee that the process will run on
1006 	 * that processor next, because another processor might
1007 	 * grab it the next time it performs a context switch.
1008 	 *
1009 	 * This also does not handle the case where its last
1010 	 * CPU is running a higher-priority process, but every
1011 	 * other CPU is running a lower-priority process.  There
1012 	 * are ways to handle this situation, but they're not
1013 	 * currently very pretty, and we also need to weigh the
1014 	 * cost of moving a process from one CPU to another.
1015 	 *
1016 	 * XXXSMP
1017 	 * There is also the issue of locking the other CPU's
1018 	 * sched state, which we currently do not do.
1019 	 */
1020 	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
1021 	if (pri < ci->ci_schedstate.spc_curpriority)
1022 		need_resched(ci);
1023 }
1024 
1025 /*
1026  * Change process state to be runnable,
1027  * placing it on the run queue if it is in memory,
1028  * and awakening the swapper if it isn't in memory.
1029  */
1030 void
1031 setrunnable(struct lwp *l)
1032 {
1033 	struct proc *p = l->l_proc;
1034 
1035 	SCHED_ASSERT_LOCKED();
1036 
1037 	switch (l->l_stat) {
1038 	case 0:
1039 	case LSRUN:
1040 	case LSONPROC:
1041 	case LSZOMB:
1042 	case LSDEAD:
1043 	default:
1044 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
1045 	case LSSTOP:
1046 		/*
1047 		 * If we're being traced (possibly because someone attached us
1048 		 * while we were stopped), check for a signal from the debugger.
1049 		 */
1050 		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
1051 			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
1052 			CHECKSIGS(p);
1053 		}
1054 	case LSSLEEP:
1055 		unsleep(l);		/* e.g. when sending signals */
1056 		break;
1057 
1058 	case LSIDL:
1059 		break;
1060 	case LSSUSPENDED:
1061 		break;
1062 	}
1063 	l->l_stat = LSRUN;
1064 	p->p_nrlwps++;
1065 
1066 	if (l->l_flag & L_INMEM)
1067 		setrunqueue(l);
1068 
1069 	if (l->l_slptime > 1)
1070 		updatepri(l);
1071 	l->l_slptime = 0;
1072 	if ((l->l_flag & L_INMEM) == 0)
1073 		sched_wakeup((caddr_t)&proc0);
1074 	else
1075 		resched_proc(l, l->l_priority);
1076 }
1077 
1078 /*
1079  * Compute the priority of a process when running in user mode.
1080  * Arrange to reschedule if the resulting priority is better
1081  * than that of the current process.
1082  */
1083 void
1084 resetpriority(struct lwp *l)
1085 {
1086 	unsigned int newpriority;
1087 	struct proc *p = l->l_proc;
1088 
1089 	SCHED_ASSERT_LOCKED();
1090 
1091 	newpriority = PUSER + p->p_estcpu +
1092 			NICE_WEIGHT * (p->p_nice - NZERO);
1093 	newpriority = min(newpriority, MAXPRI);
1094 	l->l_usrpri = newpriority;
1095 	resched_proc(l, l->l_usrpri);
1096 }
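/*
 * Editorial example: with the usual values PUSER == 50, NICE_WEIGHT == 2
 * and NZERO == 20 (check <sys/param.h> and <sys/sched.h> for the real
 * ones), an LWP whose process has p_estcpu == 12 and p_nice == 25
 * (nice +5) gets l_usrpri == 50 + 12 + 2*5 == 72, clamped to MAXPRI if
 * it would exceed it.
 */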
1097 
1098 /*
1099  * Recompute priority for all LWPs in a process.
1100  */
1101 void
1102 resetprocpriority(struct proc *p)
1103 {
1104 	struct lwp *l;
1105 
1106 	LIST_FOREACH(l, &p->p_lwps, l_sibling)
1107 	    resetpriority(l);
1108 }
1109 
1110 /*
1111  * We adjust the priority of the current process.  The priority of a process
1112  * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
1113  * is increased here.  The formula for computing priorities (see resetpriority())
1114  * will compute a different value each time p_estcpu increases. This can
1115  * cause a switch, but unless the priority crosses a PPQ boundary the actual
1116  * queue will not change.  The cpu usage estimator ramps up quite quickly
1117  * when the process is running (linearly), and decays away exponentially, at
1118  * a rate which is proportionally slower when the system is busy.  The basic
1119  * principle is that the system will 90% forget that the process used a lot
1120  * of CPU time in 5 * loadav seconds.  This causes the system to favor
1121  * processes which haven't run much recently, and to round-robin among other
1122  * processes.
1123  */
1124 
1125 void
1126 schedclock(struct lwp *l)
1127 {
1128 	struct proc *p = l->l_proc;
1129 	int s;
1130 
1131 	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);
1132 	SCHED_LOCK(s);
1133 	resetpriority(l);
1134 	SCHED_UNLOCK(s);
1135 
1136 	if (l->l_priority >= PUSER)
1137 		l->l_priority = l->l_usrpri;
1138 }
1139 
1140 void
1141 suspendsched(void)
1142 {
1143 	struct lwp *l;
1144 	int s;
1145 
1146 	/*
1147 	 * Convert all non-P_SYSTEM LSSLEEP or LSRUN processes to
1148 	 * LSSUSPENDED.
1149 	 */
1150 	proclist_lock_read();
1151 	SCHED_LOCK(s);
1152 	LIST_FOREACH(l, &alllwp, l_list) {
1153 		if ((l->l_proc->p_flag & P_SYSTEM) != 0)
1154 			continue;
1155 
1156 		switch (l->l_stat) {
1157 		case LSRUN:
1158 			l->l_proc->p_nrlwps--;
1159 			if ((l->l_flag & L_INMEM) != 0)
1160 				remrunqueue(l);
1161 			/* FALLTHROUGH */
1162 		case LSSLEEP:
1163 			l->l_stat = LSSUSPENDED;
1164 			break;
1165 		case LSONPROC:
1166 			/*
1167 			 * XXX SMP: we need to deal with processes on
1168 			 * other CPUs!
1169 			 */
1170 			break;
1171 		default:
1172 			break;
1173 		}
1174 	}
1175 	SCHED_UNLOCK(s);
1176 	proclist_unlock_read();
1177 }
1178 
1179 /*
1180  * Low-level routines to access the run queue.  Optimised assembler
1181  * routines can override these.
1182  */
1183 
1184 #ifndef __HAVE_MD_RUNQUEUE
1185 
1186 /*
1187  * On some architectures, it's faster to use an MSB ordering for the priorities
1188  * than the traditional LSB ordering.
1189  */
1190 #ifdef __HAVE_BIGENDIAN_BITOPS
1191 #define	RQMASK(n) (0x80000000 >> (n))
1192 #else
1193 #define	RQMASK(n) (0x00000001 << (n))
1194 #endif
1195 
1196 /*
1197  * The primitives that manipulate the run queues.  sched_whichqs tells which
1198  * of the 32 queues in sched_qs[] have processes in them.  setrunqueue() puts
1199  * processes into queues, remrunqueue() removes them.  The running process is
1200  * on no queue; other processes are on a queue related to l->l_priority,
1201  * divided by 4 to shrink the 0-127 range of priorities into the 32
1202  * available queues.
1203  */
1204 
1205 void
1206 setrunqueue(struct lwp *l)
1207 {
1208 	struct prochd *rq;
1209 	struct lwp *prev;
1210 	const int whichq = l->l_priority / 4;
1211 
1212 #ifdef DIAGNOSTIC
1213 	if (l->l_back != NULL || l->l_wchan != NULL || l->l_stat != LSRUN)
1214 		panic("setrunqueue");
1215 #endif
1216 	sched_whichqs |= RQMASK(whichq);
1217 	rq = &sched_qs[whichq];
1218 	prev = rq->ph_rlink;
1219 	l->l_forw = (struct lwp *)rq;
1220 	rq->ph_rlink = l;
1221 	prev->l_forw = l;
1222 	l->l_back = prev;
1223 }
1224 
1225 void
1226 remrunqueue(struct lwp *l)
1227 {
1228 	struct lwp *prev, *next;
1229 	const int whichq = l->l_priority / 4;
1230 #ifdef DIAGNOSTIC
1231 	if (((sched_whichqs & RQMASK(whichq)) == 0))
1232 		panic("remrunqueue");
1233 #endif
1234 	prev = l->l_back;
1235 	l->l_back = NULL;
1236 	next = l->l_forw;
1237 	prev->l_forw = next;
1238 	next->l_back = prev;
1239 	if (prev == next)
1240 		sched_whichqs &= ~RQMASK(whichq);
1241 }
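/*
 * Editorial sketch (not part of the kernel): how a dispatcher can use
 * sched_whichqs and sched_qs[] to pick the next LWP.  The real selection
 * is done by the MD cpu_switch(); the scan below assumes the
 * little-endian RQMASK() ordering and that libkern's ffs() is available,
 * and exists only to illustrate the data structure.
 */
#if 0
static struct lwp *
nextrunqueue_sketch(void)
{
	struct prochd *rq;
	struct lwp *l;
	int whichq;

	if (sched_whichqs == 0)
		return (NULL);			/* nothing runnable */
	whichq = ffs(sched_whichqs) - 1;	/* best (lowest) non-empty queue */
	rq = &sched_qs[whichq];
	l = rq->ph_link;			/* head of that queue */
	remrunqueue(l);				/* clears the bit if now empty */
	return (l);
}
#endif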
1242 
1243 #undef RQMASK
1244 #endif /* !defined(__HAVE_MD_RUNQUEUE) */
1245