/*	$NetBSD: kern_synch.c,v 1.102 2001/04/20 17:58:49 thorpej Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
__volatile u_int32_t sched_whichqs;	/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;
#if defined(MULTIPROCESSOR)
struct lock kernel_lock;
#endif

void schedcpu(void *);
void updatepri(struct proc *);
void endtsleep(void *);

__inline void awaken(struct proc *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER;

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curproc != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))
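
/*
 * Worked illustration (not part of the original derivation): with the
 * load average pinned at 1.0, loadfactor() gives 2 (scaled by FSCALE),
 * so each schedcpu() pass computes decay_cpu() == 2/(2+1) == 2/3 of the
 * old p_estcpu.  Five passes leave (2/3)^5 =~ 0.13 of the original
 * value, and the table above refines this: (2/3)^5.68 =~ .1, i.e. 90%
 * of the estimate is forgotten.
 */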

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct proc *p;
	int s, s1;
	unsigned int newcpu;
	int clkhz;

	proclist_lock_read();
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
			((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
			100 * (((fixpt_t) p->p_cpticks)
				<< (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
			(p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int)decay_cpu(loadfac, p->p_estcpu);
		p->p_estcpu = newcpu;
		SCHED_LOCK(s1);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
			if (p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		SCHED_UNLOCK(s1);
		splx(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_reset(&schedcpu_ch, hz, schedcpu, NULL);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(struct proc *p)
{
	unsigned int newcpu;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();

	newcpu = p->p_estcpu;
	loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(p);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until the scheduler lock (sched_lock) is held.
 * The interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the interlock
 * will always be unlocked upon return.
 */
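/*
 * Example (an illustrative sketch only; `sc' and its members are
 * hypothetical): a driver waiting for a condition protected by a
 * simple lock, where another thread clears sc_busy and then calls
 * wakeup(&sc->sc_busy):
 *
 *	int error = 0;
 *
 *	simple_lock(&sc->sc_slock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH,
 *		    "scwait", 0, &sc->sc_slock);
 *		if (error != 0)
 *			break;
 *	}
 *	simple_unlock(&sc->sc_slock);
 */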
int
ltsleep(void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct proc *p = curproc;
	struct slpque *qp;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curproc == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (p == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration, just give
		 * interrupts a chance, then return; don't run any other
		 * procs or panic below, in case this is the idle process
		 * and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	KASSERT(p != NULL);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (p->p_stat != SONPROC)
		panic("ltsleep: p_stat %d != SONPROC", p->p_stat);
	if (p->p_back != NULL)
		panic("ltsleep: p_back != NULL");
#endif

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
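	/* Append the process to the tail of this identifier's sleep queue. */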
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;

	if (timo)
		callout_reset(&p->p_tsleep_ch, timo, endtsleep, p);

	/*
	 * We can now release the interlock; sched_lock is held, so a
	 * thread can't get in to do wakeup() before we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			if (p->p_wchan != NULL)
				unsleep(p);
			p->p_stat = SONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (p->p_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;

	SCHED_ASSERT_LOCKED();
	mi_switch(p);

#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	asm(".globl bpendtsleep ; bpendtsleep:");
#endif

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;

	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&p->p_tsleep_ch);
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (relock && interlock != NULL)
			simple_lock(interlock);
		if ((SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);
	return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p;
	int s;

	p = (struct proc *)arg;

	SCHED_LOCK(s);
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct proc *p)
{
	struct slpque *qp;
	struct proc **hp;

	SCHED_ASSERT_LOCKED();

	if (p->p_wchan) {
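		/*
		 * Walk the queue with a pointer to the link field, so
		 * removing the head element needs no special case.
		 */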
		hp = &(qp = SLPQUE(p->p_wchan))->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct proc *p)
{

	SCHED_ASSERT_LOCKED();

	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	p->p_stat = SRUN;

	/*
	 * Since curpriority is a user priority, p->p_priority
	 * is always better than curpriority.
	 */
	if (p->p_flag & P_INMEM) {
		setrunqueue(p);
		KASSERT(p->p_cpu != NULL);
		need_resched(p->p_cpu);
	} else
		sched_wakeup(&proc0);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}

void
sched_wakeup(void *ident)
{
	struct slpque *qp;
	struct proc *p, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				awaken(p);
				goto restart;
			}
		} else
			q = &p->p_forw;
	}
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(void *ident)
{
	struct slpque *qp;
	struct proc *p, **q;
	struct proc *best_sleepp, **best_sleepq;
	struct proc *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (p = *q) != NULL; q = &p->p_forw) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup_one");
#endif
		if (p->p_wchan == ident) {
			if (p->p_stat == SSLEEP) {
				if (best_sleepp == NULL ||
				    p->p_priority < best_sleepp->p_priority) {
					best_sleepp = p;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    p->p_priority < best_stopp->p_priority) {
					best_stopp = p;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Prefer any SSLEEP process over even the highest-priority
	 * SSTOP process.
	 */
	if (best_sleepp != NULL) {
		p = best_sleepp;
		q = best_sleepq;
	} else {
		p = best_stopp;
		q = best_stopq;
	}

	if (p != NULL) {
		p->p_wchan = NULL;
		*q = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = q;
		if (p->p_stat == SSLEEP)
			awaken(p);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.
 */
void
yield(void)
{
	struct proc *p = curproc;
	int s;

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch(p);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.  If a process is supplied,
 * we switch to that process.  Otherwise, we use the normal process selection
 * criteria.
 */
void
preempt(struct proc *newp)
{
	struct proc *p = curproc;
	int s;

	/*
	 * XXX Switching to a specific process is not supported yet.
	 */
	if (newp != NULL)
		panic("preempt: cpu_preempt not yet implemented");

	SCHED_LOCK(s);
	p->p_priority = p->p_usrpri;
	p->p_stat = SRUN;
	setrunqueue(p);
	p->p_stats->p_ru.ru_nivcsw++;
	mi_switch(p);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 */
void
mi_switch(struct proc *p)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
#if defined(MULTIPROCESSOR)
	int hold_count;
#endif

	SCHED_ASSERT_LOCKED();

#if defined(MULTIPROCESSOR)
	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new process and removes it from the run queue.
	 */
	if (p->p_flag & P_BIGLOCK)
		hold_count = spinlock_release_all(&kernel_lock);
#endif

	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());

	spc = &p->p_cpu->ci_schedstate;

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
	spinlock_switchcheck();
#endif
#ifdef LOCKDEBUG
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		/*
		 * XXXSMP: we're inside the scheduler lock perimeter;
		 * use sched_psignal.
		 */
		if (s >= rlim->rlim_max)
			sched_psignal(p, SIGKILL);
		else {
			sched_psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(p);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

	/*
	 * Pick a new current process and switch to it.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	cpu_switch(p);

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(p->p_cpu != NULL);
	KDASSERT(p->p_cpu == curcpu());
	microtime(&p->p_cpu->ci_schedstate.spc_runtime);

#if defined(MULTIPROCESSOR)
	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	if (p->p_flag & P_BIGLOCK)
		spinlock_acquire_count(&kernel_lock, hold_count);
#endif
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit(void)
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct proc *)&sched_qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct proc *p)
{

	SCHED_ASSERT_LOCKED();

	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SONPROC:
	case SZOMB:
	case SDEAD:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
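		/* FALLTHROUGH */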
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);

	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0)
		sched_wakeup((caddr_t)&proc0);
	else if (p->p_priority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * This is not exactly right.  Since p->p_cpu persists
		 * across a context switch, this gives us some sort
		 * of processor affinity.  But we need to figure out
		 * at what point it's better to reschedule on a different
		 * CPU than the last one.
		 */
		need_resched((p->p_cpu != NULL) ? p->p_cpu : curcpu());
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct proc *p)
{
	unsigned int newpriority;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + p->p_estcpu + NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	if (newpriority < curcpu()->ci_schedstate.spc_curpriority) {
		/*
		 * XXXSMP
		 * Same applies as in setrunnable() above.
		 */
		need_resched((p->p_cpu != NULL) ? p->p_cpu : curcpu());
	}
}
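
/*
 * Worked example (illustrative only; the NICE_WEIGHT and NZERO values
 * below are assumptions, as both are defined outside this file): with
 * NICE_WEIGHT == 2 and NZERO == 20, a process at nice +5 with
 * p_estcpu == 8 gets p_usrpri == PUSER + 8 + 2 * 5 == PUSER + 18,
 * i.e. 18 priority steps worse than a fresh, un-niced process.
 */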

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The cpu usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases. This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The cpu usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */
void
schedclock(struct proc *p)
{
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + 1);

	SCHED_LOCK(s);
	resetpriority(p);
	SCHED_UNLOCK(s);

	if (p->p_priority >= PUSER)
		p->p_priority = p->p_usrpri;
}

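/*
 * suspendsched:
 *
 *	Convert all non-P_SYSTEM processes to the stopped state, so
 *	that the scheduler will no longer run them.
 */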
void
suspendsched(void)
{
	struct proc *p;
	int s;

	/*
	 * Convert all non-P_SYSTEM SSLEEP or SRUN processes to SSTOP.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	for (p = LIST_FIRST(&allproc); p != NULL; p = LIST_NEXT(p, p_list)) {
		if ((p->p_flag & P_SYSTEM) != 0)
			continue;
		switch (p->p_stat) {
		case SRUN:
			if ((p->p_flag & P_INMEM) != 0)
				remrunqueue(p);
			/* FALLTHROUGH */
		case SSLEEP:
			p->p_stat = SSTOP;
			break;
		case SONPROC:
			/*
			 * XXX SMP: we need to deal with processes on
			 * other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}