/*	$NetBSD: kern_synch.c,v 1.162 2006/06/24 05:23:06 mrg Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.162 2006/06/24 05:23:06 mrg Exp $");

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * Sleep queues.
 *
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define	SLPQUE_TABLESIZE	128
#define	SLPQUE_LOOKUP(x)	(((u_long)(x) >> 8) & (SLPQUE_TABLESIZE - 1))

#define	SLPQUE(ident)	(&sched_slpque[SLPQUE_LOOKUP(ident)])
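
/*
 * For example (illustrative arithmetic only): a wait channel at
 * kernel address 0xc0a4b218 hashes to
 *	(0xc0a4b218 >> 8) & 127 == 0xc0a4b2 & 0x7f == 50,
 * so every sleeper on that identifier is queued on sched_slpque[50].
 * Distinct identifiers may collide in the same bucket, which is why
 * wakeup() below re-checks l_wchan against the identifier.
 */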

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
volatile uint32_t sched_whichqs;	/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;

void schedcpu(void *);
void updatepri(struct lwp *);
void endtsleep(void *);

inline void sa_awaken(struct lwp *);
inline void awaken(struct lwp *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);
static unsigned int schedcpu_ticks;


/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}
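
/*
 * For example (assuming the usual hz == 100): rrticks == hz / 10 == 10,
 * so hardclock() calls roundrobin() every 10 ticks, i.e. every 100ms.
 * An LWP still on the CPU at the second consecutive roundrobin() already
 * has SPCF_SEENRR set and is therefore marked SPCF_SHOULDYIELD.
 */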

#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg.
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 1 + x/1! + x**2/2! + ... ;
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1;
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
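
/*
 * A user-space sketch of the decay identity above (illustrative only;
 * assumes FSHIFT == 11, so FSCALE == 2048):
 *
 *	fixpt_t loadfac = loadfactor(FSCALE);	-- loadavg of 1.0
 *	fixpt_t e = 32 << ESTCPU_SHIFT;
 *	int i;
 *
 *	for (i = 0; i < 5; i++)			-- 5 * loadavg iterations
 *		e = decay_cpu(loadfac, e);
 *
 * Each step multiplies e by loadfac / (loadfac + FSCALE) == 2/3, so e
 * ends near 13% of its initial value; the exact "90% forgotten" point
 * falls near 5.68 iterations for loadavg 1, matching the power table
 * above.
 */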

/*
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least seven times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
static fixpt_t
decay_cpu_batch(fixpt_t loadfac, fixpt_t estcpu, unsigned int n)
{

	if ((n << FSHIFT) >= 7 * loadfac) {
		return 0;
	}

	while (estcpu != 0 && n > 1) {
		estcpu = decay_cpu(loadfac, estcpu);
		n--;
	}

	return estcpu;
}
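
/*
 * For example (illustrative, with FSHIFT == 11): at loadavg 1.0,
 * loadfac == 2 * FSCALE == 4096, so the shortcut above fires once
 * n << 11 >= 7 * 4096, i.e. after 14 or more seconds asleep the whole
 * estimate is dropped straight to 0 instead of being decayed 14 times.
 */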

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	11
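
/*
 * Sanity check of the 60-second figure (illustrative arithmetic only):
 * each schedcpu() run scales p_pctcpu by ccpu/FSCALE == exp(-1/20), so
 * after 60 one-second runs the old contribution is exp(-60/20) ==
 * exp(-3) ~= 0.0498, i.e. about 95% of it has decayed away.
 */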

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct lwp *l;
	struct proc *p;
	int s, minslp;
	int clkhz;

	schedcpu_ticks++;

	proclist_lock_read();
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
		splx(s);	/* Done with the process CPU ticks update */
		SCHED_LOCK(s);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_slptime > 1)
				continue;
			resetpriority(l);
			if (l->l_priority >= PUSER) {
				if (l->l_stat == LSRUN &&
				    (l->l_flag & L_INMEM) &&
				    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
					remrunqueue(l);
					l->l_priority = l->l_usrpri;
					setrunqueue(l);
				} else
					l->l_priority = l->l_usrpri;
			}
		}
		SCHED_UNLOCK(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();
	KASSERT(l->l_slptime > 1);

	loadfac = loadfactor(averunnable.ldavg[0]);

	l->l_slptime--; /* the first time was done in schedcpu */
	/* XXX NJWLWP */
	p->p_estcpu = decay_cpu_batch(loadfac, p->p_estcpu, l->l_slptime);
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until the scheduler_slock is acquired.  The
 * interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
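
/*
 * Typical usage (a sketch; "sc" and its fields are hypothetical):
 *
 *	simple_lock(&sc->sc_slock);
 *	while (sc->sc_busy) {
 *		error = ltsleep(&sc->sc_busy, PRIBIO | PCATCH, "scbusy",
 *		    0, &sc->sc_slock);
 *		if (error)
 *			break;		// interlock is still held here
 *	}
 *	...
 *	simple_unlock(&sc->sc_slock);
 *
 * Because PNORELOCK is not passed, ltsleep() reacquires sc_slock before
 * returning, so the loop re-tests sc_busy under the interlock.
 */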
int
ltsleep(volatile const void *ident, int priority, const char *wmesg, int timo,
    volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	struct proc *p = l ? l->l_proc : NULL;
	struct slpque *qp;
	struct sadata_upcall *sau;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
	int exiterr = (priority & PNOEXITERR) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curlwp == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	KASSERT(p != NULL);
	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(l, 1, 0);
#endif

	/*
	 * XXX We need to allocate the sadata_upcall structure here,
	 * XXX since we can't sleep while waiting for memory inside
	 * XXX sa_upcall().  It would be nice if we could safely
	 * XXX allocate the sadata_upcall structure on the stack, here.
	 */
	if (l->l_flag & L_SA) {
		sau = sadata_upcall_alloc(0);
	} else {
		sau = NULL;
	}

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (l->l_stat != LSONPROC)
		panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
	if (l->l_back != NULL)
		panic("ltsleep: l_back != NULL");
#endif

	l->l_wchan = ident;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = l;
	else {
		*qp->sq_tailp = l;
	}
	*(qp->sq_tailp = &l->l_forw) = 0;

	if (timo)
		callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);

	/*
	 * We can now release the interlock; the scheduler_slock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as LSSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, l->l_wchan will be NULL upon return from CURSIG.
	 */
	if (catch) {
		l->l_flag |= L_SINTR;
		if (((sig = CURSIG(l)) != 0) ||
		    ((p->p_flag & P_WEXIT) && p->p_nlwps > 1)) {
			if (l->l_wchan != NULL)
				unsleep(l);
			l->l_stat = LSONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (l->l_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	l->l_stat = LSSLEEP;
	p->p_nrlwps--;
	p->p_stats->p_ru.ru_nvcsw++;
	SCHED_ASSERT_LOCKED();
	if (l->l_flag & L_SA)
		sa_switch(l, sau, SA_UPCALL_BLOCKED);
	else
		mi_switch(l, NULL);

#if	defined(DDB) && !defined(GPROF) && !defined(sun2)
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep\nbpendtsleep:");
#endif
	/*
	 * p->p_nrlwps is incremented by whoever made us runnable again,
	 * either setrunnable() or awaken().
	 */

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;

	l->l_flag &= ~L_SINTR;
	if (l->l_flag & L_TIMEOUT) {
		l->l_flag &= ~(L_TIMEOUT|L_CANCELLED);
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(l, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&l->l_tsleep_ch);

	if (catch) {
		const int cancelled = l->l_flag & L_CANCELLED;
		l->l_flag &= ~L_CANCELLED;
		if (sig != 0 || (sig = CURSIG(l)) != 0 || cancelled) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(l, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			/*
			 * If this sleep was canceled, don't let the syscall
			 * restart.
			 */
			if (cancelled ||
			    (SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
				return (EINTR);
			return (ERESTART);
		}
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(l, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);

	/*
	 * XXXNJW: this is very much a kluge.
	 * Revisit.  A better way of preventing looping/hanging syscalls
	 * like wait4() and _lwp_wait() from wedging an exiting process
	 * would be preferred.
	 */
	if (catch && ((p->p_flag & P_WEXIT) && p->p_nlwps > 1 && exiterr))
		return (EINTR);
	return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct lwp *l;
	int s;

	l = (struct lwp *)arg;
	SCHED_LOCK(s);
	if (l->l_wchan) {
		if (l->l_stat == LSSLEEP)
			setrunnable(l);
		else
			unsleep(l);
		l->l_flag |= L_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct lwp *l)
{
	struct slpque *qp;
	struct lwp **hp;

	SCHED_ASSERT_LOCKED();

	if (l->l_wchan) {
		hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
		while (*hp != l)
			hp = &(*hp)->l_forw;
		*hp = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = hp;
		l->l_wchan = 0;
	}
}

inline void
sa_awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
inline void
awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l->l_proc->p_sa)
		sa_awaken(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	l->l_stat = LSRUN;
	l->l_proc->p_nrlwps++;
	/*
	 * Since curpriority is a user priority, p->p_priority
	 * is always better than curpriority on the last CPU on
	 * which it ran.
	 *
	 * XXXSMP See affinity comment in resched_proc().
	 */
	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		KASSERT(l->l_cpu != NULL);
		need_resched(l->l_cpu);
	} else
		sched_wakeup(&proc0);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all processes sleeping on the specified identifier runnable.
 */

void
wakeup(volatile const void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}

void
sched_wakeup(volatile const void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (l = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup");
#endif
		if (l->l_wchan == ident) {
			l->l_wchan = 0;
			*q = l->l_forw;
			if (qp->sq_tailp == &l->l_forw)
				qp->sq_tailp = q;
			if (l->l_stat == LSSLEEP) {
				awaken(l);
				goto restart;
			}
		} else
			q = &l->l_forw;
	}
}

/*
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(volatile const void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;
	struct lwp *best_sleepp, **best_sleepq;
	struct lwp *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup_one");
#endif
		if (l->l_wchan == ident) {
			if (l->l_stat == LSSLEEP) {
				if (best_sleepp == NULL ||
				    l->l_priority < best_sleepp->l_priority) {
					best_sleepp = l;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    l->l_priority < best_stopp->l_priority) {
					best_stopp = l;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any SSLEEP process higher than the highest priority SSTOP
	 * process.
	 */
	if (best_sleepp != NULL) {
		l = best_sleepp;
		q = best_sleepq;
	} else {
		l = best_stopp;
		q = best_stopq;
	}

	if (l != NULL) {
		l->l_wchan = NULL;
		*q = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = q;
		if (l->l_stat == LSSLEEP)
			awaken(l);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;
	int s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 * The 'more' ("more work to do") argument is a boolean.  preempt() calls
 * made when returning to userspace pass 0; "voluntary" preemptions in
 * e.g. uiomove() pass 1.  It is used to indicate to the SA subsystem
 * that the LWP is not yet finished in the kernel.
 */

void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r, s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nivcsw++;
	r = mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 * Switch to "newl" if non-NULL, otherwise let cpu_switch()
 * choose the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
	int hold_count;
	struct proc *p = l->l_proc;
	int retval;

	SCHED_ASSERT_LOCKED();

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new process and removes it from the run queue.
	 */
	hold_count = KERNEL_LOCK_RELEASE_ALL();

	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());

	spc = &l->l_cpu->ci_schedstate;

#ifdef LOCKDEBUG
	spinlock_switchcheck();
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		/*
		 * XXXSMP: we're inside the scheduler lock perimeter;
		 * use sched_psignal.
		 */
		if (s >= rlim->rlim_max)
			sched_psignal(p, SIGKILL);
		else {
			sched_psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime &&
	    kauth_cred_geteuid(p->p_cred) && p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(l);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p))
		pmc_save_context(p);
#endif

	/*
	 * Switch to the new current process.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	if (newl == NULL) {
		retval = cpu_switch(l, NULL);
	} else {
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p))
		pmc_restore_context(p);
#endif

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	KERNEL_LOCK_ACQUIRE_COUNT(hold_count);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];
}

static inline void
resched_proc(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	switch (l->l_stat) {
	case 0:
	case LSRUN:
	case LSONPROC:
	case LSZOMB:
	case LSDEAD:
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
		/* FALLTHROUGH */
	case LSSLEEP:
		unsleep(l);		/* e.g. when sending signals */
		break;

	case LSIDL:
		break;
	case LSSUSPENDED:
		break;
	}

	if (l->l_proc->p_sa)
		sa_awaken(l);

	l->l_stat = LSRUN;
	p->p_nrlwps++;

	if (l->l_flag & L_INMEM)
		setrunqueue(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	if ((l->l_flag & L_INMEM) == 0)
		sched_wakeup((caddr_t)&proc0);
	else
		resched_proc(l, l->l_priority);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	resched_proc(l, l->l_usrpri);
}
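
/*
 * Worked example (illustrative, with ESTCPU_SHIFT == 11 and
 * NICE_WEIGHT == 2): a process at nice NZERO with
 * p_estcpu == 40 << ESTCPU_SHIFT gets
 *	newpriority = PUSER + 40 + 2 * 0 == PUSER + 40,
 * while the same process reniced to NZERO + 10 gets PUSER + 60
 * (clamped to MAXPRI).  Larger numbers are weaker priorities, so
 * accumulated CPU time and a positive nice both push the LWP toward
 * the back of the run queues.
 */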

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LIST_FOREACH(l, &p->p_lwps, l_sibling)
		resetpriority(l);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases. This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	SCHED_LOCK(s);
	resetpriority(l);
	SCHED_UNLOCK(s);

	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
}
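
/*
 * Illustration of the two halves of the estimator: each schedclock()
 * call charges the running LWP one unit (1 << ESTCPU_SHIFT) and
 * re-derives its user priority, so a compute-bound process's p_estcpu
 * climbs linearly until ESTCPULIM clips it, while schedcpu() above
 * decays it exponentially once a second.  The balance of the two gives
 * the "favor processes which haven't run much recently" behaviour
 * described above.
 */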

void
suspendsched()
{
	struct lwp *l;
	int s;

	/*
	 * Convert all non-P_SYSTEM LSSLEEP or LSRUN processes to
	 * LSSUSPENDED.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	LIST_FOREACH(l, &alllwp, l_list) {
		if ((l->l_proc->p_flag & P_SYSTEM) != 0)
			continue;

		switch (l->l_stat) {
		case LSRUN:
			l->l_proc->p_nrlwps--;
			if ((l->l_flag & L_INMEM) != 0)
				remrunqueue(l);
			/* FALLTHROUGH */
		case LSSLEEP:
			l->l_stat = LSSUSPENDED;
			break;
		case LSONPROC:
			/*
			 * XXX SMP: we need to deal with processes on
			 * other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	child->p_estcpu = child->p_estcpu_inherited = parent->p_estcpu;
	child->p_forktime = schedcpu_ticks;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	fixpt_t estcpu;

	/* XXX Only if parent != init?? */

	estcpu = decay_cpu_batch(loadfac, child->p_estcpu_inherited,
	    schedcpu_ticks - child->p_forktime);
	if (child->p_estcpu > estcpu) {
		parent->p_estcpu =
		    ESTCPULIM(parent->p_estcpu + child->p_estcpu - estcpu);
	}
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use a MSB ordering for the priorities
 * than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue, other processes are on a queue related to p->p_priority,
 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
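
/*
 * Example (illustrative arithmetic only): an LWP with l_priority 57
 * lands on queue 57 / PPQ == 57 / 4 == 14, and with the LSB ordering
 * its presence is recorded as sched_whichqs |= RQMASK(14) ==
 * 0x00004000.  MD cpu_switch() can then locate the best non-empty
 * queue (numerically smallest priority band) with a find-first-set
 * scan of sched_whichqs.
 */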

#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (void *)rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    "!= LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!\n",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_wchan != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
#ifdef DIAGNOSTIC
	if (((sched_whichqs & RQMASK(whichq)) == 0))
		panic("remrunqueue: bit %d not set", whichq);
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */