/*	$NetBSD: kern_synch.c,v 1.156 2005/12/20 19:26:15 rpaulo Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.156 2005/12/20 19:26:15 rpaulo Exp $");

#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>

#include <uvm/uvm_extern.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

int	lbolt;			/* once a second sleep address */
int	rrticks;		/* number of hardclock ticks per roundrobin() */

/*
 * Sleep queues.
 *
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define	SLPQUE_TABLESIZE	128
#define	SLPQUE_LOOKUP(x)	(((u_long)(x) >> 8) & (SLPQUE_TABLESIZE - 1))

#define	SLPQUE(ident)	(&sched_slpque[SLPQUE_LOOKUP(ident)])
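
/*
 * For example (illustrative arithmetic, not from this file): a wait
 * channel address of 0xc05f3a40 hashes to bucket
 * (0xc05f3a40 >> 8) & 127 == 0x3a == 58, so every LWP sleeping on that
 * ident is chained into sched_slpque[58].  Distinct idents can collide
 * in one bucket; sched_wakeup() below copes by re-checking l_wchan
 * against the ident.
 */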

/*
 * The global scheduler state.
 */
struct prochd sched_qs[RUNQUE_NQS];	/* run queues */
__volatile u_int32_t sched_whichqs;	/* bitmap of non-empty queues */
struct slpque sched_slpque[SLPQUE_TABLESIZE]; /* sleep queues */

struct simplelock sched_lock = SIMPLELOCK_INITIALIZER;

void schedcpu(void *);
void updatepri(struct lwp *);
void endtsleep(void *);

__inline void sa_awaken(struct lwp *);
__inline void awaken(struct lwp *);

struct callout schedcpu_ch = CALLOUT_INITIALIZER_SETFUNC(schedcpu, NULL);

/*
 * Force switch among equal priority processes every 100ms.
 * Called from hardclock every hz/10 == rrticks hardclock ticks.
 */
/* ARGSUSED */
void
roundrobin(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_rrticks = rrticks;

	if (curlwp != NULL) {
		if (spc->spc_flags & SPCF_SEENRR) {
			/*
			 * The process has already been through a roundrobin
			 * without switching and may be hogging the CPU.
			 * Indicate that the process should yield.
			 */
			spc->spc_flags |= SPCF_SHOULDYIELD;
		} else
			spc->spc_flags |= SPCF_SEENRR;
	}
	need_resched(curcpu());
}

#define	PPQ	(128 / RUNQUE_NQS)	/* priorities per queue */
#define	NICE_WEIGHT 2			/* priorities per nice level */

#define	ESTCPU_SHIFT	11
#define	ESTCPU_MAX	((NICE_WEIGHT * PRIO_MAX - PPQ) << ESTCPU_SHIFT)
#define	ESTCPULIM(e)	min((e), ESTCPU_MAX)
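
/*
 * With the values used here (PRIO_MAX is 20 from <sys/resource.h>, and
 * RUNQUE_NQS is 32, matching the 32-bit sched_whichqs bitmap above),
 * these work out to PPQ == 4 and
 * ESTCPU_MAX == (2 * 20 - 4) << 11 == 36 << 11 == 73728, so ESTCPULIM()
 * bounds the p_estcpu term that resetpriority() below can add to a
 * priority at 36.
 */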

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *          Note that, as ps(1) mentions, this can let percentages
 *          total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 * 	for (i = 0; i < (5 * loadavg); i++)
 * 		p_estcpu *= decay;
 * will compute
 * 	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 * 	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 * 	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 * 	b = 2 * loadavg
 * then
 * 	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *         For x close to zero, exp(x) =~ 1 + x, since
 *              exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *              therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *         For x close to zero, ln(1+x) =~ x, since
 *              ln(1+x) = x - x**2/2 + x**3/3 - ...     -1 < x < 1
 *              therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *         ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *      ln(factor) =~ (-2.30/5*loadav), or
 *      factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *          exp(-1/b) =~ (b-1)/b =~ b/(b+1).                    QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *      power*ln(b/(b+1)) =~ -2.30, or
 *      power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.  QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *      loadav: 1       2       3       4
 *      power:  5.68    10.32   14.94   19.55
 */
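
/*
 * A quick numeric check of the above (illustrative): for loadav == 1,
 * b == 2 and decay == 2/3 ~= 0.667.  Raising that to the tabulated
 * power, 0.667 ** 5.68 ~= 0.1, so 90% of p_estcpu is indeed forgotten
 * in about 5 * loadav seconds.
 */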

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))

static fixpt_t
decay_cpu(fixpt_t loadfac, fixpt_t estcpu)
{

	if (estcpu == 0) {
		return 0;
	}

#if !defined(_LP64)
	/* avoid 64bit arithmetics. */
#define	FIXPT_MAX ((fixpt_t)((UINTMAX_C(1) << sizeof(fixpt_t) * CHAR_BIT) - 1))
	if (__predict_true(loadfac <= FIXPT_MAX / ESTCPU_MAX)) {
		return estcpu * loadfac / (loadfac + FSCALE);
	}
#endif /* !defined(_LP64) */

	return (uint64_t)estcpu * loadfac / (loadfac + FSCALE);
}
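
/*
 * Example of decay_cpu() in fixed point (a sketch; assumes the usual
 * FSHIFT of 11, i.e. FSCALE == 2048): with a load average of 1.0,
 * averunnable.ldavg[0] == 2048 and loadfactor() gives loadfac == 4096,
 * so
 *
 *	decay_cpu(4096, e) == e * 4096 / (4096 + 2048) == (2 * e) / 3
 *
 * which is exactly the decay == b/(b+1) == 2/3 factor derived above.
 */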

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11
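
/*
 * Concretely (illustrative): each schedcpu() pass scales p_pctcpu by
 * ccpu/FSCALE == exp(-1/20) ~= 0.95122, so after 60 seconds of sleep
 * the old value has been multiplied by exp(-60/20) == exp(-3) ~= 0.05,
 * which is the "decay 95% in 60 seconds" promised above.
 */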

/*
 * Recompute process priorities, every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(void *arg)
{
	fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	struct lwp *l;
	struct proc *p;
	int s, minslp;
	int clkhz;

	proclist_lock_read();
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		minslp = 2;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			l->l_swtime++;
			if (l->l_stat == LSSLEEP || l->l_stat == LSSTOP ||
			    l->l_stat == LSSUSPENDED) {
				l->l_slptime++;
				minslp = min(minslp, l->l_slptime);
			} else
				minslp = 0;
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (minslp > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
		clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (clkhz == 100) ?
		    ((fixpt_t)p->p_cpticks) << (FSHIFT - CCPU_SHIFT) :
		    100 * (((fixpt_t)p->p_cpticks)
		    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		p->p_estcpu = decay_cpu(loadfac, p->p_estcpu);
		splx(s);	/* Done with the process CPU ticks update */
		SCHED_LOCK(s);
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l->l_slptime > 1)
				continue;
			resetpriority(l);
			if (l->l_priority >= PUSER) {
				if (l->l_stat == LSRUN &&
				    (l->l_flag & L_INMEM) &&
				    (l->l_priority / PPQ) != (l->l_usrpri / PPQ)) {
					remrunqueue(l);
					l->l_priority = l->l_usrpri;
					setrunqueue(l);
				} else
					l->l_priority = l->l_usrpri;
			}
		}
		SCHED_UNLOCK(s);
	}
	proclist_unlock_read();
	uvm_meter();
	wakeup((caddr_t)&lbolt);
	callout_schedule(&schedcpu_ch, hz);
}

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of (255 << ESTCPU_SHIFT),
 * sleeping for at least eight times the loadfactor will decay p_estcpu to
 * less than (1 << ESTCPU_SHIFT).
 *
 * note that our ESTCPU_MAX is actually much smaller than (255 << ESTCPU_SHIFT).
 */
void
updatepri(struct lwp *l)
{
	struct proc *p = l->l_proc;
	fixpt_t newcpu;
	fixpt_t loadfac;

	SCHED_ASSERT_LOCKED();

	newcpu = p->p_estcpu;
	loadfac = loadfactor(averunnable.ldavg[0]);

	if ((l->l_slptime << FSHIFT) >= 8 * loadfac)
		p->p_estcpu = 0; /* XXX NJWLWP */
	else {
		l->l_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --l->l_slptime)
			newcpu = decay_cpu(loadfac, newcpu);
		p->p_estcpu = newcpu;
	}
	resetpriority(l);
}

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping; otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until sched_lock is acquired.  The
 * interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
int
ltsleep(__volatile const void *ident, int priority, const char *wmesg, int timo,
    __volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	struct proc *p = l ? l->l_proc : NULL;
	struct slpque *qp;
	struct sadata_upcall *sau;
	int sig, s;
	int catch = priority & PCATCH;
	int relock = (priority & PNORELOCK) == 0;
	int exiterr = (priority & PNOEXITERR) == 0;

	/*
	 * XXXSMP
	 * This is probably bogus.  Figure out what the right
	 * thing to do here really is.
	 * Note that not sleeping if ltsleep is called with curlwp == NULL
	 * in the shutdown case is disgusting but partly necessary given
	 * how shutdown (barely) works.
	 */
	if (cold || (doing_shutdown && (panicstr || (l == NULL)))) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

	KASSERT(p != NULL);
	LOCK_ASSERT(interlock == NULL || simple_lock_held(interlock));

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(l, 1, 0);
#endif

	/*
	 * XXX We need to allocate the sadata_upcall structure here,
	 * XXX since we can't sleep while waiting for memory inside
	 * XXX sa_upcall().  It would be nice if we could safely
	 * XXX allocate the sadata_upcall structure on the stack, here.
	 */
	if (l->l_flag & L_SA) {
		sau = sadata_upcall_alloc(0);
	} else {
		sau = NULL;
	}

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("ltsleep: ident == NULL");
	if (l->l_stat != LSONPROC)
		panic("ltsleep: l_stat %d != LSONPROC", l->l_stat);
	if (l->l_back != NULL)
		panic("ltsleep: l_back != NULL");
#endif

	l->l_wchan = ident;
	l->l_wmesg = wmesg;
	l->l_slptime = 0;
	l->l_priority = priority & PRIMASK;

	qp = SLPQUE(ident);
	if (qp->sq_head == 0)
		qp->sq_head = l;
	else {
		*qp->sq_tailp = l;
	}
	*(qp->sq_tailp = &l->l_forw) = 0;

	if (timo)
		callout_reset(&l->l_tsleep_ch, timo, endtsleep, l);

	/*
	 * We can now release the interlock; sched_lock is held, so a
	 * thread can't get in to do wakeup() before we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, l->l_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		l->l_flag |= L_SINTR;
		if (((sig = CURSIG(l)) != 0) ||
		    ((p->p_flag & P_WEXIT) && p->p_nlwps > 1)) {
			if (l->l_wchan != NULL)
				unsleep(l);
			l->l_stat = LSONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (l->l_wchan == NULL) {
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	l->l_stat = LSSLEEP;
	p->p_nrlwps--;
	p->p_stats->p_ru.ru_nvcsw++;
	SCHED_ASSERT_LOCKED();
	if (l->l_flag & L_SA)
		sa_switch(l, sau, SA_UPCALL_BLOCKED);
	else
		mi_switch(l, NULL);

#if	defined(DDB) && !defined(GPROF)
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep\nbpendtsleep:");
#endif
	/*
	 * p->p_nrlwps is incremented by whoever made us runnable again,
	 * either setrunnable() or awaken().
	 */

	SCHED_ASSERT_UNLOCKED();
	splx(s);

 resume:
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	l->l_cpu->ci_schedstate.spc_curpriority = l->l_usrpri;

	l->l_flag &= ~L_SINTR;
	if (l->l_flag & L_TIMEOUT) {
		l->l_flag &= ~(L_TIMEOUT|L_CANCELLED);
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(l, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		callout_stop(&l->l_tsleep_ch);

	if (catch) {
		const int cancelled = l->l_flag & L_CANCELLED;
		l->l_flag &= ~L_CANCELLED;
		if (sig != 0 || (sig = CURSIG(l)) != 0 || cancelled) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(l, 0, 0);
#endif
			if (relock && interlock != NULL)
				simple_lock(interlock);
			/*
			 * If this sleep was canceled, don't let the syscall
			 * restart.
			 */
			if (cancelled ||
			    (SIGACTION(p, sig).sa_flags & SA_RESTART) == 0)
				return (EINTR);
			return (ERESTART);
		}
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(l, 0, 0);
#endif
	if (relock && interlock != NULL)
		simple_lock(interlock);

	/*
	 * XXXNJW: this is very much a kluge; revisit.  A better way of
	 * preventing looping/hanging syscalls like wait4() and _lwp_wait()
	 * from wedging an exiting process would be preferred.
	 */
	if (catch && ((p->p_flag & P_WEXIT) && p->p_nlwps > 1 && exiterr))
		return (EINTR);
	return (0);
}
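
/*
 * Illustrative only: the canonical consumer-side pattern for the
 * ltsleep() interface documented above.  This sketch is not part of
 * the kernel; "res_lock", "res_avail" and "res_wanted" are
 * hypothetical names.  The interlock closes the race between testing
 * the condition and going to sleep.
 */
#if 0
	int error;

	simple_lock(&res_lock);
	while (!res_avail) {
		res_wanted = 1;
		error = ltsleep(&res_avail, PWAIT | PCATCH, "reswait", 0,
		    &res_lock);
		if (error) {
			/* EINTR/ERESTART from a signal, or EWOULDBLOCK. */
			simple_unlock(&res_lock);
			return (error);
		}
	}
	res_avail = 0;
	simple_unlock(&res_lock);
#endif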

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct lwp *l;
	int s;

	l = (struct lwp *)arg;
	SCHED_LOCK(s);
	if (l->l_wchan) {
		if (l->l_stat == LSSLEEP)
			setrunnable(l);
		else
			unsleep(l);
		l->l_flag |= L_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue
 */
void
unsleep(struct lwp *l)
{
	struct slpque *qp;
	struct lwp **hp;

	SCHED_ASSERT_LOCKED();

	if (l->l_wchan) {
		hp = &(qp = SLPQUE(l->l_wchan))->sq_head;
		while (*hp != l)
			hp = &(*hp)->l_forw;
		*hp = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = hp;
		l->l_wchan = 0;
	}
}

__inline void
sa_awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l == l->l_savp->savp_lwp && l->l_flag & L_SA_YIELD)
		l->l_flag &= ~L_SA_IDLE;
}

/*
 * Optimized-for-wakeup() version of setrunnable().
 */
__inline void
awaken(struct lwp *l)
{

	SCHED_ASSERT_LOCKED();

	if (l->l_proc->p_sa)
		sa_awaken(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	l->l_stat = LSRUN;
	l->l_proc->p_nrlwps++;
	/*
	 * Since curpriority is a user priority, p->p_priority
	 * is always better than curpriority on the last CPU on
	 * which it ran.
	 *
	 * XXXSMP See affinity comment in resched_proc().
	 */
	if (l->l_flag & L_INMEM) {
		setrunqueue(l);
		KASSERT(l->l_cpu != NULL);
		need_resched(l->l_cpu);
	} else
		sched_wakeup(&proc0);
}

#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
void
sched_unlock_idle(void)
{

	simple_unlock(&sched_lock);
}

void
sched_lock_idle(void)
{

	simple_lock(&sched_lock);
}
#endif /* MULTIPROCESSOR || LOCKDEBUG */

/*
 * Make all processes sleeping on the specified identifier runnable.
 */

void
wakeup(__volatile const void *ident)
{
	int s;

	SCHED_ASSERT_UNLOCKED();

	SCHED_LOCK(s);
	sched_wakeup(ident);
	SCHED_UNLOCK(s);
}
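
/*
 * And the matching producer side of the sketch shown after ltsleep()
 * (again illustrative only, with hypothetical names): set the
 * condition under the interlock, then wake the sleepers.
 */
#if 0
	simple_lock(&res_lock);
	res_avail = 1;
	if (res_wanted) {
		res_wanted = 0;
		wakeup(&res_avail);
	}
	simple_unlock(&res_lock);
#endif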

void
sched_wakeup(__volatile const void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;

	SCHED_ASSERT_LOCKED();

	qp = SLPQUE(ident);
 restart:
	for (q = &qp->sq_head; (l = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup");
#endif
		if (l->l_wchan == ident) {
			l->l_wchan = 0;
			*q = l->l_forw;
			if (qp->sq_tailp == &l->l_forw)
				qp->sq_tailp = q;
			if (l->l_stat == LSSLEEP) {
				awaken(l);
				goto restart;
			}
		} else
			q = &l->l_forw;
	}
}

/*
 * Make the highest-priority process sleeping on the specified
 * identifier runnable (wake only that one).
 */
void
wakeup_one(__volatile const void *ident)
{
	struct slpque *qp;
	struct lwp *l, **q;
	struct lwp *best_sleepp, **best_sleepq;
	struct lwp *best_stopp, **best_stopq;
	int s;

	best_sleepp = best_stopp = NULL;
	best_sleepq = best_stopq = NULL;

	SCHED_LOCK(s);

	qp = SLPQUE(ident);

	for (q = &qp->sq_head; (l = *q) != NULL; q = &l->l_forw) {
#ifdef DIAGNOSTIC
		if (l->l_back || (l->l_stat != LSSLEEP &&
		    l->l_stat != LSSTOP && l->l_stat != LSSUSPENDED))
			panic("wakeup_one");
#endif
		if (l->l_wchan == ident) {
			if (l->l_stat == LSSLEEP) {
				if (best_sleepp == NULL ||
				    l->l_priority < best_sleepp->l_priority) {
					best_sleepp = l;
					best_sleepq = q;
				}
			} else {
				if (best_stopp == NULL ||
				    l->l_priority < best_stopp->l_priority) {
					best_stopp = l;
					best_stopq = q;
				}
			}
		}
	}

	/*
	 * Consider any SSLEEP process higher than the highest priority SSTOP
	 * process.
	 */
	if (best_sleepp != NULL) {
		l = best_sleepp;
		q = best_sleepq;
	} else {
		l = best_stopp;
		q = best_stopq;
	}

	if (l != NULL) {
		l->l_wchan = NULL;
		*q = l->l_forw;
		if (qp->sq_tailp == &l->l_forw)
			qp->sq_tailp = q;
		if (l->l_stat == LSSLEEP)
			awaken(l);
	}
	SCHED_UNLOCK(s);
}

/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2) in compat code).
 */
void
yield(void)
{
	struct lwp *l = curlwp;
	int s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nvcsw++;
	mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 * The 'more' ("more work to do") argument is a boolean.  Calls made while
 * returning to userspace pass 0; "voluntary" preemptions from e.g. uiomove()
 * pass 1.  It is used to tell the SA subsystem that the LWP is not yet
 * finished in the kernel.
 */

void
preempt(int more)
{
	struct lwp *l = curlwp;
	int r, s;

	SCHED_LOCK(s);
	l->l_priority = l->l_usrpri;
	l->l_stat = LSRUN;
	setrunqueue(l);
	l->l_proc->p_stats->p_ru.ru_nivcsw++;
	r = mi_switch(l, NULL);
	SCHED_ASSERT_UNLOCKED();
	splx(s);
	if ((l->l_flag & L_SA) != 0 && r != 0 && more == 0)
		sa_preempt(l);
}

/*
 * The machine independent parts of context switch.
 * Must be called at splsched() (no higher!) and with
 * the sched_lock held.
 * Switch to "new" if non-NULL, otherwise let cpu_switch choose
 * the next lwp.
 *
 * Returns 1 if another process was actually run.
 */
int
mi_switch(struct lwp *l, struct lwp *newl)
{
	struct schedstate_percpu *spc;
	struct rlimit *rlim;
	long s, u;
	struct timeval tv;
	int hold_count;
	struct proc *p = l->l_proc;
	int retval;

	SCHED_ASSERT_LOCKED();

	/*
	 * Release the kernel_lock, as we are about to yield the CPU.
	 * The scheduler lock is still held until cpu_switch()
	 * selects a new process and removes it from the run queue.
	 */
	hold_count = KERNEL_LOCK_RELEASE_ALL();

	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());

	spc = &l->l_cpu->ci_schedstate;

#if defined(LOCKDEBUG) || defined(DIAGNOSTIC)
	spinlock_switchcheck();
#endif
#ifdef LOCKDEBUG
	simple_lock_switchcheck();
#endif

	/*
	 * Compute the amount of time during which the current
	 * process was running.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec +
	    (tv.tv_usec - spc->spc_runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - spc->spc_runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its CPU resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		/*
		 * XXXSMP: we're inside the scheduler lock perimeter;
		 * use sched_psignal.
		 */
		if (s >= rlim->rlim_max)
			sched_psignal(p, SIGKILL);
		else {
			sched_psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (autonicetime && s > autonicetime && p->p_ucred->cr_uid &&
	    p->p_nice == NZERO) {
		p->p_nice = autoniceval + NZERO;
		resetpriority(l);
	}

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/*
	 * If we are using h/w performance counters, save context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p))
		pmc_save_context(p);
#endif

	/*
	 * Switch to the new current process.  When we
	 * run again, we'll return back here.
	 */
	uvmexp.swtch++;
	if (newl == NULL) {
		retval = cpu_switch(l, NULL);
	} else {
		remrunqueue(newl);
		cpu_switchto(l, newl);
		retval = 0;
	}

	/*
	 * If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(p))
		pmc_restore_context(p);
#endif

	/*
	 * Make sure that MD code released the scheduler lock before
	 * resuming us.
	 */
	SCHED_ASSERT_UNLOCKED();

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	KDASSERT(l->l_cpu != NULL);
	KDASSERT(l->l_cpu == curcpu());
	microtime(&l->l_cpu->ci_schedstate.spc_runtime);

	/*
	 * Reacquire the kernel_lock now.  We do this after we've
	 * released the scheduler lock to avoid deadlock, and before
	 * we reacquire the interlock.
	 */
	KERNEL_LOCK_ACQUIRE_COUNT(hold_count);

	return retval;
}

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	int i;

	for (i = 0; i < RUNQUE_NQS; i++)
		sched_qs[i].ph_link = sched_qs[i].ph_rlink =
		    (struct lwp *)&sched_qs[i];
}

static __inline void
resched_proc(struct lwp *l, u_char pri)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 *
	 * XXXSMP
	 * There is also the issue of locking the other CPU's
	 * sched state, which we currently do not do.
	 */
	ci = (l->l_cpu != NULL) ? l->l_cpu : curcpu();
	if (pri < ci->ci_schedstate.spc_curpriority)
		need_resched(ci);
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	switch (l->l_stat) {
	case 0:
	case LSRUN:
	case LSONPROC:
	case LSZOMB:
	case LSDEAD:
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0) {
			sigaddset(&p->p_sigctx.ps_siglist, p->p_xstat);
			CHECKSIGS(p);
		}
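		/* FALLTHROUGH */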
	case LSSLEEP:
		unsleep(l);		/* e.g. when sending signals */
		break;

	case LSIDL:
		break;
	case LSSUSPENDED:
		break;
	}

	if (l->l_proc->p_sa)
		sa_awaken(l);

	l->l_stat = LSRUN;
	p->p_nrlwps++;

	if (l->l_flag & L_INMEM)
		setrunqueue(l);

	if (l->l_slptime > 1)
		updatepri(l);
	l->l_slptime = 0;
	if ((l->l_flag & L_INMEM) == 0)
		sched_wakeup((caddr_t)&proc0);
	else
		resched_proc(l, l->l_priority);
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 */
void
resetpriority(struct lwp *l)
{
	unsigned int newpriority;
	struct proc *p = l->l_proc;

	SCHED_ASSERT_LOCKED();

	newpriority = PUSER + (p->p_estcpu >> ESTCPU_SHIFT) +
	    NICE_WEIGHT * (p->p_nice - NZERO);
	newpriority = min(newpriority, MAXPRI);
	l->l_usrpri = newpriority;
	resched_proc(l, l->l_usrpri);
}
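
/*
 * Worked example (illustrative; PUSER is 50, NZERO is 20 and MAXPRI is
 * 127 in <sys/param.h>): an LWP whose process has
 * p_estcpu == 4 << ESTCPU_SHIFT and p_nice == NZERO + 5 gets
 *
 *	newpriority = 50 + 4 + 2 * 5 = 64
 *
 * so accumulated CPU time and a positive nice both push the
 * (numerically larger == worse) user priority toward later run queues.
 */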

/*
 * Recompute priority for all LWPs in a process.
 */
void
resetprocpriority(struct proc *p)
{
	struct lwp *l;

	LIST_FOREACH(l, &p->p_lwps, l_sibling)
		resetpriority(l);
}

/*
 * We adjust the priority of the current process.  The priority of a process
 * gets worse as it accumulates CPU time.  The CPU usage estimator (p_estcpu)
 * is increased here.  The formula for computing priorities (in kern_synch.c)
 * will compute a different value each time p_estcpu increases.  This can
 * cause a switch, but unless the priority crosses a PPQ boundary the actual
 * queue will not change.  The CPU usage estimator ramps up quite quickly
 * when the process is running (linearly), and decays away exponentially, at
 * a rate which is proportionally slower when the system is busy.  The basic
 * principle is that the system will 90% forget that the process used a lot
 * of CPU time in 5 * loadav seconds.  This causes the system to favor
 * processes which haven't run much recently, and to round-robin among other
 * processes.
 */

void
schedclock(struct lwp *l)
{
	struct proc *p = l->l_proc;
	int s;

	p->p_estcpu = ESTCPULIM(p->p_estcpu + (1 << ESTCPU_SHIFT));
	SCHED_LOCK(s);
	resetpriority(l);
	SCHED_UNLOCK(s);

	if (l->l_priority >= PUSER)
		l->l_priority = l->l_usrpri;
}

void
suspendsched()
{
	struct lwp *l;
	int s;

	/*
	 * Convert all non-P_SYSTEM LSSLEEP or LSRUN processes to
	 * LSSUSPENDED.
	 */
	proclist_lock_read();
	SCHED_LOCK(s);
	LIST_FOREACH(l, &alllwp, l_list) {
		if ((l->l_proc->p_flag & P_SYSTEM) != 0)
			continue;

		switch (l->l_stat) {
		case LSRUN:
			l->l_proc->p_nrlwps--;
			if ((l->l_flag & L_INMEM) != 0)
				remrunqueue(l);
			/* FALLTHROUGH */
		case LSSLEEP:
			l->l_stat = LSSUSPENDED;
			break;
		case LSONPROC:
			/*
			 * XXX SMP: we need to deal with processes
			 * running on other CPUs!
			 */
			break;
		default:
			break;
		}
	}
	SCHED_UNLOCK(s);
	proclist_unlock_read();
}

/*
 * scheduler_fork_hook:
 *
 *	Inherit the parent's scheduler history.
 */
void
scheduler_fork_hook(struct proc *parent, struct proc *child)
{

	child->p_estcpu = parent->p_estcpu;
}

/*
 * scheduler_wait_hook:
 *
 *	Chargeback parents for the sins of their children.
 */
void
scheduler_wait_hook(struct proc *parent, struct proc *child)
{

	/* XXX Only if parent != init?? */
	parent->p_estcpu = ESTCPULIM(parent->p_estcpu + child->p_estcpu);
}

/*
 * Low-level routines to access the run queue.  Optimised assembler
 * routines can override these.
 */

#ifndef __HAVE_MD_RUNQUEUE

/*
 * On some architectures, it's faster to use an MSB ordering for the priorities
 * than the traditional LSB ordering.
 */
#ifdef __HAVE_BIGENDIAN_BITOPS
#define	RQMASK(n) (0x80000000 >> (n))
#else
#define	RQMASK(n) (0x00000001 << (n))
#endif

/*
 * The primitives that manipulate the run queues.  whichqs tells which
 * of the 32 queues qs have processes in them.  Setrunqueue puts processes
 * into queues, remrunqueue removes them from queues.  The running process is
 * on no queue, other processes are on a queue related to p->p_priority,
 * divided by 4 actually to shrink the 0-127 range of priorities into the 32
 * available queues.
 */
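
/*
 * For example (illustrative): an LWP with l_priority == 65 goes on run
 * queue 65 / PPQ == 16, and RQMASK(16) is set in sched_whichqs.  With
 * the LSB ordering, a hypothetical C rendition of the scan the MD
 * cpu_switch() performs to pick the best non-empty queue might read:
 *
 *	int qi = ffs(sched_whichqs) - 1;    (lowest set bit: best queue)
 *	struct lwp *next = sched_qs[qi].ph_link;
 */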

#ifdef RQDEBUG
static void
checkrunqueue(int whichq, struct lwp *l)
{
	const struct prochd * const rq = &sched_qs[whichq];
	struct lwp *l2;
	int found = 0;
	int die = 0;
	int empty = 1;

	for (l2 = rq->ph_link; l2 != (void *)rq; l2 = l2->l_forw) {
		if (l2->l_stat != LSRUN) {
			printf("checkrunqueue[%d]: lwp %p state (%d) "
			    " != LSRUN\n", whichq, l2, l2->l_stat);
		}
		if (l2->l_back->l_forw != l2) {
			printf("checkrunqueue[%d]: lwp %p back-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_back,
			    l2->l_back->l_forw);
			die = 1;
		}
		if (l2->l_forw->l_back != l2) {
			printf("checkrunqueue[%d]: lwp %p forw-qptr (%p) "
			    "corrupt %p\n", whichq, l2, l2->l_forw,
			    l2->l_forw->l_back);
			die = 1;
		}
		if (l2 == l)
			found = 1;
		empty = 0;
	}
	if (empty && (sched_whichqs & RQMASK(whichq)) != 0) {
		printf("checkrunqueue[%d]: bit set for empty run-queue %p\n",
		    whichq, rq);
		die = 1;
	} else if (!empty && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for non-empty "
		    "run-queue %p\n", whichq, rq);
		die = 1;
	}
	if (l != NULL && (sched_whichqs & RQMASK(whichq)) == 0) {
		printf("checkrunqueue[%d]: bit clear for active lwp %p\n",
		    whichq, l);
		die = 1;
	}
	if (l != NULL && empty) {
		printf("checkrunqueue[%d]: empty run-queue %p with "
		    "active lwp %p\n", whichq, rq, l);
		die = 1;
	}
	if (l != NULL && !found) {
		printf("checkrunqueue[%d]: lwp %p not in runqueue %p!",
		    whichq, l, rq);
		die = 1;
	}
	if (die)
		panic("checkrunqueue: inconsistency found");
}
#endif /* RQDEBUG */

void
setrunqueue(struct lwp *l)
{
	struct prochd *rq;
	struct lwp *prev;
	const int whichq = l->l_priority / PPQ;

#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
#ifdef DIAGNOSTIC
	if (l->l_back != NULL || l->l_wchan != NULL || l->l_stat != LSRUN)
		panic("setrunqueue");
#endif
	sched_whichqs |= RQMASK(whichq);
	rq = &sched_qs[whichq];
	prev = rq->ph_rlink;
	l->l_forw = (struct lwp *)rq;
	rq->ph_rlink = l;
	prev->l_forw = l;
	l->l_back = prev;
#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
}

void
remrunqueue(struct lwp *l)
{
	struct lwp *prev, *next;
	const int whichq = l->l_priority / PPQ;

#ifdef RQDEBUG
	checkrunqueue(whichq, l);
#endif
#ifdef DIAGNOSTIC
	if ((sched_whichqs & RQMASK(whichq)) == 0)
		panic("remrunqueue: bit %d not set", whichq);
#endif
	prev = l->l_back;
	l->l_back = NULL;
	next = l->l_forw;
	prev->l_forw = next;
	next->l_back = prev;
	if (prev == next)
		sched_whichqs &= ~RQMASK(whichq);
#ifdef RQDEBUG
	checkrunqueue(whichq, NULL);
#endif
}

#undef RQMASK
#endif /* !defined(__HAVE_MD_RUNQUEUE) */
1396