xref: /dflybsd-src/sys/kern/kern_synch.c (revision 6693db176654a0f25095ec64d0a74d58dcf0e47e)
1 /*-
2  * Copyright (c) 1982, 1986, 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
39  * $FreeBSD: src/sys/kern/kern_synch.c,v 1.87.2.6 2002/10/13 07:29:53 kbyanc Exp $
40  * $DragonFly: src/sys/kern/kern_synch.c,v 1.91 2008/09/09 04:06:13 dillon Exp $
41  */
42 
43 #include "opt_ktrace.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/proc.h>
48 #include <sys/kernel.h>
49 #include <sys/signalvar.h>
50 #include <sys/resourcevar.h>
51 #include <sys/vmmeter.h>
52 #include <sys/sysctl.h>
53 #include <sys/lock.h>
54 #ifdef KTRACE
55 #include <sys/uio.h>
56 #include <sys/ktrace.h>
57 #endif
58 #include <sys/xwait.h>
59 #include <sys/ktr.h>
60 #include <sys/serialize.h>
61 
62 #include <sys/signal2.h>
63 #include <sys/thread2.h>
64 #include <sys/spinlock2.h>
65 #include <sys/mutex2.h>
66 #include <sys/mplock2.h>
67 
68 #include <machine/cpu.h>
69 #include <machine/smp.h>
70 
71 TAILQ_HEAD(tslpque, thread);
72 
73 static void sched_setup (void *dummy);
74 SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
75 
76 int	hogticks;
77 int	lbolt;
78 int	lbolt_syncer;
79 int	sched_quantum;		/* Roundrobin scheduling quantum in ticks. */
80 int	ncpus;
81 int	ncpus2, ncpus2_shift, ncpus2_mask;
82 int	ncpus_fit, ncpus_fit_mask;
83 int	safepri;
84 int	tsleep_now_works;
85 
86 static struct callout loadav_callout;
87 static struct callout schedcpu_callout;
88 MALLOC_DEFINE(M_TSLEEP, "tslpque", "tsleep queues");
89 
90 #if !defined(KTR_TSLEEP)
91 #define KTR_TSLEEP	KTR_ALL
92 #endif
93 KTR_INFO_MASTER(tsleep);
94 KTR_INFO(KTR_TSLEEP, tsleep, tsleep_beg, 0, "tsleep enter %p", sizeof(void *));
95 KTR_INFO(KTR_TSLEEP, tsleep, tsleep_end, 1, "tsleep exit", 0);
96 KTR_INFO(KTR_TSLEEP, tsleep, wakeup_beg, 2, "wakeup enter %p", sizeof(void *));
97 KTR_INFO(KTR_TSLEEP, tsleep, wakeup_end, 3, "wakeup exit", 0);
98 KTR_INFO(KTR_TSLEEP, tsleep, ilockfail,  4, "interlock failed %p", sizeof(void *));
99 
100 #define logtsleep1(name)	KTR_LOG(tsleep_ ## name)
101 #define logtsleep2(name, val)	KTR_LOG(tsleep_ ## name, val)
102 
103 struct loadavg averunnable =
104 	{ {0, 0, 0}, FSCALE };	/* load average, of runnable procs */
105 /*
106  * Constants for averages over 1, 5, and 15 minutes
107  * when sampling at 5 second intervals.
108  */
109 static fixpt_t cexp[3] = {
110 	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
111 	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
112 	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
113 };
114 
115 static void	endtsleep (void *);
116 static void	loadav (void *arg);
117 static void	schedcpu (void *arg);
118 #ifdef SMP
119 static void	tsleep_wakeup(struct thread *td);
120 #endif
121 
122 /*
123  * Adjust the scheduler quantum.  The quantum is specified in microseconds.
124  * Note that 'tick' is in microseconds per tick.
125  */
126 static int
127 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
128 {
129 	int error, new_val;
130 
131 	new_val = sched_quantum * ustick;
132 	error = sysctl_handle_int(oidp, &new_val, 0, req);
133         if (error != 0 || req->newptr == NULL)
134 		return (error);
135 	if (new_val < ustick)
136 		return (EINVAL);
137 	sched_quantum = new_val / ustick;
138 	hogticks = 2 * sched_quantum;
139 	return (0);
140 }
141 
142 SYSCTL_PROC(_kern, OID_AUTO, quantum, CTLTYPE_INT|CTLFLAG_RW,
143 	0, sizeof sched_quantum, sysctl_kern_quantum, "I", "");
144 
145 /*
146  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
147  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
148  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
149  *
150  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
151  *     1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
152  *
153  * If you don't want to bother with the faster/more-accurate formula, you
154  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
155  * (more general) method of calculating the %age of CPU used by a process.
156  *
157  * decay 95% of `lwp_pctcpu' in 60 seconds; see CCPU_SHIFT before changing
158  */
159 #define CCPU_SHIFT	11
160 
161 static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
162 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");
163 
164 /*
165  * kernel uses `FSCALE', userland (SHOULD) use kern.fscale
166  */
167 int     fscale __unused = FSCALE;	/* exported to systat */
168 SYSCTL_INT(_kern, OID_AUTO, fscale, CTLFLAG_RD, 0, FSCALE, "");
169 
170 /*
171  * Recompute process priorities, once a second.
172  *
173  * Since the userland schedulers are typically event oriented, if the
174  * estcpu calculation at wakeup() time is not sufficient to make a
175  * process runnable relative to other processes in the system we have
176  * a 1-second recalc to help out.
177  *
178  * This code also allows us to store sysclock_t data in the process structure
179  * without fear of an overrun, since sysclock_t are guarenteed to hold
180  * several seconds worth of count.
181  *
182  * WARNING!  callouts can preempt normal threads.  However, they will not
183  * preempt a thread holding a spinlock so we *can* safely use spinlocks.
184  */
185 static int schedcpu_stats(struct proc *p, void *data __unused);
186 static int schedcpu_resource(struct proc *p, void *data __unused);
187 
188 static void
189 schedcpu(void *arg)
190 {
191 	allproc_scan(schedcpu_stats, NULL);
192 	allproc_scan(schedcpu_resource, NULL);
193 	wakeup((caddr_t)&lbolt);
194 	wakeup((caddr_t)&lbolt_syncer);
195 	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
196 }
197 
198 /*
199  * General process statistics once a second
200  */
201 static int
202 schedcpu_stats(struct proc *p, void *data __unused)
203 {
204 	struct lwp *lp;
205 
206 	crit_enter();
207 	p->p_swtime++;
208 	FOREACH_LWP_IN_PROC(lp, p) {
209 		if (lp->lwp_stat == LSSLEEP)
210 			lp->lwp_slptime++;
211 
212 		/*
213 		 * Only recalculate processes that are active or have slept
214 		 * less then 2 seconds.  The schedulers understand this.
215 		 */
216 		if (lp->lwp_slptime <= 1) {
217 			p->p_usched->recalculate(lp);
218 		} else {
219 			lp->lwp_pctcpu = (lp->lwp_pctcpu * ccpu) >> FSHIFT;
220 		}
221 	}
222 	crit_exit();
223 	return(0);
224 }
225 
226 /*
227  * Resource checks.  XXX break out since ksignal/killproc can block,
228  * limiting us to one process killed per second.  There is probably
229  * a better way.
230  */
231 static int
232 schedcpu_resource(struct proc *p, void *data __unused)
233 {
234 	u_int64_t ttime;
235 	struct lwp *lp;
236 
237 	crit_enter();
238 	if (p->p_stat == SIDL ||
239 	    p->p_stat == SZOMB ||
240 	    p->p_limit == NULL
241 	) {
242 		crit_exit();
243 		return(0);
244 	}
245 
246 	ttime = 0;
247 	FOREACH_LWP_IN_PROC(lp, p) {
248 		/*
249 		 * We may have caught an lp in the middle of being
250 		 * created, lwp_thread can be NULL.
251 		 */
252 		if (lp->lwp_thread) {
253 			ttime += lp->lwp_thread->td_sticks;
254 			ttime += lp->lwp_thread->td_uticks;
255 		}
256 	}
257 
258 	switch(plimit_testcpulimit(p->p_limit, ttime)) {
259 	case PLIMIT_TESTCPU_KILL:
260 		killproc(p, "exceeded maximum CPU limit");
261 		break;
262 	case PLIMIT_TESTCPU_XCPU:
263 		if ((p->p_flag & P_XCPU) == 0) {
264 			p->p_flag |= P_XCPU;
265 			ksignal(p, SIGXCPU);
266 		}
267 		break;
268 	default:
269 		break;
270 	}
271 	crit_exit();
272 	return(0);
273 }
274 
275 /*
276  * This is only used by ps.  Generate a cpu percentage use over
277  * a period of one second.
278  *
279  * MPSAFE
280  */
281 void
282 updatepcpu(struct lwp *lp, int cpticks, int ttlticks)
283 {
284 	fixpt_t acc;
285 	int remticks;
286 
287 	acc = (cpticks << FSHIFT) / ttlticks;
288 	if (ttlticks >= ESTCPUFREQ) {
289 		lp->lwp_pctcpu = acc;
290 	} else {
291 		remticks = ESTCPUFREQ - ttlticks;
292 		lp->lwp_pctcpu = (acc * ttlticks + lp->lwp_pctcpu * remticks) /
293 				ESTCPUFREQ;
294 	}
295 }
296 
297 /*
298  * tsleep/wakeup hash table parameters.  Try to find the sweet spot for
299  * like addresses being slept on.
300  */
301 #define TABLESIZE	1024
302 #define LOOKUP(x)	(((intptr_t)(x) >> 6) & (TABLESIZE - 1))
303 
304 static cpumask_t slpque_cpumasks[TABLESIZE];
305 
306 /*
307  * General scheduler initialization.  We force a reschedule 25 times
308  * a second by default.  Note that cpu0 is initialized in early boot and
309  * cannot make any high level calls.
310  *
311  * Each cpu has its own sleep queue.
312  */
313 void
314 sleep_gdinit(globaldata_t gd)
315 {
316 	static struct tslpque slpque_cpu0[TABLESIZE];
317 	int i;
318 
319 	if (gd->gd_cpuid == 0) {
320 		sched_quantum = (hz + 24) / 25;
321 		hogticks = 2 * sched_quantum;
322 
323 		gd->gd_tsleep_hash = slpque_cpu0;
324 	} else {
325 		gd->gd_tsleep_hash = kmalloc(sizeof(slpque_cpu0),
326 					    M_TSLEEP, M_WAITOK | M_ZERO);
327 	}
328 	for (i = 0; i < TABLESIZE; ++i)
329 		TAILQ_INIT(&gd->gd_tsleep_hash[i]);
330 }
331 
332 /*
333  * This is a dandy function that allows us to interlock tsleep/wakeup
334  * operations with unspecified upper level locks, such as lockmgr locks,
335  * simply by holding a critical section.  The sequence is:
336  *
337  *	(acquire upper level lock)
338  *	tsleep_interlock(blah)
339  *	(release upper level lock)
340  *	tsleep(blah, ...)
341  *
342  * Basically this functions queues us on the tsleep queue without actually
343  * descheduling us.  When tsleep() is later called with PINTERLOCK it
344  * assumes the thread was already queued, otherwise it queues it there.
345  *
346  * Thus it is possible to receive the wakeup prior to going to sleep and
347  * the race conditions are covered.
348  */
349 static __inline void
350 _tsleep_interlock(globaldata_t gd, void *ident, int flags)
351 {
352 	thread_t td = gd->gd_curthread;
353 	int id;
354 
355 	crit_enter_quick(td);
356 	if (td->td_flags & TDF_TSLEEPQ) {
357 		id = LOOKUP(td->td_wchan);
358 		TAILQ_REMOVE(&gd->gd_tsleep_hash[id], td, td_sleepq);
359 		if (TAILQ_FIRST(&gd->gd_tsleep_hash[id]) == NULL)
360 			atomic_clear_int(&slpque_cpumasks[id], gd->gd_cpumask);
361 	} else {
362 		td->td_flags |= TDF_TSLEEPQ;
363 	}
364 	id = LOOKUP(ident);
365 	TAILQ_INSERT_TAIL(&gd->gd_tsleep_hash[id], td, td_sleepq);
366 	atomic_set_int(&slpque_cpumasks[id], gd->gd_cpumask);
367 	td->td_wchan = ident;
368 	td->td_wdomain = flags & PDOMAIN_MASK;
369 	crit_exit_quick(td);
370 }
371 
372 void
373 tsleep_interlock(void *ident, int flags)
374 {
375 	_tsleep_interlock(mycpu, ident, flags);
376 }
377 
378 /*
379  * Remove thread from sleepq.  Must be called with a critical section held.
380  */
381 static __inline void
382 _tsleep_remove(thread_t td)
383 {
384 	globaldata_t gd = mycpu;
385 	int id;
386 
387 	KKASSERT(td->td_gd == gd);
388 	if (td->td_flags & TDF_TSLEEPQ) {
389 		td->td_flags &= ~TDF_TSLEEPQ;
390 		id = LOOKUP(td->td_wchan);
391 		TAILQ_REMOVE(&gd->gd_tsleep_hash[id], td, td_sleepq);
392 		if (TAILQ_FIRST(&gd->gd_tsleep_hash[id]) == NULL)
393 			atomic_clear_int(&slpque_cpumasks[id], gd->gd_cpumask);
394 		td->td_wchan = NULL;
395 		td->td_wdomain = 0;
396 	}
397 }
398 
399 void
400 tsleep_remove(thread_t td)
401 {
402 	_tsleep_remove(td);
403 }
404 
405 /*
406  * This function removes a thread from the tsleep queue and schedules
407  * it.  This function may act asynchronously.  The target thread may be
408  * sleeping on a different cpu.
409  *
410  * This function mus be called while in a critical section but if the
411  * target thread is sleeping on a different cpu we cannot safely probe
412  * td_flags.
413  */
414 static __inline
415 void
416 _tsleep_wakeup(struct thread *td)
417 {
418 #ifdef SMP
419 	globaldata_t gd = mycpu;
420 
421 	if (td->td_gd != gd) {
422 		lwkt_send_ipiq(td->td_gd, (ipifunc1_t)tsleep_wakeup, td);
423 		return;
424 	}
425 #endif
426 	_tsleep_remove(td);
427 	if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
428 		td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;
429 		lwkt_schedule(td);
430 	}
431 }
432 
433 #ifdef SMP
434 static
435 void
436 tsleep_wakeup(struct thread *td)
437 {
438 	_tsleep_wakeup(td);
439 }
440 #endif
441 
442 
443 /*
444  * General sleep call.  Suspends the current process until a wakeup is
445  * performed on the specified identifier.  The process will then be made
446  * runnable with the specified priority.  Sleeps at most timo/hz seconds
447  * (0 means no timeout).  If flags includes PCATCH flag, signals are checked
448  * before and after sleeping, else signals are not checked.  Returns 0 if
449  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
450  * signal needs to be delivered, ERESTART is returned if the current system
451  * call should be restarted if possible, and EINTR is returned if the system
452  * call should be interrupted by the signal (return EINTR).
453  *
454  * Note that if we are a process, we release_curproc() before messing with
455  * the LWKT scheduler.
456  *
457  * During autoconfiguration or after a panic, a sleep will simply
458  * lower the priority briefly to allow interrupts, then return.
459  */
460 int
461 tsleep(void *ident, int flags, const char *wmesg, int timo)
462 {
463 	struct thread *td = curthread;
464 	struct lwp *lp = td->td_lwp;
465 	struct proc *p = td->td_proc;		/* may be NULL */
466 	globaldata_t gd;
467 	int sig;
468 	int catch;
469 	int id;
470 	int error;
471 	int oldpri;
472 	struct callout thandle;
473 
474 	/*
475 	 * NOTE: removed KTRPOINT, it could cause races due to blocking
476 	 * even in stable.  Just scrap it for now.
477 	 */
478 	if (tsleep_now_works == 0 || panicstr) {
479 		/*
480 		 * After a panic, or before we actually have an operational
481 		 * softclock, just give interrupts a chance, then just return;
482 		 *
483 		 * don't run any other procs or panic below,
484 		 * in case this is the idle process and already asleep.
485 		 */
486 		splz();
487 		oldpri = td->td_pri & TDPRI_MASK;
488 		lwkt_setpri_self(safepri);
489 		lwkt_switch();
490 		lwkt_setpri_self(oldpri);
491 		return (0);
492 	}
493 	logtsleep2(tsleep_beg, ident);
494 	gd = td->td_gd;
495 	KKASSERT(td != &gd->gd_idlethread);	/* you must be kidding! */
496 
497 	/*
498 	 * NOTE: all of this occurs on the current cpu, including any
499 	 * callout-based wakeups, so a critical section is a sufficient
500 	 * interlock.
501 	 *
502 	 * The entire sequence through to where we actually sleep must
503 	 * run without breaking the critical section.
504 	 */
505 	catch = flags & PCATCH;
506 	error = 0;
507 	sig = 0;
508 
509 	crit_enter_quick(td);
510 
511 	KASSERT(ident != NULL, ("tsleep: no ident"));
512 	KASSERT(lp == NULL ||
513 		lp->lwp_stat == LSRUN ||	/* Obvious */
514 		lp->lwp_stat == LSSTOP,		/* Set in tstop */
515 		("tsleep %p %s %d",
516 			ident, wmesg, lp->lwp_stat));
517 
518 	/*
519 	 * Setup for the current process (if this is a process).
520 	 */
521 	if (lp) {
522 		if (catch) {
523 			/*
524 			 * Early termination if PCATCH was set and a
525 			 * signal is pending, interlocked with the
526 			 * critical section.
527 			 *
528 			 * Early termination only occurs when tsleep() is
529 			 * entered while in a normal LSRUN state.
530 			 */
531 			if ((sig = CURSIG(lp)) != 0)
532 				goto resume;
533 
534 			/*
535 			 * Early termination if PCATCH was set and a
536 			 * mailbox signal was possibly delivered prior to
537 			 * the system call even being made, in order to
538 			 * allow the user to interlock without having to
539 			 * make additional system calls.
540 			 */
541 			if (p->p_flag & P_MAILBOX)
542 				goto resume;
543 
544 			/*
545 			 * Causes ksignal to wake us up when.
546 			 */
547 			lp->lwp_flag |= LWP_SINTR;
548 		}
549 	}
550 
551 	/*
552 	 * We interlock the sleep queue if the caller has not already done
553 	 * it for us.
554 	 */
555 	if ((flags & PINTERLOCKED) == 0) {
556 		id = LOOKUP(ident);
557 		_tsleep_interlock(gd, ident, flags);
558 	}
559 
560 	/*
561 	 *
562 	 * If no interlock was set we do an integrated interlock here.
563 	 * Make sure the current process has been untangled from
564 	 * the userland scheduler and initialize slptime to start
565 	 * counting.  We must interlock the sleep queue before doing
566 	 * this to avoid wakeup/process-ipi races which can occur under
567 	 * heavy loads.
568 	 */
569 	if (lp) {
570 		p->p_usched->release_curproc(lp);
571 		lp->lwp_slptime = 0;
572 	}
573 
574 	/*
575 	 * If the interlocked flag is set but our cpu bit in the slpqueue
576 	 * is no longer set, then a wakeup was processed inbetween the
577 	 * tsleep_interlock() (ours or the callers), and here.  This can
578 	 * occur under numerous circumstances including when we release the
579 	 * current process.
580 	 *
581 	 * Extreme loads can cause the sending of an IPI (e.g. wakeup()'s)
582 	 * to process incoming IPIs, thus draining incoming wakeups.
583 	 */
584 	if ((td->td_flags & TDF_TSLEEPQ) == 0) {
585 		logtsleep2(ilockfail, ident);
586 		goto resume;
587 	}
588 
589 	/*
590 	 * scheduling is blocked while in a critical section.  Coincide
591 	 * the descheduled-by-tsleep flag with the descheduling of the
592 	 * lwkt.
593 	 */
594 	lwkt_deschedule_self(td);
595 	td->td_flags |= TDF_TSLEEP_DESCHEDULED;
596 	td->td_wmesg = wmesg;
597 
598 	/*
599 	 * Setup the timeout, if any
600 	 */
601 	if (timo) {
602 		callout_init(&thandle);
603 		callout_reset(&thandle, timo, endtsleep, td);
604 	}
605 
606 	/*
607 	 * Beddy bye bye.
608 	 */
609 	if (lp) {
610 		/*
611 		 * Ok, we are sleeping.  Place us in the SSLEEP state.
612 		 */
613 		KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
614 		/*
615 		 * tstop() sets LSSTOP, so don't fiddle with that.
616 		 */
617 		if (lp->lwp_stat != LSSTOP)
618 			lp->lwp_stat = LSSLEEP;
619 		lp->lwp_ru.ru_nvcsw++;
620 		lwkt_switch();
621 
622 		/*
623 		 * And when we are woken up, put us back in LSRUN.  If we
624 		 * slept for over a second, recalculate our estcpu.
625 		 */
626 		lp->lwp_stat = LSRUN;
627 		if (lp->lwp_slptime)
628 			p->p_usched->recalculate(lp);
629 		lp->lwp_slptime = 0;
630 	} else {
631 		lwkt_switch();
632 	}
633 
634 	/*
635 	 * Make sure we haven't switched cpus while we were asleep.  It's
636 	 * not supposed to happen.  Cleanup our temporary flags.
637 	 */
638 	KKASSERT(gd == td->td_gd);
639 
640 	/*
641 	 * Cleanup the timeout.
642 	 */
643 	if (timo) {
644 		if (td->td_flags & TDF_TIMEOUT) {
645 			td->td_flags &= ~TDF_TIMEOUT;
646 			error = EWOULDBLOCK;
647 		} else {
648 			callout_stop(&thandle);
649 		}
650 	}
651 
652 	/*
653 	 * Make sure we have been removed from the sleepq.  This should
654 	 * have been done for us already.
655 	 */
656 	_tsleep_remove(td);
657 	td->td_wmesg = NULL;
658 	if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
659 		td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;
660 		kprintf("td %p (%s) unexpectedly rescheduled\n",
661 			td, td->td_comm);
662 	}
663 
664 	/*
665 	 * Figure out the correct error return.  If interrupted by a
666 	 * signal we want to return EINTR or ERESTART.
667 	 *
668 	 * If P_MAILBOX is set no automatic system call restart occurs
669 	 * and we return EINTR.  P_MAILBOX is meant to be used as an
670 	 * interlock, the user must poll it prior to any system call
671 	 * that it wishes to interlock a mailbox signal against since
672 	 * the flag is cleared on *any* system call that sleeps.
673 	 */
674 resume:
675 	if (p) {
676 		if (catch && error == 0) {
677 			if ((p->p_flag & P_MAILBOX) && sig == 0) {
678 				error = EINTR;
679 			} else if (sig != 0 || (sig = CURSIG(lp))) {
680 				if (SIGISMEMBER(p->p_sigacts->ps_sigintr, sig))
681 					error = EINTR;
682 				else
683 					error = ERESTART;
684 			}
685 		}
686 		lp->lwp_flag &= ~(LWP_BREAKTSLEEP | LWP_SINTR);
687 		p->p_flag &= ~P_MAILBOX;
688 	}
689 	logtsleep1(tsleep_end);
690 	crit_exit_quick(td);
691 	return (error);
692 }
693 
694 /*
695  * Interlocked spinlock sleep.  An exclusively held spinlock must
696  * be passed to ssleep().  The function will atomically release the
697  * spinlock and tsleep on the ident, then reacquire the spinlock and
698  * return.
699  *
700  * This routine is fairly important along the critical path, so optimize it
701  * heavily.
702  */
703 int
704 ssleep(void *ident, struct spinlock *spin, int flags,
705        const char *wmesg, int timo)
706 {
707 	globaldata_t gd = mycpu;
708 	int error;
709 
710 	_tsleep_interlock(gd, ident, flags);
711 	spin_unlock_wr_quick(gd, spin);
712 	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
713 	spin_lock_wr_quick(gd, spin);
714 
715 	return (error);
716 }
717 
718 int
719 lksleep(void *ident, struct lock *lock, int flags,
720        const char *wmesg, int timo)
721 {
722 	globaldata_t gd = mycpu;
723 	int error;
724 
725 	_tsleep_interlock(gd, ident, flags);
726 	lockmgr(lock, LK_RELEASE);
727 	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
728 	lockmgr(lock, LK_EXCLUSIVE);
729 
730 	return (error);
731 }
732 
733 /*
734  * Interlocked mutex sleep.  An exclusively held mutex must be passed
735  * to mtxsleep().  The function will atomically release the mutex
736  * and tsleep on the ident, then reacquire the mutex and return.
737  */
738 int
739 mtxsleep(void *ident, struct mtx *mtx, int flags,
740 	 const char *wmesg, int timo)
741 {
742 	globaldata_t gd = mycpu;
743 	int error;
744 
745 	_tsleep_interlock(gd, ident, flags);
746 	mtx_unlock(mtx);
747 	error = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
748 	mtx_lock_ex_quick(mtx, wmesg);
749 
750 	return (error);
751 }
752 
753 /*
754  * Interlocked serializer sleep.  An exclusively held serializer must
755  * be passed to zsleep().  The function will atomically release
756  * the serializer and tsleep on the ident, then reacquire the serializer
757  * and return.
758  */
759 int
760 zsleep(void *ident, struct lwkt_serialize *slz, int flags,
761        const char *wmesg, int timo)
762 {
763 	globaldata_t gd = mycpu;
764 	int ret;
765 
766 	ASSERT_SERIALIZED(slz);
767 
768 	_tsleep_interlock(gd, ident, flags);
769 	lwkt_serialize_exit(slz);
770 	ret = tsleep(ident, flags | PINTERLOCKED, wmesg, timo);
771 	lwkt_serialize_enter(slz);
772 
773 	return ret;
774 }
775 
776 /*
777  * Directly block on the LWKT thread by descheduling it.  This
778  * is much faster then tsleep(), but the only legal way to wake
779  * us up is to directly schedule the thread.
780  *
781  * Setting TDF_SINTR will cause new signals to directly schedule us.
782  *
783  * This routine must be called while in a critical section.
784  */
785 int
786 lwkt_sleep(const char *wmesg, int flags)
787 {
788 	thread_t td = curthread;
789 	int sig;
790 
791 	if ((flags & PCATCH) == 0 || td->td_lwp == NULL) {
792 		td->td_flags |= TDF_BLOCKED;
793 		td->td_wmesg = wmesg;
794 		lwkt_deschedule_self(td);
795 		lwkt_switch();
796 		td->td_wmesg = NULL;
797 		td->td_flags &= ~TDF_BLOCKED;
798 		return(0);
799 	}
800 	if ((sig = CURSIG(td->td_lwp)) != 0) {
801 		if (SIGISMEMBER(td->td_proc->p_sigacts->ps_sigintr, sig))
802 			return(EINTR);
803 		else
804 			return(ERESTART);
805 
806 	}
807 	td->td_flags |= TDF_BLOCKED | TDF_SINTR;
808 	td->td_wmesg = wmesg;
809 	lwkt_deschedule_self(td);
810 	lwkt_switch();
811 	td->td_flags &= ~(TDF_BLOCKED | TDF_SINTR);
812 	td->td_wmesg = NULL;
813 	return(0);
814 }
815 
816 /*
817  * Implement the timeout for tsleep.
818  *
819  * We set LWP_BREAKTSLEEP to indicate that an event has occured, but
820  * we only call setrunnable if the process is not stopped.
821  *
822  * This type of callout timeout is scheduled on the same cpu the process
823  * is sleeping on.  Also, at the moment, the MP lock is held.
824  */
825 static void
826 endtsleep(void *arg)
827 {
828 	thread_t td = arg;
829 	struct lwp *lp;
830 
831 	ASSERT_MP_LOCK_HELD(curthread);
832 	crit_enter();
833 
834 	/*
835 	 * cpu interlock.  Thread flags are only manipulated on
836 	 * the cpu owning the thread.  proc flags are only manipulated
837 	 * by the older of the MP lock.  We have both.
838 	 */
839 	if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
840 		td->td_flags |= TDF_TIMEOUT;
841 
842 		if ((lp = td->td_lwp) != NULL) {
843 			lp->lwp_flag |= LWP_BREAKTSLEEP;
844 			if (lp->lwp_proc->p_stat != SSTOP)
845 				setrunnable(lp);
846 		} else {
847 			_tsleep_wakeup(td);
848 		}
849 	}
850 	crit_exit();
851 }
852 
853 /*
854  * Make all processes sleeping on the specified identifier runnable.
855  * count may be zero or one only.
856  *
857  * The domain encodes the sleep/wakeup domain AND the first cpu to check
858  * (which is always the current cpu).  As we iterate across cpus
859  *
860  * This call may run without the MP lock held.  We can only manipulate thread
861  * state on the cpu owning the thread.  We CANNOT manipulate process state
862  * at all.
863  */
864 static void
865 _wakeup(void *ident, int domain)
866 {
867 	struct tslpque *qp;
868 	struct thread *td;
869 	struct thread *ntd;
870 	globaldata_t gd;
871 #ifdef SMP
872 	cpumask_t mask;
873 #endif
874 	int id;
875 
876 	crit_enter();
877 	logtsleep2(wakeup_beg, ident);
878 	gd = mycpu;
879 	id = LOOKUP(ident);
880 	qp = &gd->gd_tsleep_hash[id];
881 restart:
882 	for (td = TAILQ_FIRST(qp); td != NULL; td = ntd) {
883 		ntd = TAILQ_NEXT(td, td_sleepq);
884 		if (td->td_wchan == ident &&
885 		    td->td_wdomain == (domain & PDOMAIN_MASK)
886 		) {
887 			KKASSERT(td->td_gd == gd);
888 			_tsleep_remove(td);
889 			if (td->td_flags & TDF_TSLEEP_DESCHEDULED) {
890 				td->td_flags &= ~TDF_TSLEEP_DESCHEDULED;
891 				lwkt_schedule(td);
892 				if (domain & PWAKEUP_ONE)
893 					goto done;
894 			}
895 			goto restart;
896 		}
897 	}
898 
899 #ifdef SMP
900 	/*
901 	 * We finished checking the current cpu but there still may be
902 	 * more work to do.  Either wakeup_one was requested and no matching
903 	 * thread was found, or a normal wakeup was requested and we have
904 	 * to continue checking cpus.
905 	 *
906 	 * It should be noted that this scheme is actually less expensive then
907 	 * the old scheme when waking up multiple threads, since we send
908 	 * only one IPI message per target candidate which may then schedule
909 	 * multiple threads.  Before we could have wound up sending an IPI
910 	 * message for each thread on the target cpu (!= current cpu) that
911 	 * needed to be woken up.
912 	 *
913 	 * NOTE: Wakeups occuring on remote cpus are asynchronous.  This
914 	 * should be ok since we are passing idents in the IPI rather then
915 	 * thread pointers.
916 	 */
917 	if ((domain & PWAKEUP_MYCPU) == 0 &&
918 	    (mask = slpque_cpumasks[id] & gd->gd_other_cpus) != 0) {
919 		lwkt_send_ipiq2_mask(mask, _wakeup, ident,
920 				     domain | PWAKEUP_MYCPU);
921 	}
922 #endif
923 done:
924 	logtsleep1(wakeup_end);
925 	crit_exit();
926 }
927 
928 /*
929  * Wakeup all threads tsleep()ing on the specified ident, on all cpus
930  */
931 void
932 wakeup(void *ident)
933 {
934     _wakeup(ident, PWAKEUP_ENCODE(0, mycpu->gd_cpuid));
935 }
936 
937 /*
938  * Wakeup one thread tsleep()ing on the specified ident, on any cpu.
939  */
940 void
941 wakeup_one(void *ident)
942 {
943     /* XXX potentially round-robin the first responding cpu */
944     _wakeup(ident, PWAKEUP_ENCODE(0, mycpu->gd_cpuid) | PWAKEUP_ONE);
945 }
946 
947 /*
948  * Wakeup threads tsleep()ing on the specified ident on the current cpu
949  * only.
950  */
951 void
952 wakeup_mycpu(void *ident)
953 {
954     _wakeup(ident, PWAKEUP_MYCPU);
955 }
956 
957 /*
958  * Wakeup one thread tsleep()ing on the specified ident on the current cpu
959  * only.
960  */
961 void
962 wakeup_mycpu_one(void *ident)
963 {
964     /* XXX potentially round-robin the first responding cpu */
965     _wakeup(ident, PWAKEUP_MYCPU|PWAKEUP_ONE);
966 }
967 
968 /*
969  * Wakeup all thread tsleep()ing on the specified ident on the specified cpu
970  * only.
971  */
972 void
973 wakeup_oncpu(globaldata_t gd, void *ident)
974 {
975 #ifdef SMP
976     if (gd == mycpu) {
977 	_wakeup(ident, PWAKEUP_MYCPU);
978     } else {
979 	lwkt_send_ipiq2(gd, _wakeup, ident, PWAKEUP_MYCPU);
980     }
981 #else
982     _wakeup(ident, PWAKEUP_MYCPU);
983 #endif
984 }
985 
986 /*
987  * Wakeup one thread tsleep()ing on the specified ident on the specified cpu
988  * only.
989  */
990 void
991 wakeup_oncpu_one(globaldata_t gd, void *ident)
992 {
993 #ifdef SMP
994     if (gd == mycpu) {
995 	_wakeup(ident, PWAKEUP_MYCPU | PWAKEUP_ONE);
996     } else {
997 	lwkt_send_ipiq2(gd, _wakeup, ident, PWAKEUP_MYCPU | PWAKEUP_ONE);
998     }
999 #else
1000     _wakeup(ident, PWAKEUP_MYCPU | PWAKEUP_ONE);
1001 #endif
1002 }
1003 
1004 /*
1005  * Wakeup all threads waiting on the specified ident that slept using
1006  * the specified domain, on all cpus.
1007  */
1008 void
1009 wakeup_domain(void *ident, int domain)
1010 {
1011     _wakeup(ident, PWAKEUP_ENCODE(domain, mycpu->gd_cpuid));
1012 }
1013 
1014 /*
1015  * Wakeup one thread waiting on the specified ident that slept using
1016  * the specified  domain, on any cpu.
1017  */
1018 void
1019 wakeup_domain_one(void *ident, int domain)
1020 {
1021     /* XXX potentially round-robin the first responding cpu */
1022     _wakeup(ident, PWAKEUP_ENCODE(domain, mycpu->gd_cpuid) | PWAKEUP_ONE);
1023 }
1024 
1025 /*
1026  * setrunnable()
1027  *
1028  * Make a process runnable.  The MP lock must be held on call.  This only
1029  * has an effect if we are in SSLEEP.  We only break out of the
1030  * tsleep if LWP_BREAKTSLEEP is set, otherwise we just fix-up the state.
1031  *
1032  * NOTE: With the MP lock held we can only safely manipulate the process
1033  * structure.  We cannot safely manipulate the thread structure.
1034  */
1035 void
1036 setrunnable(struct lwp *lp)
1037 {
1038 	crit_enter();
1039 	ASSERT_MP_LOCK_HELD(curthread);
1040 	if (lp->lwp_stat == LSSTOP)
1041 		lp->lwp_stat = LSSLEEP;
1042 	if (lp->lwp_stat == LSSLEEP && (lp->lwp_flag & LWP_BREAKTSLEEP))
1043 		_tsleep_wakeup(lp->lwp_thread);
1044 	crit_exit();
1045 }
1046 
1047 /*
1048  * The process is stopped due to some condition, usually because p_stat is
1049  * set to SSTOP, but also possibly due to being traced.
1050  *
1051  * NOTE!  If the caller sets SSTOP, the caller must also clear P_WAITED
1052  * because the parent may check the child's status before the child actually
1053  * gets to this routine.
1054  *
1055  * This routine is called with the current lwp only, typically just
1056  * before returning to userland.
1057  *
1058  * Setting LWP_BREAKTSLEEP before entering the tsleep will cause a passive
1059  * SIGCONT to break out of the tsleep.
1060  */
1061 void
1062 tstop(void)
1063 {
1064 	struct lwp *lp = curthread->td_lwp;
1065 	struct proc *p = lp->lwp_proc;
1066 
1067 	crit_enter();
1068 	/*
1069 	 * If LWP_WSTOP is set, we were sleeping
1070 	 * while our process was stopped.  At this point
1071 	 * we were already counted as stopped.
1072 	 */
1073 	if ((lp->lwp_flag & LWP_WSTOP) == 0) {
1074 		/*
1075 		 * If we're the last thread to stop, signal
1076 		 * our parent.
1077 		 */
1078 		p->p_nstopped++;
1079 		lp->lwp_flag |= LWP_WSTOP;
1080 		wakeup(&p->p_nstopped);
1081 		if (p->p_nstopped == p->p_nthreads) {
1082 			p->p_flag &= ~P_WAITED;
1083 			wakeup(p->p_pptr);
1084 			if ((p->p_pptr->p_sigacts->ps_flag & PS_NOCLDSTOP) == 0)
1085 				ksignal(p->p_pptr, SIGCHLD);
1086 		}
1087 	}
1088 	while (p->p_stat == SSTOP) {
1089 		lp->lwp_flag |= LWP_BREAKTSLEEP;
1090 		lp->lwp_stat = LSSTOP;
1091 		tsleep(p, 0, "stop", 0);
1092 	}
1093 	p->p_nstopped--;
1094 	lp->lwp_flag &= ~LWP_WSTOP;
1095 	crit_exit();
1096 }
1097 
1098 /*
1099  * Yield / synchronous reschedule.  This is a bit tricky because the trap
1100  * code might have set a lazy release on the switch function.   Setting
1101  * P_PASSIVE_ACQ will ensure that the lazy release executes when we call
1102  * switch, and that we are given a greater chance of affinity with our
1103  * current cpu.
1104  *
1105  * We call lwkt_setpri_self() to rotate our thread to the end of the lwkt
1106  * run queue.  lwkt_switch() will also execute any assigned passive release
1107  * (which usually calls release_curproc()), allowing a same/higher priority
1108  * process to be designated as the current process.
1109  *
1110  * While it is possible for a lower priority process to be designated,
1111  * it's call to lwkt_maybe_switch() in acquire_curproc() will likely
1112  * round-robin back to us and we will be able to re-acquire the current
1113  * process designation.
1114  *
1115  * MPSAFE
1116  */
1117 void
1118 uio_yield(void)
1119 {
1120 	struct thread *td = curthread;
1121 	struct proc *p = td->td_proc;
1122 
1123 	lwkt_setpri_self(td->td_pri & TDPRI_MASK);
1124 	if (p) {
1125 		p->p_flag |= P_PASSIVE_ACQ;
1126 		lwkt_switch();
1127 		p->p_flag &= ~P_PASSIVE_ACQ;
1128 	} else {
1129 		lwkt_switch();
1130 	}
1131 }
1132 
1133 /*
1134  * Compute a tenex style load average of a quantity on
1135  * 1, 5 and 15 minute intervals.
1136  */
1137 static int loadav_count_runnable(struct lwp *p, void *data);
1138 
1139 static void
1140 loadav(void *arg)
1141 {
1142 	struct loadavg *avg;
1143 	int i, nrun;
1144 
1145 	nrun = 0;
1146 	alllwp_scan(loadav_count_runnable, &nrun);
1147 	avg = &averunnable;
1148 	for (i = 0; i < 3; i++) {
1149 		avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
1150 		    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
1151 	}
1152 
1153 	/*
1154 	 * Schedule the next update to occur after 5 seconds, but add a
1155 	 * random variation to avoid synchronisation with processes that
1156 	 * run at regular intervals.
1157 	 */
1158 	callout_reset(&loadav_callout, hz * 4 + (int)(krandom() % (hz * 2 + 1)),
1159 		      loadav, NULL);
1160 }
1161 
1162 static int
1163 loadav_count_runnable(struct lwp *lp, void *data)
1164 {
1165 	int *nrunp = data;
1166 	thread_t td;
1167 
1168 	switch (lp->lwp_stat) {
1169 	case LSRUN:
1170 		if ((td = lp->lwp_thread) == NULL)
1171 			break;
1172 		if (td->td_flags & TDF_BLOCKED)
1173 			break;
1174 		++*nrunp;
1175 		break;
1176 	default:
1177 		break;
1178 	}
1179 	return(0);
1180 }
1181 
1182 /* ARGSUSED */
1183 static void
1184 sched_setup(void *dummy)
1185 {
1186 	callout_init(&loadav_callout);
1187 	callout_init(&schedcpu_callout);
1188 
1189 	/* Kick off timeout driven events by calling first time. */
1190 	schedcpu(NULL);
1191 	loadav(NULL);
1192 }
1193 
1194