xref: /netbsd-src/sys/kern/kern_synch.c (revision 267197ec1eebfcb9810ea27a89625b6ddf68e3e7)
1 /*	$NetBSD: kern_synch.c,v 1.217 2008/02/14 14:26:57 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
10  * Daniel Sieger.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the NetBSD
23  *	Foundation, Inc. and its contributors.
24  * 4. Neither the name of The NetBSD Foundation nor the names of its
25  *    contributors may be used to endorse or promote products derived
26  *    from this software without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
29  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
30  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
31  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
32  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
36  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
37  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
38  * POSSIBILITY OF SUCH DAMAGE.
39  */
40 
41 /*-
42  * Copyright (c) 1982, 1986, 1990, 1991, 1993
43  *	The Regents of the University of California.  All rights reserved.
44  * (c) UNIX System Laboratories, Inc.
45  * All or some portions of this file are derived from material licensed
46  * to the University of California by American Telephone and Telegraph
47  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
48  * the permission of UNIX System Laboratories, Inc.
49  *
50  * Redistribution and use in source and binary forms, with or without
51  * modification, are permitted provided that the following conditions
52  * are met:
53  * 1. Redistributions of source code must retain the above copyright
54  *    notice, this list of conditions and the following disclaimer.
55  * 2. Redistributions in binary form must reproduce the above copyright
56  *    notice, this list of conditions and the following disclaimer in the
57  *    documentation and/or other materials provided with the distribution.
58  * 3. Neither the name of the University nor the names of its contributors
59  *    may be used to endorse or promote products derived from this software
60  *    without specific prior written permission.
61  *
62  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
63  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
64  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
65  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
66  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
67  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
68  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
69  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
70  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
71  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
72  * SUCH DAMAGE.
73  *
74  *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
75  */
76 
77 #include <sys/cdefs.h>
78 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.217 2008/02/14 14:26:57 ad Exp $");
79 
80 #include "opt_kstack.h"
81 #include "opt_lockdebug.h"
82 #include "opt_multiprocessor.h"
83 #include "opt_perfctrs.h"
84 
85 #define	__MUTEX_PRIVATE
86 
87 #include <sys/param.h>
88 #include <sys/systm.h>
89 #include <sys/proc.h>
90 #include <sys/kernel.h>
91 #if defined(PERFCTRS)
92 #include <sys/pmc.h>
93 #endif
94 #include <sys/cpu.h>
95 #include <sys/resourcevar.h>
96 #include <sys/sched.h>
97 #include <sys/syscall_stats.h>
98 #include <sys/sleepq.h>
99 #include <sys/lockdebug.h>
100 #include <sys/evcnt.h>
101 #include <sys/intr.h>
102 #include <sys/lwpctl.h>
103 #include <sys/atomic.h>
104 #include <sys/simplelock.h>
105 
106 #include <uvm/uvm_extern.h>
107 
108 callout_t sched_pstats_ch;
109 unsigned int sched_pstats_ticks;
110 
111 kcondvar_t	lbolt;			/* once a second sleep address */
112 
113 static void	sched_unsleep(struct lwp *);
114 static void	sched_changepri(struct lwp *, pri_t);
115 static void	sched_lendpri(struct lwp *, pri_t);
116 
117 syncobj_t sleep_syncobj = {
118 	SOBJ_SLEEPQ_SORTED,
119 	sleepq_unsleep,
120 	sleepq_changepri,
121 	sleepq_lendpri,
122 	syncobj_noowner,
123 };
124 
125 syncobj_t sched_syncobj = {
126 	SOBJ_SLEEPQ_SORTED,
127 	sched_unsleep,
128 	sched_changepri,
129 	sched_lendpri,
130 	syncobj_noowner,
131 };
132 
133 /*
134  * During autoconfiguration or after a panic, a sleep will simply lower the
135  * priority briefly to allow interrupts, then return.  The priority to be
136  * used (safepri) is machine-dependent, thus this value is initialized and
137  * maintained in the machine-dependent layers.  This priority will typically
138  * be 0, or the lowest priority that is safe for use on the interrupt stack;
139  * it can be made higher to block network software interrupts after panics.
140  */
141 int	safepri;
142 
143 /*
144  * OBSOLETE INTERFACE
145  *
146  * General sleep call.  Suspends the current process until a wakeup is
147  * performed on the specified identifier.  The process will then be made
148  * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
149  * means no timeout).  If pri includes the PCATCH flag, signals are checked
150  * before and after sleeping, otherwise signals are not checked.  Returns 0
151  * if awakened, or EWOULDBLOCK if the timeout expires.  If PCATCH is set and
152  * a signal needs to be delivered, ERESTART is returned if the current
153  * system call should be restarted if possible, and EINTR is returned if
154  * the system call should be interrupted by the signal.
155  *
156  * The interlock is held until we are on a sleep queue. The interlock will
157  * be locked before returning to the caller unless the PNORELOCK flag
158  * is specified, in which case the interlock will always be unlocked upon
159  * return.
160  */
161 int
162 ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
163 	volatile struct simplelock *interlock)
164 {
165 	struct lwp *l = curlwp;
166 	sleepq_t *sq;
167 	int error;
168 
169 	KASSERT((l->l_pflag & LP_INTR) == 0);
170 
171 	if (sleepq_dontsleep(l)) {
172 		(void)sleepq_abort(NULL, 0);
173 		if ((priority & PNORELOCK) != 0)
174 			simple_unlock(interlock);
175 		return 0;
176 	}
177 
178 	l->l_kpriority = true;
179 	sq = sleeptab_lookup(&sleeptab, ident);
180 	sleepq_enter(sq, l);
181 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
182 
183 	if (interlock != NULL) {
184 		KASSERT(simple_lock_held(interlock));
185 		simple_unlock(interlock);
186 	}
187 
188 	error = sleepq_block(timo, priority & PCATCH);
189 
190 	if (interlock != NULL && (priority & PNORELOCK) == 0)
191 		simple_lock(interlock);
192 
193 	return error;
194 }
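
/*
 * Illustrative sketch of a typical legacy caller: wait for a flag
 * protected by a simplelock.  The "example_slock"/"example_ready" names
 * and the wakeup side are hypothetical, shown only to document the
 * interface.
 */
#if 0	/* example only, not compiled */
static struct simplelock example_slock = SIMPLELOCK_INITIALIZER;
static int example_ready;

static int
example_ltsleep_wait(void)
{
	int error = 0;

	simple_lock(&example_slock);
	while (example_ready == 0 && error == 0) {
		/* The interlock is dropped while asleep and re-taken. */
		error = ltsleep(&example_ready, PWAIT | PCATCH,
		    "exwait", 0, &example_slock);
	}
	simple_unlock(&example_slock);
	return error;
}
#endif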
195 
196 int
197 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
198 	kmutex_t *mtx)
199 {
200 	struct lwp *l = curlwp;
201 	sleepq_t *sq;
202 	int error;
203 
204 	KASSERT((l->l_pflag & LP_INTR) == 0);
205 
206 	if (sleepq_dontsleep(l)) {
207 		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
208 		return 0;
209 	}
210 
211 	l->l_kpriority = true;
212 	sq = sleeptab_lookup(&sleeptab, ident);
213 	sleepq_enter(sq, l);
214 	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
215 	mutex_exit(mtx);
216 	error = sleepq_block(timo, priority & PCATCH);
217 
218 	if ((priority & PNORELOCK) == 0)
219 		mutex_enter(mtx);
220 
221 	return error;
222 }
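
/*
 * Illustrative sketch of the usual mtsleep() pattern: wait for a
 * condition under a kmutex, re-checking it in a loop.  The names
 * "example_lock" and "example_done" are hypothetical; condition
 * variables are the preferred interface for new code.
 */
#if 0	/* example only, not compiled */
static kmutex_t example_lock;	/* mutex_init(&example_lock, MUTEX_DEFAULT, IPL_NONE) */
static int example_done;

static int
example_mtsleep_wait(void)
{
	int error = 0;

	mutex_enter(&example_lock);
	while (example_done == 0 && error == 0) {
		/* The mutex is released while asleep and re-acquired. */
		error = mtsleep(&example_done, PWAIT | PCATCH,
		    "exmtwait", 0, &example_lock);
	}
	mutex_exit(&example_lock);
	return error;
}
#endif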
223 
224 /*
225  * General sleep call for situations where a wake-up is not expected.
226  */
227 int
228 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
229 {
230 	struct lwp *l = curlwp;
231 	sleepq_t *sq;
232 	int error;
233 
234 	if (sleepq_dontsleep(l))
235 		return sleepq_abort(NULL, 0);
236 
237 	if (mtx != NULL)
238 		mutex_exit(mtx);
239 	l->l_kpriority = true;
240 	sq = sleeptab_lookup(&sleeptab, l);
241 	sleepq_enter(sq, l);
242 	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
243 	error = sleepq_block(timo, intr);
244 	if (mtx != NULL)
245 		mutex_enter(mtx);
246 
247 	return error;
248 }
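
/*
 * Illustrative sketch: kpause() is typically used for short, bounded
 * delays where nobody will call wakeup(), e.g. while polling hardware.
 * On timeout it returns EWOULDBLOCK, which callers normally ignore.
 * The function name is hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_short_delay(void)
{

	/* Sleep for roughly a tenth of a second; no mutex, no signals. */
	(void)kpause("exdelay", false, hz / 10, NULL);
}
#endif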
249 
250 /*
251  * OBSOLETE INTERFACE
252  *
253  * Make all processes sleeping on the specified identifier runnable.
254  */
255 void
256 wakeup(wchan_t ident)
257 {
258 	sleepq_t *sq;
259 
260 	if (cold)
261 		return;
262 
263 	sq = sleeptab_lookup(&sleeptab, ident);
264 	sleepq_wake(sq, ident, (u_int)-1);
265 }
266 
267 /*
268  * OBSOLETE INTERFACE
269  *
270  * Make the highest priority process first in line on the specified
271  * identifier runnable.
272  */
273 void
274 wakeup_one(wchan_t ident)
275 {
276 	sleepq_t *sq;
277 
278 	if (cold)
279 		return;
280 
281 	sq = sleeptab_lookup(&sleeptab, ident);
282 	sleepq_wake(sq, ident, 1);
283 }
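
/*
 * Illustrative sketch of the wakeup side of the mtsleep() example above.
 * wakeup() makes every LWP sleeping on the identifier runnable;
 * wakeup_one() would wake only the first (highest priority) sleeper.
 * The names are the hypothetical ones used in the earlier sketch.
 */
#if 0	/* example only, not compiled */
static void
example_signal_done(void)
{

	mutex_enter(&example_lock);
	example_done = 1;
	/* The identifier must match what the sleeper passed to mtsleep(). */
	wakeup(&example_done);
	mutex_exit(&example_lock);
}
#endif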
284 
285 
286 /*
287  * General yield call.  Puts the current process back on its run queue and
288  * performs a voluntary context switch.  Should only be called when the
289  * current process explicitly requests it (e.g. sched_yield(2)).
290  */
291 void
292 yield(void)
293 {
294 	struct lwp *l = curlwp;
295 
296 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
297 	lwp_lock(l);
298 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
299 	KASSERT(l->l_stat == LSONPROC);
300 	l->l_kpriority = false;
301 	if (l->l_class == SCHED_OTHER) {
302 		/*
303 		 * Only for timeshared threads.  It will be reset
304 		 * by the scheduler in due course.
305 		 */
306 		l->l_priority = 0;
307 	}
308 	(void)mi_switch(l);
309 	KERNEL_LOCK(l->l_biglocks, l);
310 }
311 
312 /*
313  * General preemption call.  Puts the current process back on its run queue
314  * and performs an involuntary context switch.
315  */
316 void
317 preempt(void)
318 {
319 	struct lwp *l = curlwp;
320 
321 	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
322 	lwp_lock(l);
323 	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
324 	KASSERT(l->l_stat == LSONPROC);
325 	l->l_kpriority = false;
326 	l->l_nivcsw++;
327 	(void)mi_switch(l);
328 	KERNEL_LOCK(l->l_biglocks, l);
329 }
330 
331 /*
332  * Compute the amount of time during which the current lwp was running.
333  *
334  * - update l_rtime unless it's an idle lwp.
335  */
336 
337 void
338 updatertime(lwp_t *l, const struct bintime *now)
339 {
340 
341 	if ((l->l_flag & LW_IDLE) != 0)
342 		return;
343 
344 	/* rtime += now - stime */
345 	bintime_add(&l->l_rtime, now);
346 	bintime_sub(&l->l_rtime, &l->l_stime);
347 }
348 
349 /*
350  * The machine independent parts of context switch.
351  *
352  * Returns 1 if another LWP was actually run.
353  */
354 int
355 mi_switch(lwp_t *l)
356 {
357 	struct cpu_info *ci, *tci = NULL;
358 	struct schedstate_percpu *spc;
359 	struct lwp *newl;
360 	int retval, oldspl;
361 	struct bintime bt;
362 	bool returning;
363 
364 	KASSERT(lwp_locked(l, NULL));
365 	LOCKDEBUG_BARRIER(l->l_mutex, 1);
366 
367 #ifdef KSTACK_CHECK_MAGIC
368 	kstack_check_magic(l);
369 #endif
370 
371 	binuptime(&bt);
372 
373 	KDASSERT(l->l_cpu == curcpu());
374 	ci = l->l_cpu;
375 	spc = &ci->ci_schedstate;
376 	returning = false;
377 	newl = NULL;
378 
379 	/*
380 	 * If we have been asked to switch to a specific LWP, then there
381 	 * is no need to inspect the run queues.  If a soft interrupt is
382 	 * blocking, then return to the interrupted thread without adjusting
383 	 * VM context or its start time: neither has been changed in order
384 	 * to take the interrupt.
385 	 */
386 	if (l->l_switchto != NULL) {
387 		if ((l->l_pflag & LP_INTR) != 0) {
388 			returning = true;
389 			softint_block(l);
390 			if ((l->l_flag & LW_TIMEINTR) != 0)
391 				updatertime(l, &bt);
392 		}
393 		newl = l->l_switchto;
394 		l->l_switchto = NULL;
395 	}
396 #ifndef __HAVE_FAST_SOFTINTS
397 	else if (ci->ci_data.cpu_softints != 0) {
398 		/* There are pending soft interrupts, so pick one. */
399 		newl = softint_picklwp();
400 		newl->l_stat = LSONPROC;
401 		newl->l_flag |= LW_RUNNING;
402 	}
403 #endif	/* !__HAVE_FAST_SOFTINTS */
404 
405 	/* Count time spent in current system call */
406 	if (!returning) {
407 		SYSCALL_TIME_SLEEP(l);
408 
409 		/*
410 		 * XXXSMP If we are using h/w performance counters,
411 		 * save context.
412 		 */
413 #if PERFCTRS
414 		if (PMC_ENABLED(l->l_proc)) {
415 			pmc_save_context(l->l_proc);
416 		}
417 #endif
418 		updatertime(l, &bt);
419 	}
420 
421 	/*
422 	 * If on the CPU and we have gotten this far, then we must yield.
423 	 */
424 	KASSERT(l->l_stat != LSRUN);
425 	if (l->l_stat == LSONPROC && (l->l_target_cpu || l != newl)) {
426 		KASSERT(lwp_locked(l, spc->spc_lwplock));
427 
428 		tci = l->l_target_cpu;
429 		if (__predict_false(tci != NULL)) {
430 			/* Double-lock the runqueues */
431 			spc_dlock(ci, tci);
432 		} else {
433 			/* Lock the runqueue */
434 			spc_lock(ci);
435 		}
436 
437 		if ((l->l_flag & LW_IDLE) == 0) {
438 			l->l_stat = LSRUN;
439 			if (__predict_false(tci != NULL)) {
440 				/*
441 				 * Set the new CPU and lock, and clear
442 				 * l_target_cpu - the thread will be enqueued
443 				 * on the target CPU's run queue.
444 				 */
445 				l->l_cpu = tci;
446 				lwp_setlock(l, tci->ci_schedstate.spc_mutex);
447 				l->l_target_cpu = NULL;
448 			} else {
449 				lwp_setlock(l, spc->spc_mutex);
450 			}
451 			sched_enqueue(l, true);
452 		} else {
453 			KASSERT(tci == NULL);
454 			l->l_stat = LSIDL;
455 		}
456 	} else {
457 		/* Lock the runqueue */
458 		spc_lock(ci);
459 	}
460 
461 	/*
462 	 * Let sched_nextlwp() select the LWP to run on the CPU next.
463 	 * If no LWP is runnable, select the idle LWP.
464 	 *
465 	 * Note that spc_lwplock might not necessarily be held, and
466 	 * the new thread will be unlocked after the LWP-lock is set.
467 	 */
468 	if (newl == NULL) {
469 		newl = sched_nextlwp();
470 		if (newl != NULL) {
471 			sched_dequeue(newl);
472 			KASSERT(lwp_locked(newl, spc->spc_mutex));
473 			newl->l_stat = LSONPROC;
474 			newl->l_cpu = ci;
475 			newl->l_flag |= LW_RUNNING;
476 			lwp_setlock(newl, spc->spc_lwplock);
477 		} else {
478 			newl = ci->ci_data.cpu_idlelwp;
479 			newl->l_stat = LSONPROC;
480 			newl->l_flag |= LW_RUNNING;
481 		}
482 		/*
483 		 * Only clear want_resched if there are no
484 		 * pending (slow) software interrupts.
485 		 */
486 		ci->ci_want_resched = ci->ci_data.cpu_softints;
487 		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
488 		spc->spc_curpriority = lwp_eprio(newl);
489 	}
490 
491 	/* Items that must be updated with the CPU locked. */
492 	if (!returning) {
493 		/* Update the new LWP's start time. */
494 		newl->l_stime = bt;
495 
496 		/*
497 		 * ci_curlwp changes when a fast soft interrupt occurs.
498 		 * We use cpu_onproc to keep track of which kernel or
499 		 * user thread is running 'underneath' the software
500 		 * interrupt.  This is important for time accounting,
501 		 * itimers and forcing user threads to preempt (aston).
502 		 */
503 		ci->ci_data.cpu_onproc = newl;
504 	}
505 
506 	if (l != newl) {
507 		struct lwp *prevlwp;
508 
509 		/* Release all locks, but leave the current LWP locked */
510 		if (l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex) {
511 			/*
512 			 * In case of migration, drop the local runqueue
513 			 * lock; the thread is now on the other run queue.
514 			 */
515 			if (__predict_false(tci != NULL))
516 				spc_unlock(ci);
517 			/*
518 			 * Drop spc_lwplock if the current LWP has been moved
519 			 * to the run queue (it is now locked by spc_mutex).
520 			 */
521 			mutex_spin_exit(spc->spc_lwplock);
522 		} else {
523 			/*
524 			 * Otherwise, drop the spc_mutex, we are done with the
525 			 * run queues.
526 			 */
527 			mutex_spin_exit(spc->spc_mutex);
528 			KASSERT(tci == NULL);
529 		}
530 
531 		/*
532 		 * Mark that a context switch is going to be performed
533 		 * for this LWP, to protect it from being switched
534 		 * to on another CPU.
535 		 */
536 		KASSERT(l->l_ctxswtch == 0);
537 		l->l_ctxswtch = 1;
538 		l->l_ncsw++;
539 		l->l_flag &= ~LW_RUNNING;
540 
541 		/*
542 		 * Increase the count of spin-mutexes before the release
543 		 * of the last lock - we must remain at IPL_SCHED during
544 		 * the context switch.
545 		 */
546 		oldspl = MUTEX_SPIN_OLDSPL(ci);
547 		ci->ci_mtx_count--;
548 		lwp_unlock(l);
549 
550 		/* Unlocked, but for statistics only. */
551 		uvmexp.swtch++;
552 
553 		/* Update status for lwpctl, if present. */
554 		if (l->l_lwpctl != NULL)
555 			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;
556 
557 		/*
558 		 * Save old VM context, unless a soft interrupt
559 		 * handler is blocking.
560 		 */
561 		if (!returning)
562 			pmap_deactivate(l);
563 
564 		/*
565 		 * We may need to spin-wait if 'newl' is still
566 		 * context switching on another CPU.
567 		 */
568 		if (newl->l_ctxswtch != 0) {
569 			u_int count;
570 			count = SPINLOCK_BACKOFF_MIN;
571 			while (newl->l_ctxswtch)
572 				SPINLOCK_BACKOFF(count);
573 		}
574 
575 		/* Switch to the new LWP. */
576 		prevlwp = cpu_switchto(l, newl, returning);
577 		ci = curcpu();
578 
579 		/*
580 		 * Switched away - we have new curlwp.
581 		 * Restore VM context and IPL.
582 		 */
583 		pmap_activate(l);
584 		if (prevlwp != NULL) {
585 			/* Normalize the count of the spin-mutexes */
586 			ci->ci_mtx_count++;
587 			/* Unmark the state of context switch */
588 			membar_exit();
589 			prevlwp->l_ctxswtch = 0;
590 		}
591 		splx(oldspl);
592 
593 		/* Update status for lwpctl, if present. */
594 		if (l->l_lwpctl != NULL)
595 			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
596 
597 		retval = 1;
598 	} else {
599 		/* Nothing to do - just unlock and return. */
600 		KASSERT(tci == NULL);
601 		spc_unlock(ci);
602 		lwp_unlock(l);
603 		retval = 0;
604 	}
605 
606 	KASSERT(l == curlwp);
607 	KASSERT(l->l_stat == LSONPROC);
608 	KASSERT(l->l_cpu == ci);
609 
610 	/*
611 	 * XXXSMP If we are using h/w performance counters, restore context.
612 	 */
613 #if PERFCTRS
614 	if (PMC_ENABLED(l->l_proc)) {
615 		pmc_restore_context(l->l_proc);
616 	}
617 #endif
618 	SYSCALL_TIME_WAKEUP(l);
619 	LOCKDEBUG_BARRIER(NULL, 1);
620 
621 	return retval;
622 }
623 
624 /*
625  * Change process state to be runnable, placing it on the run queue if it is
626  * in memory, and awakening the swapper if it isn't in memory.
627  *
628  * Call with the process and LWP locked.  Will return with the LWP unlocked.
629  */
630 void
631 setrunnable(struct lwp *l)
632 {
633 	struct proc *p = l->l_proc;
634 	struct cpu_info *ci;
635 	sigset_t *ss;
636 
637 	KASSERT((l->l_flag & LW_IDLE) == 0);
638 	KASSERT(mutex_owned(&p->p_smutex));
639 	KASSERT(lwp_locked(l, NULL));
640 	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);
641 
642 	switch (l->l_stat) {
643 	case LSSTOP:
644 		/*
645 		 * If we're being traced (possibly because someone attached us
646 		 * while we were stopped), check for a signal from the debugger.
647 		 */
648 		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
649 			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
650 				ss = &l->l_sigpend.sp_set;
651 			else
652 				ss = &p->p_sigpend.sp_set;
653 			sigaddset(ss, p->p_xstat);
654 			signotify(l);
655 		}
656 		p->p_nrlwps++;
657 		break;
658 	case LSSUSPENDED:
659 		l->l_flag &= ~LW_WSUSPEND;
660 		p->p_nrlwps++;
661 		cv_broadcast(&p->p_lwpcv);
662 		break;
663 	case LSSLEEP:
664 		KASSERT(l->l_wchan != NULL);
665 		break;
666 	default:
667 		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
668 	}
669 
670 	/*
671 	 * If the LWP was sleeping interruptibly, then it's OK to start it
672 	 * again.  If not, mark it as still sleeping.
673 	 */
674 	if (l->l_wchan != NULL) {
675 		l->l_stat = LSSLEEP;
676 		/* lwp_unsleep() will release the lock. */
677 		lwp_unsleep(l);
678 		return;
679 	}
680 
681 	/*
682 	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
683 	 * about to call mi_switch(), in which case it will yield.
684 	 */
685 	if ((l->l_flag & LW_RUNNING) != 0) {
686 		l->l_stat = LSONPROC;
687 		l->l_slptime = 0;
688 		lwp_unlock(l);
689 		return;
690 	}
691 
692 	/*
693 	 * Look for a CPU to run on.
694 	 * Set the LWP runnable.
695 	 */
696 	ci = sched_takecpu(l);
697 	l->l_cpu = ci;
698 	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
699 		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
700 		lwp_lock(l);
701 	}
702 	sched_setrunnable(l);
703 	l->l_stat = LSRUN;
704 	l->l_slptime = 0;
705 
706 	/*
707 	 * If the thread is swapped out, wake the swapper to bring it back in.
708 	 * Otherwise, enter it into a run queue.
709 	 */
710 	if (l->l_flag & LW_INMEM) {
711 		sched_enqueue(l, false);
712 		resched_cpu(l);
713 		lwp_unlock(l);
714 	} else {
715 		lwp_unlock(l);
716 		uvm_kick_scheduler();
717 	}
718 }
719 
720 /*
721  * suspendsched:
722  *
723  *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
724  */
725 void
726 suspendsched(void)
727 {
728 	CPU_INFO_ITERATOR cii;
729 	struct cpu_info *ci;
730 	struct lwp *l;
731 	struct proc *p;
732 
733 	/*
734 	 * We do this by process in order not to violate the locking rules.
735 	 */
736 	mutex_enter(&proclist_lock);
737 	PROCLIST_FOREACH(p, &allproc) {
738 		mutex_enter(&p->p_smutex);
739 
740 		if ((p->p_flag & PK_SYSTEM) != 0) {
741 			mutex_exit(&p->p_smutex);
742 			continue;
743 		}
744 
745 		p->p_stat = SSTOP;
746 
747 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
748 			if (l == curlwp)
749 				continue;
750 
751 			lwp_lock(l);
752 
753 			/*
754 			 * Set LW_WREBOOT so that the LWP will suspend itself
755 			 * when it tries to return to user mode.  We want to
756 			 * get as many LWPs as possible to the user/kernel
757 			 * boundary, so that they will release any locks
758 			 * that they hold.
759 			 */
760 			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);
761 
762 			if (l->l_stat == LSSLEEP &&
763 			    (l->l_flag & LW_SINTR) != 0) {
764 				/* setrunnable() will release the lock. */
765 				setrunnable(l);
766 				continue;
767 			}
768 
769 			lwp_unlock(l);
770 		}
771 
772 		mutex_exit(&p->p_smutex);
773 	}
774 	mutex_exit(&proclist_lock);
775 
776 	/*
777 	 * Kick all CPUs to make them preempt any LWPs running in user mode.
778 	 * They'll trap into the kernel and suspend themselves in userret().
779 	 */
780 	for (CPU_INFO_FOREACH(cii, ci)) {
781 		spc_lock(ci);
782 		cpu_need_resched(ci, RESCHED_IMMED);
783 		spc_unlock(ci);
784 	}
785 }
786 
787 /*
788  * sched_unsleep:
789  *
790  *	This is called when the LWP has not been awoken normally but instead
791  *	interrupted: for example, if the sleep timed out.  Because of this,
792  *	it's not a valid action for running or idle LWPs.
793  */
794 static void
795 sched_unsleep(struct lwp *l)
796 {
797 
798 	lwp_unlock(l);
799 	panic("sched_unsleep");
800 }
801 
802 void
803 resched_cpu(struct lwp *l)
804 {
805 	struct cpu_info *ci;
806 
807 	/*
808 	 * XXXSMP
809 	 * Since l->l_cpu persists across a context switch,
810 	 * this gives us *very weak* processor affinity, in
811 	 * that we notify the CPU on which the process last
812 	 * ran that it should try to switch.
813 	 *
814 	 * This does not guarantee that the process will run on
815 	 * that processor next, because another processor might
816 	 * grab it the next time it performs a context switch.
817 	 *
818 	 * This also does not handle the case where its last
819 	 * CPU is running a higher-priority process, but every
820 	 * other CPU is running a lower-priority process.  There
821 	 * are ways to handle this situation, but they're not
822 	 * currently very pretty, and we also need to weigh the
823 	 * cost of moving a process from one CPU to another.
824 	 */
825 	ci = l->l_cpu;
826 	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
827 		cpu_need_resched(ci, 0);
828 }
829 
830 static void
831 sched_changepri(struct lwp *l, pri_t pri)
832 {
833 
834 	KASSERT(lwp_locked(l, NULL));
835 
836 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
837 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
838 		sched_dequeue(l);
839 		l->l_priority = pri;
840 		sched_enqueue(l, false);
841 	} else {
842 		l->l_priority = pri;
843 	}
844 	resched_cpu(l);
845 }
846 
847 static void
848 sched_lendpri(struct lwp *l, pri_t pri)
849 {
850 
851 	KASSERT(lwp_locked(l, NULL));
852 
853 	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
854 		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
855 		sched_dequeue(l);
856 		l->l_inheritedprio = pri;
857 		sched_enqueue(l, false);
858 	} else {
859 		l->l_inheritedprio = pri;
860 	}
861 	resched_cpu(l);
862 }
863 
864 struct lwp *
865 syncobj_noowner(wchan_t wchan)
866 {
867 
868 	return NULL;
869 }
870 
871 
872 /* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
873 fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */
874 
875 /*
876  * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
877  * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
878  * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
879  *
880  * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
881  *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
882  *
883  * If you don't want to bother with the faster/more-accurate formula, you
884  * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
885  * (more general) method of calculating the %age of CPU used by a process.
886  */
887 #define	CCPU_SHIFT	(FSHIFT + 1)
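
/*
 * Worked example (assuming the default FSHIFT of 11, i.e. FSCALE = 2048):
 * ccpu = exp(-1/20) * 2048 ~= 1948.  sched_pstats() runs once per second
 * and scales each l_pctcpu by 1948/2048, so after 60 seconds with no new
 * ticks the old estimate has been multiplied by (1948/2048)^60 ~= exp(-3)
 * ~= 0.05 - that is, about 95% of it has decayed, matching the comment
 * above.
 */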
888 
889 /*
890  * sched_pstats:
891  *
892  * Update process statistics and check CPU resource allocation.
893  * Call scheduler-specific hook to eventually adjust process/LWP
894  * priorities.
895  */
896 /* ARGSUSED */
897 void
898 sched_pstats(void *arg)
899 {
900 	struct rlimit *rlim;
901 	struct lwp *l;
902 	struct proc *p;
903 	int sig, clkhz;
904 	long runtm;
905 
906 	sched_pstats_ticks++;
907 
908 	mutex_enter(&proclist_lock);
909 	PROCLIST_FOREACH(p, &allproc) {
910 		/*
911 		 * Increment time in/out of memory and sleep time (if
912 		 * sleeping).  We ignore overflow; with 16-bit int's
913 		 * (remember them?) overflow takes 45 days.
914 		 */
915 		mutex_enter(&p->p_smutex);
916 		mutex_spin_enter(&p->p_stmutex);
917 		runtm = p->p_rtime.sec;
918 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
919 			if ((l->l_flag & LW_IDLE) != 0)
920 				continue;
921 			lwp_lock(l);
922 			runtm += l->l_rtime.sec;
923 			l->l_swtime++;
924 			sched_pstats_hook(l);
925 			lwp_unlock(l);
926 
927 			/*
928 			 * p_pctcpu is only for ps.
929 			 */
930 			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
931 			if (l->l_slptime < 1) {
932 				clkhz = stathz != 0 ? stathz : hz;
933 #if	(FSHIFT >= CCPU_SHIFT)
934 				l->l_pctcpu += (clkhz == 100) ?
935 				    ((fixpt_t)l->l_cpticks) <<
936 				        (FSHIFT - CCPU_SHIFT) :
937 				    100 * (((fixpt_t) p->p_cpticks)
938 				        << (FSHIFT - CCPU_SHIFT)) / clkhz;
939 #else
940 				l->l_pctcpu += ((FSCALE - ccpu) *
941 				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
942 #endif
943 				l->l_cpticks = 0;
944 			}
945 		}
946 		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
947 		mutex_spin_exit(&p->p_stmutex);
948 
949 		/*
950 		 * Check if the process exceeds its CPU resource allocation.
951 		 * If over max, kill it.
952 		 */
953 		rlim = &p->p_rlimit[RLIMIT_CPU];
954 		sig = 0;
955 		if (runtm >= rlim->rlim_cur) {
956 			if (runtm >= rlim->rlim_max)
957 				sig = SIGKILL;
958 			else {
959 				sig = SIGXCPU;
960 				if (rlim->rlim_cur < rlim->rlim_max)
961 					rlim->rlim_cur += 5;
962 			}
963 		}
964 		mutex_exit(&p->p_smutex);
965 		if (sig) {
966 			mutex_enter(&proclist_mutex);
967 			psignal(p, sig);
968 			mutex_exit(&proclist_mutex);
969 		}
970 	}
971 	mutex_exit(&proclist_lock);
972 	uvm_meter();
973 	cv_wakeup(&lbolt);
974 	callout_schedule(&sched_pstats_ch, hz);
975 }
976 
977 void
978 sched_init(void)
979 {
980 
981 	cv_init(&lbolt, "lbolt");
982 	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
983 	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
984 	sched_setup();
985 	sched_pstats(NULL);
986 }
987