xref: /openbsd-src/sys/kern/kern_synch.c (revision 11efff7f3ac2b3cfeff0c0cddc14294d9b3aca4f)
1 /*	$OpenBSD: kern_synch.c,v 1.61 2004/07/29 06:25:45 tedu Exp $	*/
2 /*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/
3 
4 /*-
5  * Copyright (c) 1982, 1986, 1990, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/proc.h>
43 #include <sys/kernel.h>
44 #include <sys/buf.h>
45 #include <sys/signalvar.h>
46 #include <sys/resourcevar.h>
47 #include <uvm/uvm_extern.h>
48 #include <sys/sched.h>
49 #include <sys/timeout.h>
50 
51 #ifdef KTRACE
52 #include <sys/ktrace.h>
53 #endif
54 
55 void updatepri(struct proc *);
56 void endtsleep(void *);
57 
58 
/*
 * Hash an identifier to a sleep queue bucket.
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
struct slpque {
	struct proc *sq_head;		/* first proc on this queue */
	struct proc **sq_tailp;		/* addr of last proc's p_forw link */
} slpque[TABLESIZE];
70 
71 /*
72  * During autoconfiguration or after a panic, a sleep will simply
73  * lower the priority briefly to allow interrupts, then return.
74  * The priority to be used (safepri) is machine-dependent, thus this
75  * value is initialized and maintained in the machine-dependent layers.
76  * This priority will typically be 0, or the lowest priority
77  * that is safe for use on the interrupt stack; it can be made
78  * higher to block network software interrupts after panics.
79  */
80 int safepri;
81 
82 /*
83  * General sleep call.  Suspends the current process until a wakeup is
84  * performed on the specified identifier.  The process will then be made
85  * runnable with the specified priority.  Sleeps at most timo/hz seconds
86  * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
87  * before and after sleeping, else signals are not checked.  Returns 0 if
88  * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
89  * signal needs to be delivered, ERESTART is returned if the current system
90  * call should be restarted if possible, and EINTR is returned if the system
91  * call should be interrupted by the signal (return EINTR).
92  *
93  * The interlock is held until the scheduler_slock (XXX) is held.  The
94  * interlock will be locked before returning back to the caller
95  * unless the PNORELOCK flag is specified, in which case the
96  * interlock will always be unlocked upon return.
97  */
int
ltsleep(ident, priority, wmesg, timo, interlock)
	void *ident;
	int priority, timo;
	const char *wmesg;
	volatile struct simplelock *interlock;
{
	struct proc *p = curproc;
	struct slpque *qp;
	int s, sig;
	int catch = priority & PCATCH;	/* check signals around the sleep? */
	int relock = (priority & PNORELOCK) == 0; /* re-take interlock on exit? */

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);		/* briefly open the interrupt window */
		splx(s);
		/* Honor PNORELOCK even on this short-circuit path. */
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);	/* trace: about to context switch */
#endif

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	/* Must be the running proc, with a wchan, and not already queued. */
	if (ident == NULL || p->p_stat != SONPROC || p->p_back != NULL)
		panic("tsleep");
#endif

	/* Record what we're waiting on and append to the hash bucket. */
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;	/* link in and terminate list */
	if (timo)
		timeout_add(&p->p_sleep_to, timo);	/* arm endtsleep() */
	/*
	 * We can now release the interlock; the scheduler_slock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			/* Signal pending: abort the sleep before switching. */
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SONPROC;
			SCHED_UNLOCK(s);
			goto resume;
		}
		if (p->p_wchan == 0) {
			/* Woken while stopped in CURSIG; nothing to wait for. */
			catch = 0;
			SCHED_UNLOCK(s);
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;	/* voluntary context switch */
	SCHED_ASSERT_LOCKED();
	mi_switch();
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep\nbpendtsleep:");
#endif

	SCHED_ASSERT_UNLOCKED();
	/*
	 * Note! this splx belongs to the SCHED_LOCK(s) above, mi_switch
	 * releases the scheduler lock, but does not lower the spl.
	 */
	splx(s);

resume:
#ifdef __HAVE_CPUINFO
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
#else
	curpriority = p->p_usrpri;
#endif
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		/* endtsleep() fired: report EWOULDBLOCK unless a signal won. */
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (interlock != NULL && relock)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		timeout_del(&p->p_sleep_to);	/* woke first; disarm timeout */
	/* Pending (or newly arrived) signal while PCATCH was requested? */
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (interlock != NULL && relock)
			simple_lock(interlock);
		/* SA_RESTART not set for this signal -> interrupt the call. */
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif

	if (interlock != NULL && relock)
		simple_lock(interlock);
	return (0);
}
243 
244 /*
245  * Implement timeout for tsleep.
246  * If process hasn't been awakened (wchan non-zero),
247  * set timeout flag and undo the sleep.  If proc
248  * is stopped, just unsleep so it will remain stopped.
249  */
250 void
251 endtsleep(arg)
252 	void *arg;
253 {
254 	struct proc *p;
255 	int s;
256 
257 	p = (struct proc *)arg;
258 	SCHED_LOCK(s);
259 	if (p->p_wchan) {
260 		if (p->p_stat == SSLEEP)
261 			setrunnable(p);
262 		else
263 			unsleep(p);
264 		p->p_flag |= P_TIMEOUT;
265 	}
266 	SCHED_UNLOCK(s);
267 }
268 
269 /*
270  * Remove a process from its wait queue
271  */
272 void
273 unsleep(p)
274 	register struct proc *p;
275 {
276 	register struct slpque *qp;
277 	register struct proc **hp;
278 #if 0
279 	int s;
280 
281 	/*
282 	 * XXX we cannot do recursive SCHED_LOCKing yet.  All callers lock
283 	 * anyhow.
284 	 */
285 	SCHED_LOCK(s);
286 #endif
287 	if (p->p_wchan) {
288 		hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
289 		while (*hp != p)
290 			hp = &(*hp)->p_forw;
291 		*hp = p->p_forw;
292 		if (qp->sq_tailp == &p->p_forw)
293 			qp->sq_tailp = hp;
294 		p->p_wchan = 0;
295 	}
296 #if 0
297 	SCHED_UNLOCK(s);
298 #endif
299 }
300 
/*
 * Make processes sleeping on the specified identifier runnable.
 * At most n are woken; wakeup() passes n == -1, which never reaches
 * zero and therefore wakes every matching process.
 */
void
wakeup_n(ident, n)
	void *ident;
	int n;
{
	struct slpque *qp;
	struct proc *p, **q;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
restart:
	/* q always addresses the link to patch when we unlink *q. */
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			--n;
			p->p_wchan = 0;
			/* Unlink p; fix the tail pointer if p was last. */
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			/* A stopped proc is only dequeued; it stays SSTOP. */
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;

				/*
				 * Since curpriority is a user priority,
				 * p->p_priority is always better than
				 * curpriority on the last CPU on
				 * which it ran.
				 *
				 * XXXSMP See affinity comment in
				 * resched_proc().
				 */
				if ((p->p_flag & P_INMEM) != 0) {
					setrunqueue(p);
#ifdef __HAVE_CPUINFO
					KASSERT(p->p_cpu != NULL);
					need_resched(p->p_cpu);
#else
					need_resched(0);
#endif
				} else {
					/*
					 * Not resident; notify whoever sleeps
					 * on &proc0 (presumably the swapper —
					 * verify against the sched code).
					 */
					wakeup((caddr_t)&proc0);
				}
				/* END INLINE EXPANSION */

				/*
				 * Rescan from the bucket head: the nested
				 * wakeup()/updatepri() work above may have
				 * changed the queue under us.
				 */
				if (n != 0)
					goto restart;
				else
					break;
			}
		} else
			q = &p->p_forw;
	}
	SCHED_UNLOCK(s);
}
366 
/*
 * Wake every process sleeping on chan: delegate to wakeup_n() with a
 * count of -1, which can never decrement to zero.
 */
void
wakeup(chan)
	void *chan;
{
	wakeup_n(chan, -1);
}
373