xref: /openbsd-src/sys/kern/kern_synch.c (revision daf88648c0e349d5c02e1504293082072c981640)
1 /*	$OpenBSD: kern_synch.c,v 1.75 2006/11/29 12:24:17 miod Exp $	*/
2 /*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1990, 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  * (c) UNIX System Laboratories, Inc.
8  * All or some portions of this file are derived from material licensed
9  * to the University of California by American Telephone and Telegraph
10  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11  * the permission of UNIX System Laboratories, Inc.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
38  */
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/proc.h>
43 #include <sys/kernel.h>
44 #include <sys/buf.h>
45 #include <sys/signalvar.h>
46 #include <sys/resourcevar.h>
47 #include <uvm/uvm_extern.h>
48 #include <sys/sched.h>
49 #include <sys/timeout.h>
50 #include <sys/mount.h>
51 #include <sys/syscallargs.h>
52 #include <sys/pool.h>
53 
54 #include <machine/spinlock.h>
55 
56 #ifdef KTRACE
57 #include <sys/ktrace.h>
58 #endif
59 
60 void updatepri(struct proc *);
61 void endtsleep(void *);
62 
/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
/*
 * Hash bucket of sleeping processes: a singly-linked list threaded
 * through p_forw, plus a tail pointer so ltsleep() can append in O(1).
 * sq_head is NULL when the bucket is empty; sq_tailp points at the
 * p_forw field of the last entry (or at sq_head itself — callers must
 * keep it consistent when unlinking, see unsleep()/wakeup_n()).
 */
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[TABLESIZE];

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;
85 
/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal (return EINTR).
 *
 * The interlock is held until the scheduler_slock (XXX) is held.  The
 * interlock will be locked before returning back to the caller
 * unless the PNORELOCK flag is specified, in which case the
 * interlock will always be unlocked upon return.
 */
int
ltsleep(void *ident, int priority, const char *wmesg, int timo,
    volatile struct simplelock *interlock)
{
	struct proc *p = curproc;
	struct slpque *qp;
	int s, sig;
	int catch = priority & PCATCH;		/* check signals around the sleep? */
	int relock = (priority & PNORELOCK) == 0; /* re-acquire interlock on return? */

	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		/* PNORELOCK still means "drop the interlock before returning". */
		if (interlock != NULL && relock == 0)
			simple_unlock(interlock);
		return (0);
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	SCHED_LOCK(s);

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
	if (p->p_back != NULL)
		panic("tsleep: p_back not NULL");
#endif

	/* Record what we are sleeping on and append to the hash bucket. */
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	/*
	 * Arm the per-proc sleep timeout; endtsleep() will fire on expiry.
	 * NOTE(review): p_sleep_to is presumably timeout_set() elsewhere
	 * (e.g. at proc creation) — confirm.
	 */
	if (timo)
		timeout_add(&p->p_sleep_to, timo);
	/*
	 * We can now release the interlock; the scheduler_slock
	 * is held, so a thread can't get in to do wakeup() before
	 * we do the switch.
	 *
	 * XXX We leave the code block here, after inserting ourselves
	 * on the sleep queue, because we might want a more clever
	 * data structure for the sleep queues at some point.
	 */
	if (interlock != NULL)
		simple_unlock(interlock);

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			/* Signal pending: abort the sleep before switching. */
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SONPROC;
			goto resume;
		}
		if (p->p_wchan == 0) {
			/* Woken while stopped inside CURSIG; nothing to wait for. */
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;	/* voluntary context switch */
	SCHED_ASSERT_LOCKED();
	mi_switch();
#ifdef	DDB
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep\nbpendtsleep:");
#endif

resume:
	SCHED_UNLOCK(s);

#ifdef __HAVE_CPUINFO
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
#else
	curpriority = p->p_usrpri;
#endif
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		/* endtsleep() fired: the timeout expired before any wakeup. */
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p, 0, 0);
#endif
			if (interlock != NULL && relock)
				simple_lock(interlock);
			return (EWOULDBLOCK);
		}
	} else if (timo)
		timeout_del(&p->p_sleep_to);	/* woke up first; disarm timeout */
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p, 0, 0);
#endif
		if (interlock != NULL && relock)
			simple_lock(interlock);
		/* EINTR if the handler was installed with SA_INTERRUPT-style
		 * semantics (ps_sigintr), otherwise let the syscall restart. */
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif

	if (interlock != NULL && relock)
		simple_lock(interlock);
	return (0);
}
241 
/*
 * Implement timeout for tsleep: timeout_add()'d by ltsleep(), runs when
 * the sleep has lasted timo ticks.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p;
	int s;

	p = (struct proc *)arg;
	SCHED_LOCK(s);
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);	/* e.g. SSTOP: dequeue but stay stopped */
		/* ltsleep() tests P_TIMEOUT after mi_switch() to return EWOULDBLOCK. */
		p->p_flag |= P_TIMEOUT;
	}
	SCHED_UNLOCK(s);
}
265 
/*
 * Remove a process from its wait queue.
 * Walks the hash bucket for p->p_wchan, unlinks p, fixes the tail
 * pointer if p was last, and clears p_wchan.  No-op if p is not asleep.
 */
void
unsleep(struct proc *p)
{
	struct slpque *qp;
	struct proc **hp;
#if 0
	int s;

	/*
	 * XXX we cannot do recursive SCHED_LOCKing yet.  All callers lock
	 * anyhow.
	 */
	SCHED_LOCK(s);
#endif
	if (p->p_wchan) {
		hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
		/* p must be on this bucket, or this walk runs off the list. */
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;	/* p was last; retreat the tail */
		p->p_wchan = 0;
	}
#if 0
	SCHED_UNLOCK(s);
#endif
}
296 
/*
 * Make a number of processes sleeping on the specified identifier runnable.
 * At most n matching sleepers are processed; a negative n (as passed by
 * wakeup()) never reaches zero, so every sleeper on ident is woken.
 * Note that a matching proc in SSTOP also counts against n: it is removed
 * from the sleep queue but left stopped.
 */
void
wakeup_n(void *ident, int n)
{
	struct slpque *qp;
	struct proc *p, **q;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back)
			panic("wakeup: p_back not NULL");
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (p->p_wchan == ident) {
			--n;
			/* Unlink p from the bucket; q intentionally not advanced. */
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;

				/*
				 * Since curpriority is a user priority,
				 * p->p_priority is always better than
				 * curpriority on the last CPU on
				 * which it ran.
				 *
				 * XXXSMP See affinity comment in
				 * resched_proc().
				 */
				setrunqueue(p);
#ifdef __HAVE_CPUINFO
				KASSERT(p->p_cpu != NULL);
				need_resched(p->p_cpu);
#else
				need_resched(NULL);
#endif
				/* END INLINE EXPANSION */

				/*
				 * Rescan from the bucket head if more wakeups
				 * remain; NOTE(review): presumably because the
				 * queue may have been modified while unlocked
				 * paths ran — confirm before simplifying.
				 */
				if (n != 0)
					goto restart;
				else
					break;
			}
		} else
			q = &p->p_forw;
	}
	SCHED_UNLOCK(s);
}
358 
/*
 * Make all processes sleeping on the specified identifier runnable.
 * A negative count never decrements to zero in wakeup_n(), so every
 * sleeper on the channel is woken.
 */
void
wakeup(void *ident)
{
	wakeup_n(ident, -1);
}
367 
368 int
369 sys_sched_yield(struct proc *p, void *v, register_t *retval)
370 {
371 	yield();
372 	return (0);
373 }
374 
375 #ifdef RTHREADS
376 
377 int
378 sys_thrsleep(struct proc *p, void *v, register_t *revtal)
379 {
380 	struct sys_thrsleep_args *uap = v;
381 	long ident = (long)SCARG(uap, ident);
382 	int timo = SCARG(uap, timeout);
383 	_spinlock_lock_t *lock = SCARG(uap, lock);
384 	_spinlock_lock_t unlocked = _SPINLOCK_UNLOCKED;
385 	int error;
386 
387 	p->p_thrslpid = ident;
388 
389 	if (lock)
390 		copyout(&unlocked, lock, sizeof(unlocked));
391 	if (hz > 1000)
392 		timo = timo * (hz / 1000);
393 	else
394 		timo = timo / (1000 / hz);
395 	if (timo < 0)
396 		timo = 0;
397 	error = tsleep(&p->p_thrslpid, PUSER | PCATCH, "thrsleep", timo);
398 
399 	return (error);
400 
401 }
402 
/*
 * thrwakeup(2): wake up to n threads of this process that are blocked
 * in thrsleep() on the given ident.  n == 0 wakes every match (the
 * "++found == n" test can then never hit).  Returns ESRCH if no thread
 * was sleeping on ident.
 */
int
sys_thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys_thrwakeup_args *uap = v;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	struct proc *q;
	int found = 0;

	/* have to check the parent, it's not in the thread list */
	if (p->p_thrparent->p_thrslpid == ident) {
		wakeup(&p->p_thrparent->p_thrslpid);
		p->p_thrparent->p_thrslpid = 0;	/* consume the sleep channel */
		if (++found == n)
			return (0);
	}
	LIST_FOREACH(q, &p->p_thrparent->p_thrchildren, p_thrsib) {
		if (q->p_thrslpid == ident) {
			wakeup(&q->p_thrslpid);
			q->p_thrslpid = 0;
			if (++found == n)
				return (0);
		}
	}
	if (!found)
		return (ESRCH);

	return (0);
}
432 #endif
433