/*	$OpenBSD: kern_synch.c,v 1.79 2007/04/03 08:05:43 art Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <uvm/uvm_extern.h>
#include <sys/sched.h>
#include <sys/timeout.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <sys/pool.h>

#include <machine/spinlock.h>

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

void updatepri(struct proc *);
void endtsleep(void *);

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[TABLESIZE];
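
/*
 * Worked example of the hash above (the addresses are illustrative): a
 * wait channel at 0x80001234 hashes to ((0x80001234 >> 8) & 127) == 0x12,
 * i.e. bucket 18, and a channel at 0x800012f0 lands in the same bucket.
 * Collisions like that are harmless: wakeup_n() matches queue entries
 * against p_wchan rather than waking the whole bucket.
 */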

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 */
int
tsleep(void *ident, int priority, const char *wmesg, int timo)
{
	struct sleep_state sls;
	int error, error1;

	if (cold || panicstr) {
		int s;
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		s = splhigh();
		splx(safepri);
		splx(s);
		return (0);
	}

	sleep_setup(&sls, ident, priority, wmesg);
	sleep_setup_timeout(&sls, timo);
	sleep_setup_signal(&sls, priority);

	sleep_finish(&sls, 1);
	error1 = sleep_finish_timeout(&sls);
	error = sleep_finish_signal(&sls);

	/* Signal errors are higher priority than timeouts. */
	if (error == 0 && error1 != 0)
		error = error1;

	return (error);
}
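
/*
 * A minimal sketch of the usual calling pattern for tsleep(); the softc
 * pointer and SC_BUSY flag below are hypothetical.  The condition is
 * re-checked in a loop, since being awakened only means the channel was
 * signalled, not that the condition now holds, and the check and the
 * sleep are done at an spl that blocks whoever clears the condition.
 * With a zero timeout the only possible errors are EINTR and ERESTART.
 *
 *	int s, error = 0;
 *
 *	s = splbio();
 *	while (sc->sc_flags & SC_BUSY) {
 *		error = tsleep(&sc->sc_flags, PWAIT | PCATCH, "scbusy", 0);
 *		if (error)
 *			break;
 *	}
 *	splx(s);
 */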

void
sleep_setup(struct sleep_state *sls, void *ident, int prio, const char *wmesg)
{
	struct proc *p = curproc;
	struct slpque *qp;

#ifdef DIAGNOSTIC
	if (ident == NULL)
		panic("tsleep: no ident");
	if (p->p_stat != SONPROC)
		panic("tsleep: not SONPROC");
	if (p->p_back != NULL)
		panic("tsleep: p_back not NULL");
#endif

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 1, 0);
#endif

	sls->sls_catch = 0;
	sls->sls_do_sleep = 1;
	sls->sls_sig = 1;

	SCHED_LOCK(sls->sls_s);

	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = prio & PRIMASK;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = NULL;
}

void
sleep_finish(struct sleep_state *sls, int do_sleep)
{
	struct proc *p = curproc;

	if (sls->sls_do_sleep && do_sleep) {
		p->p_stat = SSLEEP;
		p->p_stats->p_ru.ru_nvcsw++;
		SCHED_ASSERT_LOCKED();
		mi_switch();
	} else if (!do_sleep) {
		unsleep(p);
#ifdef DIAGNOSTIC
		if (p->p_stat != SONPROC)
			panic("sleep_finish !SONPROC");
#endif
	}

#ifdef __HAVE_CPUINFO
	p->p_cpu->ci_schedstate.spc_curpriority = p->p_usrpri;
#else
	curpriority = p->p_usrpri;
#endif
	SCHED_UNLOCK(sls->sls_s);

	/*
	 * Even though this belongs to the signal handling part of sleep,
	 * we need to clear it before the ktrace.
	 */
	atomic_clearbits_int(&p->p_flag, P_SINTR);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p, 0, 0);
#endif
}

void
sleep_setup_timeout(struct sleep_state *sls, int timo)
{
	if (timo)
		timeout_add(&curproc->p_sleep_to, timo);
}

int
sleep_finish_timeout(struct sleep_state *sls)
{
	struct proc *p = curproc;

	if (p->p_flag & P_TIMEOUT) {
		atomic_clearbits_int(&p->p_flag, P_TIMEOUT);
		return (EWOULDBLOCK);
	} else if (timeout_pending(&p->p_sleep_to)) {
		timeout_del(&p->p_sleep_to);
	}

	return (0);
}

void
sleep_setup_signal(struct sleep_state *sls, int prio)
{
	struct proc *p = curproc;

	if ((sls->sls_catch = (prio & PCATCH)) == 0)
		return;

	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	atomic_setbits_int(&p->p_flag, P_SINTR);
	if ((sls->sls_sig = CURSIG(p)) != 0) {
		if (p->p_wchan)
			unsleep(p);
		p->p_stat = SONPROC;
		sls->sls_do_sleep = 0;
	} else if (p->p_wchan == 0) {
		sls->sls_catch = 0;
		sls->sls_do_sleep = 0;
	}
}

int
sleep_finish_signal(struct sleep_state *sls)
{
	struct proc *p = curproc;

	if (sls->sls_catch != 0) {
		if (sls->sls_sig != 0 || (sls->sls_sig = CURSIG(p)) != 0) {
			if (p->p_sigacts->ps_sigintr & sigmask(sls->sls_sig))
				return (EINTR);
			return (ERESTART);
		}
	}

	return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(void *arg)
{
	struct proc *p = arg;
	int s;

	SCHED_LOCK(s);
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		atomic_setbits_int(&p->p_flag, P_TIMEOUT);
	}
	SCHED_UNLOCK(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(struct proc *p)
{
	struct slpque *qp;
	struct proc **hp;

	if (p->p_wchan) {
		hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
}

/*
 * Make a number of processes sleeping on the specified identifier runnable;
 * wakeup() passes a negative n to make all of them runnable.
 */
void
wakeup_n(void *ident, int n)
{
	struct slpque *qp;
	struct proc *p, **q;
	int s;

	SCHED_LOCK(s);
	qp = &slpque[LOOKUP(ident)];
restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back)
			panic("wakeup: p_back not NULL");
		if (p->p_stat != SSLEEP && p->p_stat != SSTOP)
			panic("wakeup: p_stat is %d", (int)p->p_stat);
#endif
		if (p->p_wchan == ident) {
			--n;
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;

				/*
				 * Since curpriority is a user priority,
				 * p->p_priority is always better than
				 * curpriority on the last CPU on
				 * which it ran.
				 *
				 * XXXSMP See affinity comment in
				 * resched_proc().
				 */
				setrunqueue(p);
#ifdef __HAVE_CPUINFO
				KASSERT(p->p_cpu != NULL);
				need_resched(p->p_cpu);
#else
				need_resched(NULL);
#endif
				/* END INLINE EXPANSION */

				if (n != 0)
					goto restart;
				else
					break;
			}
		} else
			q = &p->p_forw;
	}
	SCHED_UNLOCK(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(void *chan)
{
	wakeup_n(chan, -1);
}
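
/*
 * The producer side of the sketch shown after tsleep() above (same
 * hypothetical softc and flag): clear the condition first, then wake
 * everything sleeping on the channel.
 *
 *	s = splbio();
 *	sc->sc_flags &= ~SC_BUSY;
 *	wakeup(&sc->sc_flags);
 *	splx(s);
 *
 * wakeup_n() can be used to wake only a limited number of sleepers;
 * wakeup() is simply wakeup_n() with a count of -1.
 */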

int
sys_sched_yield(struct proc *p, void *v, register_t *retval)
{
	yield();
	return (0);
}

#ifdef RTHREADS

int
sys_thrsleep(struct proc *p, void *v, register_t *retval)
{
	struct sys_thrsleep_args *uap = v;
	long ident = (long)SCARG(uap, ident);
	int timo = SCARG(uap, timeout);
	_spinlock_lock_t *lock = SCARG(uap, lock);
	_spinlock_lock_t unlocked = _SPINLOCK_UNLOCKED;
	int error;

	p->p_thrslpid = ident;

	/* Release the userland spinlock, if any, before going to sleep. */
	if (lock)
		copyout(&unlocked, lock, sizeof(unlocked));

	/* Convert the millisecond timeout to clock ticks. */
	if (hz > 1000)
		timo = timo * (hz / 1000);
	else
		timo = timo / (1000 / hz);
	if (timo < 0)
		timo = 0;
	error = tsleep(&p->p_thrslpid, PUSER | PCATCH, "thrsleep", timo);

	if (error == ERESTART)
		error = EINTR;

	return (error);
}

int
sys_thrwakeup(struct proc *p, void *v, register_t *retval)
{
	struct sys_thrwakeup_args *uap = v;
	long ident = (long)SCARG(uap, ident);
	int n = SCARG(uap, n);
	struct proc *q;
	int found = 0;

	/* Wake up to n threads sleeping on ident; n == 0 wakes them all. */
	TAILQ_FOREACH(q, &p->p_p->ps_threads, p_thr_link) {
		if (q->p_thrslpid == ident) {
			wakeup(&q->p_thrslpid);
			q->p_thrslpid = 0;
			if (++found == n)
				return (0);
		}
	}
	if (!found)
		return (ESRCH);

	return (0);
}
#endif
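
/*
 * Worked example of the sys_thrsleep() timeout conversion above (the hz
 * values are illustrative): the millisecond timeout argument is scaled
 * to clock ticks.  With hz = 100, a 1500 ms timeout becomes
 * 1500 / (1000 / 100) = 150 ticks; with hz = 2000, it becomes
 * 1500 * (2000 / 1000) = 3000 ticks.  A result of 0 (or a negative
 * argument, which is clamped to 0) means no timeout, as with tsleep().
 */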