/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sobject.h>
#include <sys/sleepq.h>
#include <sys/cpuvar.h>
#include <sys/condvar.h>
#include <sys/condvar_impl.h>
#include <sys/schedctl.h>
#include <sys/procfs.h>
#include <sys/sdt.h>

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)

/*
 * Threads don't "own" condition variables.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}
/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p", t, sqh);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}

/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};

/* ARGSUSED */
void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	((condvar_impl_t *)cvp)->cv_waiters = 0;
}

/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}
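/*
 * Illustrative sketch (not part of this file): a typical caller pairs the
 * condition variable with the mutex that protects its predicate.  Since this
 * implementation ignores the name/type/arg parameters, callers conventionally
 * pass NULL, CV_DEFAULT, NULL; mutex_init()/mutex_destroy() are the
 * corresponding mutex(9F) interfaces (the xx_* names are made up):
 *
 *	kmutex_t	xx_lock;
 *	kcondvar_t	xx_cv;
 *
 *	mutex_init(&xx_lock, NULL, MUTEX_DEFAULT, NULL);
 *	cv_init(&xx_cv, NULL, CV_DEFAULT, NULL);
 *	...
 *	cv_destroy(&xx_cv);
 *	mutex_destroy(&xx_lock);
 */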
/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid doing the accounting for an
	 * interrupt thread on the still-pinned lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock.  This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}

#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }

/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();
	mutex_enter(mp);
}
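/*
 * Illustrative sketch (not part of this file): callers of cv_wait()
 * conventionally re-check their predicate in a loop after waking, since a
 * broadcast wakes every waiter and the condition may already have been
 * consumed by the time the mutex is reacquired (the xx_* names are made up):
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready)
 *		cv_wait(&xx_cv, &xx_lock);
 *	... use the now-true condition ...
 *	mutex_exit(&xx_lock);
 */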
/*
 * Same as cv_wait except the thread will unblock at 'tim'
 * (an absolute time) if it hasn't already unblocked.
 *
 * Returns the amount of time left from the original 'tim' value
 * when it was unblocked.
 */
clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	timeout_id_t id;
	clock_t timeleft;
	int signalled;

	if (panicstr)
		return (-1);

	timeleft = tim - lbolt;
	if (timeleft <= 0)
		return (-1);
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left.  untimeout() returns -1 if the timeout has
	 * occurred, or else the time remaining.  If the time remaining is
	 * zero, the timeout occurred between when we were awoken and when
	 * we called untimeout.  We will treat this as if the timeout had
	 * occurred and set timeleft to -1.
	 */
	timeleft = untimeout(id);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}
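/*
 * Illustrative sketch (not part of this file): because cv_timedwait() takes
 * an absolute deadline in ticks, callers typically add a relative timeout to
 * lbolt and treat a return of -1 as a timeout (the xx_* names are made up):
 *
 *	clock_t deadline = lbolt + MSEC_TO_TICK(5000);
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		if (cv_timedwait(&xx_cv, &xx_lock, deadline) == -1)
 *			break;		(-1 means the deadline passed)
 *	}
 *	mutex_exit(&xx_lock);
 */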
/*
 * Same as cv_wait() except that this wait can be interrupted by the
 * receipt of a signal or a forced return to user level.  Returns 0 if
 * interrupted, non-zero otherwise.
 */
int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
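/*
 * Illustrative sketch (not part of this file): system calls that use
 * cv_wait_sig() typically map a return of 0 (interrupted) to EINTR
 * (the xx_* names are made up):
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		if (cv_wait_sig(&xx_cv, &xx_lock) == 0) {
 *			mutex_exit(&xx_lock);
 *			return (EINTR);
 *		}
 *	}
 *	mutex_exit(&xx_lock);
 */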
/*
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast().
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	timeout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred.  So just check to see if there is a signal
	 * pending.  If so, return 0 indicating that there is a signal pending.
	 * Else return -1 indicating that the timeout occurred.  No need to
	 * wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || (tim - lbolt <= 0))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout has
	 * occurred, or else the time remaining.  If the time remaining is
	 * zero, the timeout occurred between when we were awoken and when
	 * we called untimeout.  We will treat this as if the timeout had
	 * occurred and set rval to -1.
	 */
	rval = untimeout(id);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
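/*
 * Illustrative sketch (not part of this file): callers of cv_timedwait_sig()
 * typically dispatch on the three-way return value documented above; the
 * error codes shown are only examples of what a caller might return
 * (the xx_* names are made up):
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		clock_t r = cv_timedwait_sig(&xx_cv, &xx_lock,
 *		    lbolt + MSEC_TO_TICK(1000));
 *		if (r == 0) {
 *			... interrupted by a signal, e.g. return EINTR ...
 *		} else if (r == -1) {
 *			... timed out, e.g. return ETIME ...
 *		}
 *	}
 *	mutex_exit(&xx_lock);
 */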
/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}
/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}

void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}

void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}
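/*
 * Illustrative sketch (not part of this file): the waking side updates the
 * shared condition while holding the same mutex the waiters use, then calls
 * cv_broadcast() when any number of waiters may proceed, or cv_signal() when
 * exactly one can (the xx_* names are made up):
 *
 *	mutex_enter(&xx_lock);
 *	xx_ready = 1;
 *	cv_broadcast(&xx_cv);
 *	mutex_exit(&xx_lock);
 */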
/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge.  It is evil.  It is vile.  It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	timeout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * If there is no lwp, then we don't need to eventually stop it.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return;
	}

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 */
	tim = lbolt + MSEC_TO_TICK(wakeup_time);
	id = realtime_timeout((void (*)(void *))setrun, t, tim - lbolt);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	(void) untimeout(id);

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);
}

/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks.  Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 *	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime(&now);
	if (when->tv_sec < now.tv_sec ||
	    (when->tv_sec == now.tv_sec &&
	    when->tv_nsec <= now.tv_nsec)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig(cvp, mp, lbolt);
	} else {
		if (timecheck == timechanged) {
			rval = cv_timedwait_sig(cvp, mp,
			    lbolt + timespectohz_adj(when, now));
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			if (when->tv_sec > now.tv_sec ||
			    (when->tv_sec == now.tv_sec &&
			    when->tv_nsec > now.tv_nsec))
				rval = 1;
		}
	}
	return (rval);
}