/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sobject.h>
#include <sys/sleepq.h>
#include <sys/cpuvar.h>
#include <sys/condvar.h>
#include <sys/condvar_impl.h>
#include <sys/schedctl.h>
#include <sys/procfs.h>
#include <sys/sdt.h>

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)

/*
 * Threads don't "own" condition variables.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}

/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p", t, sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}

/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};

/* ARGSUSED */
void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	((condvar_impl_t *)cvp)->cv_waiters = 0;
}

/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}

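/*
 * A minimal usage sketch (not part of the interfaces defined above): the
 * usual lifetime of a condition variable and the kmutex it is paired with.
 * The names xx_lock and xx_cv are hypothetical.
 *
 *	mutex_init(&xx_lock, NULL, MUTEX_DEFAULT, NULL);
 *	cv_init(&xx_cv, NULL, CV_DEFAULT, NULL);
 *	...
 *	cv_destroy(&xx_cv);		(no waiters may remain)
 *	mutex_destroy(&xx_lock);
 */
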
/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid doing the accounting
	 * for an interrupt thread on the still-pinned lwp's
	 * statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock.  This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}

#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }

/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();
	mutex_enter(mp);
}

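/*
 * A minimal usage sketch (assuming hypothetical xx_lock, xx_cv and
 * xx_ready): the canonical wait-side pattern for cv_wait().  Since a
 * waiter can be awakened without its condition being true (for example
 * when cv_broadcast() wakes every waiter), the predicate is always
 * re-tested in a loop while holding the mutex.
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready)
 *		cv_wait(&xx_cv, &xx_lock);
 *	...use the now-true condition...
 *	mutex_exit(&xx_lock);
 */
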
/*
 * Same as cv_wait except the thread will unblock at 'tim'
 * (an absolute time) if it hasn't already unblocked.
 *
 * Returns the amount of time left from the original 'tim' value
 * when it was unblocked.
 */
clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	timeout_id_t id;
	clock_t timeleft;
	int signalled;

	if (panicstr)
		return (-1);

	timeleft = tim - lbolt;
	if (timeleft <= 0)
		return (-1);
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left.  untimeout() returns -1 if the timeout has
	 * occurred, or the time remaining otherwise.  If the time remaining
	 * is zero, the timeout occurred between when we were awoken and
	 * when we called untimeout.  We will treat this as if the timeout
	 * has occurred and set timeleft to -1.
	 */
	timeleft = untimeout(id);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}

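/*
 * A minimal usage sketch (assuming hypothetical xx_lock, xx_cv, xx_ready
 * and a 5-second timeout): cv_timedwait() takes an absolute time in
 * ticks, so a relative timeout is normally expressed as lbolt plus a
 * delta, e.g. via drv_usectohz().  A return of -1 means the deadline
 * passed.
 *
 *	clock_t deadline = lbolt + drv_usectohz(5 * MICROSEC);
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		if (cv_timedwait(&xx_cv, &xx_lock, deadline) == -1)
 *			break;			(timed out)
 *	}
 *	mutex_exit(&xx_lock);
 */
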
int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}

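/*
 * A minimal usage sketch (assuming hypothetical xx_lock, xx_cv and
 * xx_ready): cv_wait_sig() returns 0 when the wait was interrupted by a
 * signal or a pending lwp cancellation, in which case a system call
 * typically backs out and returns EINTR.
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		if (cv_wait_sig(&xx_cv, &xx_lock) == 0) {
 *			mutex_exit(&xx_lock);
 *			return (EINTR);
 *		}
 *	}
 *	mutex_exit(&xx_lock);
 */
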
/*
 * Returns:
 *	Function result in order of precedence:
 *		0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast().
 *		(returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending = 0;
	timeout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred.  So just check to see if there is a signal
	 * pending.  If so, return 0 indicating that there is a signal pending.
	 * Else return -1 indicating that the timeout occurred.  No need to
	 * wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	cancel_pending = schedctl_cancel_pending();
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending ||
	    (tim - lbolt <= 0))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout has
	 * occurred, or the time remaining otherwise.  If the time remaining
	 * is zero, the timeout occurred between when we were awoken and
	 * when we called untimeout.  We will treat this as if the timeout
	 * has occurred and set rval to -1.
	 */
	rval = untimeout(id);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}

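/*
 * A minimal usage sketch (assuming hypothetical xx_lock, xx_cv, xx_ready
 * and deadline_ticks): handling the three classes of cv_timedwait_sig()
 * return values described above.
 *
 *	clock_t deadline = lbolt + deadline_ticks;
 *	clock_t r;
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		r = cv_timedwait_sig(&xx_cv, &xx_lock, deadline);
 *		if (r == 0) {			(interrupted by a signal)
 *			mutex_exit(&xx_lock);
 *			return (EINTR);
 *		}
 *		if (r == -1) {			(timed out)
 *			mutex_exit(&xx_lock);
 *			return (ETIME);
 *		}
 *		(r > 0: woken via cv_signal()/cv_broadcast(); re-test)
 *	}
 *	mutex_exit(&xx_lock);
 */
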
/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}

/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}

void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}

void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}

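/*
 * A minimal usage sketch (assuming hypothetical xx_lock, xx_cv and
 * xx_ready): the wakeup side that pairs with the wait loops sketched
 * above.  The condition is changed under the same mutex the waiters
 * use, then cv_signal() wakes one waiter, or cv_broadcast() wakes
 * them all.
 *
 *	mutex_enter(&xx_lock);
 *	xx_ready = B_TRUE;
 *	cv_signal(&xx_cv);		(or cv_broadcast(&xx_cv))
 *	mutex_exit(&xx_lock);
 */
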
/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge.  It is evil.  It is vile.  It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	timeout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * If there is no lwp, then we don't need to eventually stop it.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return;
	}

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 */
	tim = lbolt + MSEC_TO_TICK(wakeup_time);
	id = realtime_timeout((void (*)(void *))setrun, t, tim - lbolt);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	(void) untimeout(id);

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);
}

/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks.  Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 *	Function result in order of precedence:
 *		0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process,
 * letting it reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime_lasttick(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig(cvp, mp, lbolt);
	} else {
		if (timecheck == timechanged) {
			rval = cv_timedwait_sig(cvp, mp,
			    lbolt + timespectohz(when, now));
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
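
/*
 * A minimal usage sketch (assuming hypothetical xx_lock, xx_cv, xx_ready
 * and an absolute timestruc_t deadline ts): callers of cv_waituntil_sig()
 * snapshot timechanged before computing the deadline so that a clock
 * reset in between can be detected, as described above.
 *
 *	int timecheck = timechanged;	(snapshot before computing ts)
 *	timestruc_t ts = ...;		(absolute wakeup time)
 *	int r;
 *
 *	mutex_enter(&xx_lock);
 *	while (!xx_ready) {
 *		r = cv_waituntil_sig(&xx_cv, &xx_lock, &ts, timecheck);
 *		if (r <= 0)
 *			break;		(0: signal, -1: timeout)
 *	}
 *	mutex_exit(&xx_lock);
 */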