/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sobject.h>
#include <sys/sleepq.h>
#include <sys/cpuvar.h>
#include <sys/condvar.h>
#include <sys/condvar_impl.h>
#include <sys/schedctl.h>
#include <sys/procfs.h>
#include <sys/sdt.h>

/*
 * CV_MAX_WAITERS is the maximum number of waiters we track; once
 * the number becomes higher than that, we look at the sleepq to
 * see whether there are *really* any waiters.
 */
#define	CV_MAX_WAITERS		1024		/* must be power of 2 */
#define	CV_WAITERS_MASK		(CV_MAX_WAITERS - 1)

/*
 * Threads don't "own" condition variables.
 */
/* ARGSUSED */
static kthread_t *
cv_owner(void *cvp)
{
	return (NULL);
}

/*
 * Unsleep a thread that's blocked on a condition variable.
 */
static void
cv_unsleep(kthread_t *t)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_head_t *sqh = SQHASH(cvp);

	ASSERT(THREAD_LOCK_HELD(t));

	if (cvp == NULL)
		panic("cv_unsleep: thread %p not on sleepq %p",
		    (void *)t, (void *)sqh);
	DTRACE_SCHED1(wakeup, kthread_t *, t);
	sleepq_unsleep(t);
	if (cvp->cv_waiters != CV_MAX_WAITERS)
		cvp->cv_waiters--;
	disp_lock_exit_high(&sqh->sq_lock);
	CL_SETRUN(t);
}

/*
 * Change the priority of a thread that's blocked on a condition variable.
 */
static void
cv_change_pri(kthread_t *t, pri_t pri, pri_t *t_prip)
{
	condvar_impl_t *cvp = (condvar_impl_t *)t->t_wchan;
	sleepq_t *sqp = t->t_sleepq;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(&SQHASH(cvp)->sq_queue == sqp);

	if (cvp == NULL)
		panic("cv_change_pri: %p not on sleep queue", (void *)t);
	sleepq_dequeue(t);
	*t_prip = pri;
	sleepq_insert(sqp, t);
}

/*
 * The sobj_ops vector exports a set of functions needed when a thread
 * is asleep on a synchronization object of this type.
 */
static sobj_ops_t cv_sobj_ops = {
	SOBJ_CV, cv_owner, cv_unsleep, cv_change_pri
};

/* ARGSUSED */
void
cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	((condvar_impl_t *)cvp)->cv_waiters = 0;
}

/*
 * cv_destroy is not currently needed, but is part of the DDI.
 * This is in case cv_init ever needs to allocate something for a cv.
 */
/* ARGSUSED */
void
cv_destroy(kcondvar_t *cvp)
{
	ASSERT((((condvar_impl_t *)cvp)->cv_waiters & CV_WAITERS_MASK) == 0);
}
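
/*
 * Usage sketch (illustrative only, not part of this file's interfaces):
 * kernel clients initialize a condition variable with cv_init() before
 * first use and tear it down with cv_destroy() once no thread can still
 * be waiting on it.  The DDI-documented driver form passes a NULL name
 * and arg with type CV_DRIVER; 'example_cv' below is a hypothetical
 * variable.
 *
 *	kcondvar_t example_cv;
 *
 *	cv_init(&example_cv, NULL, CV_DRIVER, NULL);
 *	...
 *	cv_destroy(&example_cv);
 */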

/*
 * The cv_block() function blocks a thread on a condition variable
 * by putting it in a hashed sleep queue associated with the
 * synchronization object.
 *
 * Threads are taken off the hashed sleep queues via calls to
 * cv_signal(), cv_broadcast(), or cv_unsleep().
 */
static void
cv_block(condvar_impl_t *cvp)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	sleepq_head_t *sqh;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t != CPU->cpu_idle_thread);
	ASSERT(CPU_ON_INTR(CPU) == 0);
	ASSERT(t->t_wchan0 == NULL && t->t_wchan == NULL);
	ASSERT(t->t_state == TS_ONPROC);

	t->t_schedflag &= ~TS_SIGNALLED;
	CL_SLEEP(t);			/* assign kernel priority */
	t->t_wchan = (caddr_t)cvp;
	t->t_sobj_ops = &cv_sobj_ops;
	DTRACE_SCHED(sleep);

	/*
	 * The check for t_intr is to avoid doing the
	 * accounting for an interrupt thread on the still-pinned
	 * lwp's statistics.
	 */
	if (lwp != NULL && t->t_intr == NULL) {
		lwp->lwp_ru.nvcsw++;
		(void) new_mstate(t, LMS_SLEEP);
	}

	sqh = SQHASH(cvp);
	disp_lock_enter_high(&sqh->sq_lock);
	if (cvp->cv_waiters < CV_MAX_WAITERS)
		cvp->cv_waiters++;
	ASSERT(cvp->cv_waiters <= CV_MAX_WAITERS);
	THREAD_SLEEP(t, &sqh->sq_lock);
	sleepq_insert(&sqh->sq_queue, t);
	/*
	 * THREAD_SLEEP() moves curthread->t_lockp to point to the
	 * lock sqh->sq_lock.  This lock is later released by the caller
	 * when it calls thread_unlock() on curthread.
	 */
}

#define	cv_block_sig(t, cvp)	\
	{ (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }

/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	if (panicstr)
		return;

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);
	swtch();
	mutex_enter(mp);
}
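
/*
 * Usage sketch (illustrative, not part of the original file): the canonical
 * caller pattern for cv_wait()/cv_signal().  The waiter re-checks its
 * predicate in a loop, since it can be awakened when the condition is not
 * (or is no longer) true, e.g. after cv_broadcast() raced with another
 * consumer.  The names example_lock, example_cv and example_ready are
 * hypothetical.
 *
 * Waiter:
 *	mutex_enter(&example_lock);
 *	while (!example_ready)
 *		cv_wait(&example_cv, &example_lock);
 *	... consume the event under example_lock ...
 *	mutex_exit(&example_lock);
 *
 * Poster:
 *	mutex_enter(&example_lock);
 *	example_ready = 1;
 *	cv_signal(&example_cv);		(cv_broadcast() wakes all waiters)
 *	mutex_exit(&example_lock);
 */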

/*
 * Same as cv_wait except the thread will unblock at 'tim'
 * (an absolute time) if it hasn't already unblocked.
 *
 * Returns the amount of time left from the original 'tim' value
 * when it was unblocked.
 */
clock_t
cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	timeout_id_t id;
	clock_t timeleft;
	int signalled;

	if (panicstr)
		return (-1);

	timeleft = tim - lbolt;
	if (timeleft <= 0)
		return (-1);
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	/*
	 * Get the time left.  untimeout() returns -1 if the timeout has
	 * occurred, or else the time remaining.  If the time remaining is
	 * zero, the timeout occurred between when we were awoken and when
	 * we called untimeout.  We will treat this as if the timeout has
	 * occurred and set timeleft to -1.
	 */
	timeleft = untimeout(id);
	mutex_enter(mp);
	if (timeleft <= 0) {
		timeleft = -1;
		if (signalled)	/* avoid consuming the cv_signal() */
			cv_signal(cvp);
	}
	return (timeleft);
}
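
/*
 * Usage sketch (illustrative): cv_timedwait() takes an absolute deadline
 * in clock ticks, so a caller typically adds a relative interval to lbolt
 * once and keeps re-arming with the same deadline while re-checking its
 * predicate.  The names example_lock, example_cv, example_ready and
 * timeout_usec are hypothetical.
 *
 *	clock_t deadline = lbolt + drv_usectohz(timeout_usec);
 *
 *	mutex_enter(&example_lock);
 *	while (!example_ready) {
 *		if (cv_timedwait(&example_cv, &example_lock, deadline) == -1)
 *			break;		(deadline passed, condition still false)
 *	}
 *	mutex_exit(&example_lock);
 */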

int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}

/*
 * Returns:
 *	Function result in order of precedence:
 *		0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast().
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending = 0;
	timeout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred.  So just check to see if there is a signal
	 * pending.  If so return 0 indicating that there is a signal pending.
	 * Else return -1 indicating that the timeout occurred.  No need to
	 * wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	cancel_pending = schedctl_cancel_pending();
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending ||
	    (tim - lbolt <= 0))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout has
	 * occurred, or else the time remaining.  If the time remaining is
	 * zero, the timeout occurred between when we were awoken and when
	 * we called untimeout.  We will treat this as if the timeout has
	 * occurred and set rval to -1.
	 */
	rval = untimeout(id);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}

/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.
 * This is a vile hack that should only be used when no other option
 * is available; almost all callers should just use cv_wait_sig_swap
 * (which takes care of the cv_signal stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int cancel_pending;
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}
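
/*
 * Usage sketch (illustrative): the _sig variants return 0 when the wait
 * was interrupted by a signal (or a pending lwp cancellation) and nonzero
 * on a normal wakeup, so syscall paths typically back out and return
 * EINTR.  The names example_lock, example_cv and example_ready are
 * hypothetical.
 *
 *	mutex_enter(&example_lock);
 *	while (!example_ready) {
 *		if (cv_wait_sig(&example_cv, &example_lock) == 0) {
 *			mutex_exit(&example_lock);
 *			return (EINTR);		(interrupted; give up)
 *		}
 *	}
 *	mutex_exit(&example_lock);
 */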

/*
 * Same as cv_wait_sig but the thread can be swapped out while waiting.
 * This should only be used when we know we aren't holding any locks.
 */
int
cv_wait_sig_swap(kcondvar_t *cvp, kmutex_t *mp)
{
	return (cv_wait_sig_swap_core(cvp, mp, NULL));
}

void
cv_signal(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		if (cp->cv_waiters & CV_WAITERS_MASK) {
			kthread_t *t;
			cp->cv_waiters--;
			t = sleepq_wakeone_chan(&sqh->sq_queue, cp);
			/*
			 * If cv_waiters is non-zero (and less than
			 * CV_MAX_WAITERS) there should be a thread
			 * in the queue.
			 */
			ASSERT(t != NULL);
		} else if (sleepq_wakeone_chan(&sqh->sq_queue, cp) == NULL) {
			cp->cv_waiters = 0;
		}
		disp_lock_exit(&sqh->sq_lock);
	}
}

void
cv_broadcast(kcondvar_t *cvp)
{
	condvar_impl_t *cp = (condvar_impl_t *)cvp;

	/* make sure the cv_waiters field looks sane */
	ASSERT(cp->cv_waiters <= CV_MAX_WAITERS);
	if (cp->cv_waiters > 0) {
		sleepq_head_t *sqh = SQHASH(cp);
		disp_lock_enter(&sqh->sq_lock);
		ASSERT(CPU_ON_INTR(CPU) == 0);
		sleepq_wakeall_chan(&sqh->sq_queue, cp);
		cp->cv_waiters = 0;
		disp_lock_exit(&sqh->sq_lock);
	}
}

/*
 * Same as cv_wait(), but wakes up (after wakeup_time milliseconds) to check
 * for requests to stop, like cv_wait_sig() but without dealing with signals.
 * This is a horrible kludge.  It is evil.  It is vile.  It is swill.
 * If your code has to call this function then your code is the same.
 */
void
cv_wait_stop(kcondvar_t *cvp, kmutex_t *mp, int wakeup_time)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	timeout_id_t id;
	clock_t tim;

	if (panicstr)
		return;

	/*
	 * If there is no lwp, then we don't need to eventually stop it.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return;
	}

	/*
	 * Wakeup in wakeup_time milliseconds, i.e., human time.
	 */
	tim = lbolt + MSEC_TO_TICK(wakeup_time);
	id = realtime_timeout((void (*)(void *))setrun, t, tim - lbolt);
	thread_lock(t);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/* ASSERT(no locks are held); */
	if ((tim - lbolt) <= 0)		/* allow for wrap */
		setrun(t);
	swtch();
	(void) untimeout(id);

	/*
	 * Check for reasons to stop, if lwp_nostop is not true.
	 * See issig_forreal() for explanations of the various stops.
	 */
	mutex_enter(&p->p_lock);
	while (lwp->lwp_nostop == 0 && !(p->p_flag & SEXITLWPS)) {
		/*
		 * Hold the lwp here for watchpoint manipulation.
		 */
		if (t->t_proc_flag & TP_PAUSE) {
			stop(PR_SUSPENDED, SUSPEND_PAUSE);
			continue;
		}
		/*
		 * System checkpoint.
		 */
		if (t->t_proc_flag & TP_CHKPT) {
			stop(PR_CHECKPOINT, 0);
			continue;
		}
		/*
		 * Honor fork1(), watchpoint activity (remapping a page),
		 * and lwp_suspend() requests.
		 */
		if ((p->p_flag & (SHOLDFORK1|SHOLDWATCH)) ||
		    (t->t_proc_flag & TP_HOLDLWP)) {
			stop(PR_SUSPENDED, SUSPEND_NORMAL);
			continue;
		}
		/*
		 * Honor /proc requested stop.
		 */
		if (t->t_proc_flag & TP_PRSTOP) {
			stop(PR_REQUESTED, 0);
		}
		/*
		 * If some lwp in the process has already stopped
		 * showing PR_JOBCONTROL, stop in sympathy with it.
		 */
		if (p->p_stopsig && t != p->p_agenttp) {
			stop(PR_JOBCONTROL, p->p_stopsig);
			continue;
		}
		break;
	}
	mutex_exit(&p->p_lock);
	mutex_enter(mp);
}

/*
 * Like cv_timedwait_sig(), but takes an absolute hires future time
 * rather than a future time in clock ticks.  Will not return showing
 * that a timeout occurred until the future time is passed.
 * If 'when' is a NULL pointer, no timeout will occur.
 * Returns:
 *	Function result in order of precedence:
 *		0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast()
 *		   or by a spurious wakeup.
 *		   (might return time remaining)
 * As a special test, if someone abruptly resets the system time
 * (but not through adjtime(2); drifting of the clock is allowed and
 * expected [see timespectohz_adj()]), then we force a return of -1
 * so the caller can return a premature timeout to the calling process
 * so it can reevaluate the situation in light of the new system time.
 * (The system clock has been reset if timecheck != timechanged.)
 */
int
cv_waituntil_sig(kcondvar_t *cvp, kmutex_t *mp,
	timestruc_t *when, int timecheck)
{
	timestruc_t now;
	timestruc_t delta;
	int rval;

	if (when == NULL)
		return (cv_wait_sig_swap(cvp, mp));

	gethrestime(&now);
	delta = *when;
	timespecsub(&delta, &now);
	if (delta.tv_sec < 0 || (delta.tv_sec == 0 && delta.tv_nsec == 0)) {
		/*
		 * We have already reached the absolute future time.
		 * Call cv_timedwait_sig() just to check for signals.
		 * We will return immediately with either 0 or -1.
		 */
		rval = cv_timedwait_sig(cvp, mp, lbolt);
	} else {
		gethrestime_lasttick(&now);
		if (timecheck == timechanged) {
			rval = cv_timedwait_sig(cvp, mp,
			    lbolt + timespectohz(when, now));
		} else {
			/*
			 * Someone reset the system time;
			 * just force an immediate timeout.
			 */
			rval = -1;
		}
		if (rval == -1 && timecheck == timechanged) {
			/*
			 * Even though cv_timedwait_sig() returned showing a
			 * timeout, the future time may not have passed yet.
			 * If not, change rval to indicate a normal wakeup.
			 */
			gethrestime(&now);
			delta = *when;
			timespecsub(&delta, &now);
			if (delta.tv_sec > 0 || (delta.tv_sec == 0 &&
			    delta.tv_nsec > 0))
				rval = 1;
		}
	}
	return (rval);
}
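
/*
 * Usage sketch for cv_waituntil_sig() (illustrative): a caller converts a
 * relative interval into an absolute hires deadline, snapshotting the
 * global 'timechanged' first so that a subsequent step of the system
 * clock is detected and reported as a premature timeout (rval == -1).
 * The names example_lock, example_cv, example_ready and 'interval' are
 * hypothetical.
 *
 *	timestruc_t when;
 *	int timecheck;
 *	int rval = 1;
 *
 *	timecheck = timechanged;
 *	gethrestime(&when);
 *	timespecadd(&when, &interval);		(absolute wakeup time)
 *
 *	mutex_enter(&example_lock);
 *	while (rval > 0 && !example_ready)
 *		rval = cv_waituntil_sig(&example_cv, &example_lock,
 *		    &when, timecheck);
 *	mutex_exit(&example_lock);
 *
 * Here rval == 0 means a signal was received (typically mapped to EINTR)
 * and rval == -1 means the deadline passed or the clock was reset.
 */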