/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#define	atomic_cas_64	_atomic_cas_64

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/rtpriocntl.h>
#include <sys/sdt.h>
#include <atomic.h>

#if defined(THREAD_DEBUG)
#define	INCR32(x)	(((x) != UINT32_MAX)? (x)++ : 0)
#define	INCR(x)		((x)++)
#define	DECR(x)		((x)--)
#define	MAXINCR(m, x)	((m < ++x)? (m = x) : 0)
#else
#define	INCR32(x)
#define	INCR(x)
#define	DECR(x)
#define	MAXINCR(m, x)
#endif

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);

/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}

/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin count of 1000 is experimentally determined.
 * On sun4u machines with any number of processors it could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variable:
 *	_THREAD_ADAPTIVE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_queue_verify = 0;
static	int	ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * We try harder to acquire queue locks by spinning.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_queue_spin = 10000;

#define	ALL_ATTRIBUTES				\
	(LOCK_RECURSIVE | LOCK_ERRORCHECK |	\
	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT |	\
	LOCK_ROBUST)

/*
 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
 *	LOCK_RECURSIVE
 *	LOCK_ERRORCHECK
 *	LOCK_PRIO_INHERIT
 *	LOCK_PRIO_PROTECT
 *	LOCK_ROBUST
 */
#pragma weak mutex_init = __mutex_init
#pragma weak _mutex_init = __mutex_init
/* ARGSUSED2 */
int
__mutex_init(mutex_t *mp, int type, void *arg)
{
	int basetype = (type & ~ALL_ATTRIBUTES);
	const pcclass_t *pccp;
	int error = 0;
	int ceil;

	if (basetype == USYNC_PROCESS_ROBUST) {
		/*
		 * USYNC_PROCESS_ROBUST is a deprecated historical type.
		 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
		 * retain the USYNC_PROCESS_ROBUST flag so we can return
		 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
		 * mutexes will ever draw ELOCKUNMAPPED).
		 */
		type |= (USYNC_PROCESS | LOCK_ROBUST);
		basetype = USYNC_PROCESS;
	}

	if (type & LOCK_PRIO_PROTECT)
		pccp = get_info_by_policy(SCHED_FIFO);
	if ((basetype != USYNC_THREAD && basetype != USYNC_PROCESS) ||
	    (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
	    == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
	    ((type & LOCK_PRIO_PROTECT) &&
	    ((ceil = *(int *)arg) < pccp->pcc_primin ||
	    ceil > pccp->pcc_primax))) {
		error = EINVAL;
	} else if (type & LOCK_ROBUST) {
		/*
		 * Callers of mutex_init() with the LOCK_ROBUST attribute
		 * are required to pass an initially all-zero mutex.
		 * Multiple calls to mutex_init() are allowed; all but
		 * the first return EBUSY.  A call to mutex_init() is
		 * allowed to make an inconsistent robust lock consistent
		 * (for historical usage, even though the proper interface
		 * for this is mutex_consistent()).  Note that we use
		 * atomic_or_16() to set the LOCK_INITED flag so as
		 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
		 */
		extern void _atomic_or_16(volatile uint16_t *, uint16_t);
		if (!(mp->mutex_flag & LOCK_INITED)) {
			mp->mutex_type = (uint8_t)type;
			_atomic_or_16(&mp->mutex_flag, LOCK_INITED);
			mp->mutex_magic = MUTEX_MAGIC;
		} else if (type != mp->mutex_type ||
		    ((type & LOCK_PRIO_PROTECT) &&
		    mp->mutex_ceiling != ceil)) {
			error = EINVAL;
		} else if (__mutex_consistent(mp) != 0) {
			error = EBUSY;
		}
		/* register a process robust mutex with the kernel */
		if (basetype == USYNC_PROCESS)
			register_lock(mp);
	} else {
		(void) memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		mp->mutex_magic = MUTEX_MAGIC;
	}

	if (error == 0 && (type & LOCK_PRIO_PROTECT)) {
		mp->mutex_ceiling = ceil;
	}

	return (error);
}

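/*
 * A note on the LOCK_PRIO_PROTECT path in __mutex_init() above: 'arg'
 * points to the requested ceiling priority, which is validated against
 * the real-time class limits obtained from get_info_by_policy(SCHED_FIFO)
 * and then recorded in mutex_ceiling.  The helper functions that follow
 * maintain the per-thread bookkeeping (the ul_mxchain list) for such
 * ceiling mutexes and adjust the owning thread's effective priority.
 */
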
/*
 * Delete mp from list of ceiling mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	for (mcpp = &self->ul_mxchain;
	    (mcp = *mcpp) != NULL;
	    mcpp = &mcp->mxchain_next) {
		if (mcp->mxchain_mx == mp) {
			*mcpp = mcp->mxchain_next;
			lfree(mcp, sizeof (*mcp));
			return (mcpp == &self->ul_mxchain);
		}
	}
	return (0);
}

/*
 * Add mp to the list of ceiling mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}

/*
 * Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
 */
static void
set_rt_priority(ulwp_t *self, int prio)
{
	pcparms_t pcparm;

	pcparm.pc_cid = self->ul_rtclassid;
	((rtparms_t *)pcparm.pc_clparms)->rt_tqnsecs = RT_NOCHANGE;
	((rtparms_t *)pcparm.pc_clparms)->rt_pri = prio;
	(void) priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm);
}

/*
 * Inherit priority from ceiling.
 * This changes the effective priority, not the assigned priority.
 */
void
_ceil_prio_inherit(int prio)
{
	ulwp_t *self = curthread;

	self->ul_epri = prio;
	set_rt_priority(self, prio);
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp = self->ul_mxchain;
	int prio;

	if (mcp == NULL) {
		prio = self->ul_pri;
		self->ul_epri = 0;
	} else {
		prio = mcp->mxchain_mx->mutex_ceiling;
		self->ul_epri = prio;
	}
	set_rt_priority(self, prio);
}

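/*
 * The functions below manipulate the mutex lock word(s) directly with
 * compare-and-swap loops.  Each helper modifies only its own field --
 * the lock byte, the spinners count, or (for the 64-bit lock word) the
 * lock byte together with the owner pid -- and leaves the remaining
 * bits, such as the waiters byte, undisturbed.
 */
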
/*
 * Clear the lock byte.  Retain the waiters byte and the spinners byte.
 * Return the old value of the lock word.
 */
static uint32_t
clear_lockbyte(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		new = old & ~LOCKMASK;
	} while (atomic_cas_32(lockword, old, new) != old);

	return (old);
}

/*
 * Same as clear_lockbyte(), but operates on mutex_lockword64.
 * The mutex_ownerpid field is cleared along with the lock byte.
 */
static uint64_t
clear_lockbyte64(volatile uint64_t *lockword64)
{
	uint64_t old;
	uint64_t new;

	do {
		old = *lockword64;
		new = old & ~LOCKMASK64;
	} while (atomic_cas_64(lockword64, old, new) != old);

	return (old);
}

/*
 * Similar to set_lock_byte(), which only tries to set the lock byte.
 * Here, we attempt to set the lock byte AND the mutex_ownerpid,
 * keeping the remaining bytes constant.
 */
static int
set_lock_byte64(volatile uint64_t *lockword64, pid_t ownerpid)
{
	uint64_t old;
	uint64_t new;

	old = *lockword64 & ~LOCKMASK64;
	new = old | ((uint64_t)(uint_t)ownerpid << PIDSHIFT) | LOCKBYTE64;
	if (atomic_cas_64(lockword64, old, new) == old)
		return (LOCKCLEAR);

	return (LOCKSET);
}

/*
 * Increment the spinners count in the mutex lock word.
 * Return 0 on success.  Return -1 if the count would overflow.
 */
static int
spinners_incr(volatile uint32_t *lockword, uint8_t max_spinners)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		if (((old & SPINNERMASK) >> SPINNERSHIFT) >= max_spinners)
			return (-1);
		new = old + (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (0);
}

/*
 * Decrement the spinners count in the mutex lock word.
 * Return the new value of the lock word.
 */
static uint32_t
spinners_decr(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		new = old = *lockword;
		if (new & SPINNERMASK)
			new -= (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (new);
}

/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 * No DTrace probes are provided for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
	ulwp_t *self = curthread;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Spin for a while, attempting to acquire the lock.
	 */
	INCR32(self->ul_spin_lock_spin);
	if (mutex_queuelock_adaptive(mp) == 0 ||
	    set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Try harder if we were previously at a no preemption level.
	 */
	if (self->ul_preempt > 1) {
		INCR32(self->ul_spin_lock_spin2);
		if (mutex_queuelock_adaptive(mp) == 0 ||
		    set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			return;
		}
	}
	/*
	 * Give up and block in the kernel for the mutex.
	 */
	INCR32(self->ul_spin_lock_sleep);
	(void) ___lwp_mutex_timedlock(mp, NULL);
	mp->mutex_owner = (uintptr_t)self;
}

void
spin_lock_clear(mutex_t *mp)
{
	ulwp_t *self = curthread;

	mp->mutex_owner = 0;
	if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
		(void) ___lwp_mutex_wakeup(mp, 0);
		INCR32(self->ul_spin_lock_wakeup);
	}
	preempt(self);
}

/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	void *data;
	int i;

	/*
	 * No locks are needed; we call here only when single-threaded.
	 */
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	if ((data = mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread queue_head table");
	udp->queue_head = qp = (queue_head_t *)data;
	for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
		qp->qh_type = (i < QHASHSIZE)? MX : CV;
		qp->qh_lock.mutex_flag = LOCK_INITED;
		qp->qh_lock.mutex_magic = MUTEX_MAGIC;
		qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
		qp->qh_hlen = 1;
		qp->qh_hmax = 1;
#endif
	}
}

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_root_t *qrp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	uint_t index;
	uint32_t cnt;
	char qtype;
	void *wchan;

	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		cnt++;
		ASSERT((qrp->qr_head != NULL && qrp->qr_tail != NULL) ||
		    (qrp->qr_head == NULL && qrp->qr_tail == NULL));
	}
	ASSERT(qp->qh_hlen == cnt && qp->qh_hmax >= cnt);
	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
	ASSERT(qp->qh_type == qtype);
	if (!thread_queue_verify)
		return;
	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		for (prev = NULL, ulwp = qrp->qr_head; ulwp != NULL;
		    prev = ulwp, ulwp = ulwp->ul_link) {
			cnt++;
			if (ulwp->ul_writer)
				ASSERT(prev == NULL || prev->ul_writer);
			ASSERT(ulwp->ul_qtype == qtype);
			ASSERT(ulwp->ul_wchan != NULL);
			ASSERT(ulwp->ul_sleepq == qp);
			wchan = ulwp->ul_wchan;
			ASSERT(qrp->qr_wchan == wchan);
			index = QUEUE_HASH(wchan, qtype);
			ASSERT(&udp->queue_head[index] == qp);
		}
		ASSERT(qrp->qr_tail == prev);
	}
	ASSERT(qp->qh_qlen == cnt);
}

#else	/* THREAD_DEBUG */

#define	QVERIFY(qp)

#endif	/* THREAD_DEBUG */

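/*
 * The queue_head[] table allocated in queue_alloc() has 2 * QHASHSIZE
 * entries: the first QHASHSIZE hash buckets are used for mutexes (MX)
 * and the second QHASHSIZE for condition variables (CV), as reflected
 * in the qh_type assignments above.  queue_lock(), below, indexes into
 * the table with QUEUE_HASH(wchan, qtype) and serializes access to the
 * selected bucket with its non-preemptive qh_lock.
 */
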
/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;
	queue_root_t *qrp;

	ASSERT(qtype == MX || qtype == CV);

	/*
	 * It is possible that we could be called while still single-threaded.
	 * If so, we call queue_alloc() to allocate the queue_head[] array.
	 */
	if ((qp = udp->queue_head) == NULL) {
		queue_alloc();
		qp = udp->queue_head;
	}
	qp += QUEUE_HASH(wchan, qtype);
	spin_lock_set(&qp->qh_lock);
	for (qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next)
		if (qrp->qr_wchan == wchan)
			break;
	if (qrp == NULL && qp->qh_def_root.qr_head == NULL) {
		/* the default queue root is available; use it */
		qrp = &qp->qh_def_root;
		qrp->qr_wchan = wchan;
		ASSERT(qrp->qr_next == NULL);
		ASSERT(qrp->qr_tail == NULL &&
		    qrp->qr_rtcount == 0 && qrp->qr_qlen == 0);
	}
	qp->qh_wchan = wchan;	/* valid until queue_unlock() is called */
	qp->qh_root = qrp;	/* valid until queue_unlock() is called */
	INCR32(qp->qh_lockcount);
	QVERIFY(qp);
	return (qp);
}

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
	QVERIFY(qp);
	spin_lock_clear(&qp->qh_lock);
}

/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.  We do this by making writers appear to have a half
 * point higher priority for purposes of priority comparisons below.
 */
#define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)

void
enqueue(queue_head_t *qp, ulwp_t *ulwp, int force_fifo)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *next;
	int pri = CMP_PRIO(ulwp);

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(ulwp->ul_sleepq != qp);

	if ((qrp = qp->qh_root) == NULL) {
		/* use the thread's queue root for the linkage */
		qrp = &ulwp->ul_queue_root;
		qrp->qr_next = qp->qh_hlist;
		qrp->qr_prev = NULL;
		qrp->qr_head = NULL;
		qrp->qr_tail = NULL;
		qrp->qr_wchan = qp->qh_wchan;
		qrp->qr_rtcount = 0;
		qrp->qr_qlen = 0;
		qrp->qr_qmax = 0;
		qp->qh_hlist->qr_prev = qrp;
		qp->qh_hlist = qrp;
		qp->qh_root = qrp;
		MAXINCR(qp->qh_hmax, qp->qh_hlen);
	}

	/*
	 * LIFO queue ordering is unfair and can lead to starvation,
	 * but it gives better performance for heavily contended locks.
	 * We use thread_queue_fifo (range is 0..8) to determine
	 * the frequency of FIFO vs LIFO queuing:
	 *	0 : every 256th time	(almost always LIFO)
	 *	1 : every 128th time
	 *	2 : every 64th time
	 *	3 : every 32nd time
	 *	4 : every 16th time	(the default value, mostly LIFO)
	 *	5 : every 8th time
	 *	6 : every 4th time
	 *	7 : every 2nd time
	 *	8 : every time		(never LIFO, always FIFO)
	 * Note that there is always some degree of FIFO ordering.
	 * This breaks live lock conditions that occur in applications
	 * that are written assuming (incorrectly) that threads acquire
	 * locks fairly, that is, in roughly round-robin order.
	 * In any event, the queue is maintained in kernel priority order.
	 *
	 * If force_fifo is non-zero, fifo queueing is forced.
	 * SUSV3 requires this for semaphores.
	 */
	if (qrp->qr_head == NULL) {
		/*
		 * The queue is empty.  LIFO/FIFO doesn't matter.
		 */
		ASSERT(qrp->qr_tail == NULL);
		ulwpp = &qrp->qr_head;
	} else if (force_fifo |
	    (((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0)) {
		/*
		 * Enqueue after the last thread whose priority is greater
		 * than or equal to the priority of the thread being queued.
		 * Attempt first to go directly onto the tail of the queue.
		 */
		if (pri <= CMP_PRIO(qrp->qr_tail))
			ulwpp = &qrp->qr_tail->ul_link;
		else {
			for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
			    ulwpp = &next->ul_link)
				if (pri > CMP_PRIO(next))
					break;
		}
	} else {
		/*
		 * Enqueue before the first thread whose priority is less
		 * than or equal to the priority of the thread being queued.
		 * Hopefully we can go directly onto the head of the queue.
		 */
		for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
		    ulwpp = &next->ul_link)
			if (pri >= CMP_PRIO(next))
				break;
	}
	if ((ulwp->ul_link = *ulwpp) == NULL)
		qrp->qr_tail = ulwp;
	*ulwpp = ulwp;

	ulwp->ul_sleepq = qp;
	ulwp->ul_wchan = qp->qh_wchan;
	ulwp->ul_qtype = qp->qh_type;
	if ((ulwp->ul_schedctl != NULL &&
	    ulwp->ul_schedctl->sc_cid == ulwp->ul_rtclassid) |
	    ulwp->ul_pilocks) {
		ulwp->ul_rtqueued = 1;
		qrp->qr_rtcount++;
	}
	MAXINCR(qrp->qr_qmax, qrp->qr_qlen);
	MAXINCR(qp->qh_qmax, qp->qh_qlen);
}

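/*
 * A worked example of the FIFO/LIFO selection in enqueue() above:
 * with the default ul_queue_fifo value of 4, the expression
 * (((++qp->qh_qcnt << 4) & 0xff) == 0) is true whenever qh_qcnt is a
 * multiple of 2^(8-4) = 16, that is, once every 16 calls.  At the
 * extremes, a value of 0 yields FIFO once every 256 calls and a value
 * of 8 shifts the count entirely out of the low byte, forcing FIFO on
 * every call, matching the table in the comment above.
 */
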
/*
 * Helper function for queue_slot() and queue_slot_rt().
 * Try to find a non-suspended thread on the queue.
 */
static ulwp_t **
queue_slot_runnable(ulwp_t **ulwpp, ulwp_t **prevp, int rt)
{
	ulwp_t *ulwp;
	ulwp_t **foundpp = NULL;
	int priority = -1;
	ulwp_t *prev;
	int tpri;

	for (prev = NULL;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_stop)	/* skip suspended threads */
			continue;
		tpri = rt? CMP_PRIO(ulwp) : 0;
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
			if (!rt)
				break;
		}
	}
	return (foundpp);
}

/*
 * For real-time, we search the entire queue because the dispatch
 * (kernel) priorities may have changed since enqueueing.
 */
static ulwp_t **
queue_slot_rt(ulwp_t **ulwpp_org, ulwp_t **prevp)
{
	ulwp_t **ulwpp = ulwpp_org;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t **foundpp = ulwpp;
	int priority = CMP_PRIO(ulwp);
	ulwp_t *prev;
	int tpri;

	for (prev = ulwp, ulwpp = &ulwp->ul_link;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		tpri = CMP_PRIO(ulwp);
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
		}
	}
	ulwp = *foundpp;

	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(ulwpp_org, prevp, 1)) != NULL) {
		foundpp = ulwpp;
		ulwp = *foundpp;
	}
	ulwp->ul_rt = 1;
	return (foundpp);
}

ulwp_t **
queue_slot(queue_head_t *qp, ulwp_t **prevp, int *more)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int rt;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

	if ((qrp = qp->qh_root) == NULL || (ulwp = qrp->qr_head) == NULL) {
		*more = 0;
		return (NULL);		/* no lwps on the queue */
	}
	rt = (qrp->qr_rtcount != 0);
	*prevp = NULL;
	if (ulwp->ul_link == NULL) {	/* only one lwp on the queue */
		*more = 0;
		ulwp->ul_rt = rt;
		return (&qrp->qr_head);
	}
	*more = 1;

	if (rt)		/* real-time queue */
		return (queue_slot_rt(&qrp->qr_head, prevp));
	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(&qrp->qr_head, prevp, 0)) != NULL) {
		ulwp = *ulwpp;
		ulwp->ul_rt = 0;
		return (ulwpp);
	}
	/*
	 * The common case; just pick the first thread on the queue.
	 */
	ulwp->ul_rt = 0;
	return (&qrp->qr_head);
}

/*
 * Common code for unlinking an lwp from a user-level sleep queue.
 */
void
queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
{
	queue_root_t *qrp = qp->qh_root;
	queue_root_t *nqrp;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t *next;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(qp->qh_wchan != NULL && ulwp->ul_wchan == qp->qh_wchan);

	DECR(qp->qh_qlen);
	DECR(qrp->qr_qlen);
	if (ulwp->ul_rtqueued) {
		ulwp->ul_rtqueued = 0;
		qrp->qr_rtcount--;
	}
	next = ulwp->ul_link;
	*ulwpp = next;
	ulwp->ul_link = NULL;
	if (qrp->qr_tail == ulwp)
		qrp->qr_tail = prev;
	if (qrp == &ulwp->ul_queue_root) {
		/*
		 * We can't continue to use the unlinked thread's
		 * queue root for the linkage.
		 */
		queue_root_t *qr_next = qrp->qr_next;
		queue_root_t *qr_prev = qrp->qr_prev;

		if (qrp->qr_tail) {
			/* switch to using the last thread's queue root */
			ASSERT(qrp->qr_qlen != 0);
			nqrp = &qrp->qr_tail->ul_queue_root;
			*nqrp = *qrp;
			if (qr_next)
				qr_next->qr_prev = nqrp;
			if (qr_prev)
				qr_prev->qr_next = nqrp;
			else
				qp->qh_hlist = nqrp;
			qp->qh_root = nqrp;
		} else {
			/* empty queue root; just delete from the hash list */
			ASSERT(qrp->qr_qlen == 0);
			if (qr_next)
				qr_next->qr_prev = qr_prev;
			if (qr_prev)
				qr_prev->qr_next = qr_next;
			else
				qp->qh_hlist = qr_next;
			qp->qh_root = NULL;
			DECR(qp->qh_hlen);
		}
	}
}

ulwp_t *
dequeue(queue_head_t *qp, int *more)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;

	if ((ulwpp = queue_slot(qp, &prev, more)) == NULL)
		return (NULL);
	ulwp = *ulwpp;
	queue_unlink(qp, ulwpp, prev);
	ulwp->ul_sleepq = NULL;
	ulwp->ul_wchan = NULL;
	return (ulwp);
}

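/*
 * Both dequeue() above and queue_waiter() below rely on queue_slot()
 * to select the candidate thread: normally the thread at the head of
 * the queue, but a higher-priority thread when real-time threads are
 * queued, and preferably not a suspended thread in either case.
 */
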
/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp)
{
	ulwp_t **ulwpp;
	ulwp_t *prev;
	int more;

	if ((ulwpp = queue_slot(qp, &prev, &more)) == NULL)
		return (NULL);
	return (*ulwpp);
}

int
dequeue_self(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int found = 0;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

	/* find self on the sleep queue */
	if ((qrp = qp->qh_root) != NULL) {
		for (prev = NULL, ulwpp = &qrp->qr_head;
		    (ulwp = *ulwpp) != NULL;
		    prev = ulwp, ulwpp = &ulwp->ul_link) {
			if (ulwp == self) {
				queue_unlink(qp, ulwpp, prev);
				self->ul_cvmutex = NULL;
				self->ul_sleepq = NULL;
				self->ul_wchan = NULL;
				found = 1;
				break;
			}
		}
	}

	if (!found)
		thr_panic("dequeue_self(): curthread not found on queue");

	return ((qrp = qp->qh_root) != NULL && qrp->qr_head != NULL);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourself off of our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;

	/*
	 * Calling enter_critical()/exit_critical() here would lead
	 * to recursion.  Just manipulate self->ul_critical directly.
	 */
	self->ul_critical++;
	while (self->ul_sleepq != NULL) {
		qp = queue_lock(self->ul_wchan, self->ul_qtype);
		/*
		 * We may have been moved from a CV queue to a
		 * mutex queue while we were attempting queue_lock().
		 * If so, just loop around and try again.
		 * dequeue_self() clears self->ul_sleepq.
		 */
		if (qp == self->ul_sleepq)
			(void) dequeue_self(qp);
		queue_unlock(qp);
	}
	self->ul_writer = 0;
	self->ul_critical--;
}

/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
static int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep;
	int acquired;
	int error;

	self->ul_sp = stkptr();
	self->ul_wchan = mp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}
	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;
	self->ul_wchan = NULL;
	self->ul_sp = 0;

	if (acquired) {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	int error;
	int acquired;

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_trylock(mp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}

	if (acquired) {
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else if (error != EBUSY) {
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	sc_shared_t *tmp;

	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
	    !self->ul_vfork &&			/* not a child of vfork() */
	    !self->ul_schedctl_called) {	/* haven't been called before */
		enter_critical(self);
		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
			self->ul_schedctl = scp = tmp;
		exit_critical(self);
	}
	/*
	 * Unless the call to setup_schedctl() is surrounded
	 * by enter_critical()/exit_critical(), the address
	 * we are returning could be invalid due to a forkall()
	 * having occurred in another thread.
	 */
	return (scp);
}

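/*
 * The schedctl shared page set up above is consulted by the adaptive
 * spin loops later in this file: a spinning thread examines the lock
 * owner's sc_state and stops spinning when the owner is not SC_ONPROC,
 * that is, not currently running on a processor.
 */
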
/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = _schedctl_init
#pragma weak _schedctl_lookup = _schedctl_init
#pragma weak schedctl_init = _schedctl_init
schedctl_t *
_schedctl_init(void)
{
	volatile sc_shared_t *scp = setup_schedctl();
	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}

#pragma weak schedctl_exit = _schedctl_exit
void
_schedctl_exit(void)
{
}

/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *volatile *ptr;

	if (self->ul_vfork)
		return (NULL);
	if (*(ptr = &self->ul_schedctl) == NULL)
		(void) setup_schedctl();
	return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_preempt++ == 0) {
		enter_critical(self);
		if ((scp = self->ul_schedctl) != NULL ||
		    (scp = setup_schedctl()) != NULL) {
			/*
			 * Save the pre-existing preempt value.
			 */
			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
			scp->sc_preemptctl.sc_nopreempt = 1;
		}
	}
}

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	ASSERT(self->ul_preempt > 0);
	if (--self->ul_preempt == 0) {
		if ((scp = self->ul_schedctl) != NULL) {
			/*
			 * Restore the pre-existing preempt value.
			 */
			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
			if (scp->sc_preemptctl.sc_yield &&
			    scp->sc_preemptctl.sc_nopreempt == 0) {
				yield();
				if (scp->sc_preemptctl.sc_yield) {
					/*
					 * Shouldn't happen.  This is either
					 * a race condition or the thread
					 * just entered the real-time class.
					 */
					yield();
					scp->sc_preemptctl.sc_yield = 0;
				}
			}
		}
		exit_critical(self);
	}
}

/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
	volatile sc_shared_t *scp = self->ul_schedctl;

	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
	    (self->ul_curplease && self->ul_critical == 1)) {
		(void) __lwp_unpark(lwpid);
		lwpid = 0;
	}
	return (lwpid);
}

/*
 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
static int
mutex_trylock_adaptive(mutex_t *mp, int tryhard)
{
	ulwp_t *self = curthread;
	int error = EBUSY;
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
	volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
	uint32_t new_lockword;
	int count = 0;
	int max_count;
	uint8_t max_spinners;

	ASSERT(!(mp->mutex_type & USYNC_PROCESS));

	if (MUTEX_OWNER(mp) == self)
		return (EBUSY);

	/* short-cut, not definitive (see below) */
	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		error = ENOTRECOVERABLE;
		goto done;
	}

	/*
	 * Make one attempt to acquire the lock before
	 * incurring the overhead of the spin loop.
	 */
	if (set_lock_byte(lockp) == 0) {
		*ownerp = (uintptr_t)self;
		error = 0;
		goto done;
	}
	if (!tryhard)
		goto done;
	if (ncpus == 0)
		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	if ((max_spinners = self->ul_max_spinners) >= ncpus)
		max_spinners = ncpus - 1;
	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
	if (max_count == 0)
		goto done;

	/*
	 * This spin loop is unfair to lwps that have already dropped into
	 * the kernel to sleep.  They will starve on a highly-contended mutex.
	 * This is just too bad.  The adaptive spin algorithm is intended
	 * to allow programs with highly-contended locks (that is, broken
	 * programs) to execute with reasonable speed despite their contention.
	 * Being fair would reduce the speed of such programs and well-written
	 * programs will not suffer in any case.
	 */
	enter_critical(self);
	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
		exit_critical(self);
		goto done;
	}
	DTRACE_PROBE1(plockstat, mutex__spin, mp);
	for (count = 1; ; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
			break;
		}
		if (count == max_count)
			break;
		SMT_PAUSE();
		/*
		 * Stop spinning if the mutex owner is not running on
		 * a processor; it will not drop the lock any time soon
		 * and we would just be wasting time to keep spinning.
		 *
		 * Note that we are looking at another thread (ulwp_t)
		 * without ensuring that the other thread does not exit.
		 * The scheme relies on ulwp_t structures never being
		 * deallocated by the library (the library employs a free
		 * list of ulwp_t structs that are reused when new threads
		 * are created) and on schedctl shared memory never being
		 * deallocated once created via __schedctl().
		 *
		 * Thus, the worst that can happen when the spinning thread
		 * looks at the owner's schedctl data is that it is looking
		 * at some other thread's schedctl data.  This almost never
		 * happens and is benign when it does.
		 */
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}
	new_lockword = spinners_decr(&mp->mutex_lockword);
	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
		/*
		 * We haven't yet acquired the lock, the lock
		 * is free, and there are no other spinners.
		 * Make one final attempt to acquire the lock.
		 *
		 * This isn't strictly necessary since mutex_lock_queue()
		 * (the next action this thread will take if it doesn't
		 * acquire the lock here) makes one attempt to acquire
		 * the lock before putting the thread to sleep.
		 *
		 * If the next action for this thread (on failure here)
		 * were not to call mutex_lock_queue(), this would be
		 * necessary for correctness, to avoid ending up with an
		 * unheld mutex with waiters but no one to wake them up.
		 */
		if (set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
		}
		count++;
	}
	exit_critical(self);

done:
	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		/*
		 * We shouldn't own the mutex.
		 * Just clear the lock; everyone has already been waked up.
		 */
		mp->mutex_owner = 0;
		(void) clear_lockbyte(&mp->mutex_lockword);
		error = ENOTRECOVERABLE;
	}

	if (error) {
		if (count) {
			DTRACE_PROBE2(plockstat, mutex__spun, 0, count);
		}
		if (error != EBUSY) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
		}
	} else {
		if (count) {
			DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
		}
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
		if (mp->mutex_flag & LOCK_OWNERDEAD) {
			ASSERT(mp->mutex_type & LOCK_ROBUST);
			error = EOWNERDEAD;
		}
	}

	return (error);
}

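/*
 * In mutex_trylock_adaptive() above, the number of concurrent spinners
 * is bounded by min(ul_max_spinners, ncpus - 1) and the spin duration
 * by ul_adaptive_spin (cf. the thread_max_spinners and
 * thread_adaptive_spin tunables at the top of this file).  If the spin
 * fails but the lock has meanwhile become free with no other spinners
 * remaining, one final set_lock_byte() attempt is made before giving
 * up and returning EBUSY to the caller.
 */
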
13790Sstevel@tonic-gate */ 13804574Sraf static int 13810Sstevel@tonic-gate mutex_queuelock_adaptive(mutex_t *mp) 13820Sstevel@tonic-gate { 13830Sstevel@tonic-gate ulwp_t *ulwp; 13840Sstevel@tonic-gate volatile sc_shared_t *scp; 13850Sstevel@tonic-gate volatile uint8_t *lockp; 13860Sstevel@tonic-gate volatile uint64_t *ownerp; 13870Sstevel@tonic-gate int count = curthread->ul_queue_spin; 13880Sstevel@tonic-gate 13890Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 13900Sstevel@tonic-gate 13910Sstevel@tonic-gate if (count == 0) 13920Sstevel@tonic-gate return (EBUSY); 13930Sstevel@tonic-gate 13940Sstevel@tonic-gate lockp = (volatile uint8_t *)&mp->mutex_lockw; 13950Sstevel@tonic-gate ownerp = (volatile uint64_t *)&mp->mutex_owner; 13960Sstevel@tonic-gate while (--count >= 0) { 13970Sstevel@tonic-gate if (*lockp == 0 && set_lock_byte(lockp) == 0) 13980Sstevel@tonic-gate return (0); 13990Sstevel@tonic-gate SMT_PAUSE(); 14000Sstevel@tonic-gate if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL && 14010Sstevel@tonic-gate ((scp = ulwp->ul_schedctl) == NULL || 14020Sstevel@tonic-gate scp->sc_state != SC_ONPROC)) 14030Sstevel@tonic-gate break; 14040Sstevel@tonic-gate } 14050Sstevel@tonic-gate 14060Sstevel@tonic-gate return (EBUSY); 14070Sstevel@tonic-gate } 14080Sstevel@tonic-gate 14090Sstevel@tonic-gate /* 14100Sstevel@tonic-gate * Like mutex_trylock_adaptive(), but for process-shared mutexes. 14114613Sraf * Spin for a while (if 'tryhard' is true), trying to grab the lock. 14120Sstevel@tonic-gate * If this fails, return EBUSY and let the caller deal with it. 14130Sstevel@tonic-gate * If this succeeds, return 0 with mutex_owner set to curthread 14140Sstevel@tonic-gate * and mutex_ownerpid set to the current pid. 14150Sstevel@tonic-gate */ 14164574Sraf static int 14174613Sraf mutex_trylock_process(mutex_t *mp, int tryhard) 14180Sstevel@tonic-gate { 14190Sstevel@tonic-gate ulwp_t *self = curthread; 14205629Sraf uberdata_t *udp = self->ul_uberdata; 14214574Sraf int error = EBUSY; 14226057Sraf volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64; 14235629Sraf uint32_t new_lockword; 14245629Sraf int count = 0; 14255629Sraf int max_count; 14265629Sraf uint8_t max_spinners; 14274574Sraf 14284574Sraf ASSERT(mp->mutex_type & USYNC_PROCESS); 14294574Sraf 14304574Sraf if (shared_mutex_held(mp)) 14310Sstevel@tonic-gate return (EBUSY); 14320Sstevel@tonic-gate 14334574Sraf /* short-cut, not definitive (see below) */ 14344574Sraf if (mp->mutex_flag & LOCK_NOTRECOVERABLE) { 14354574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 14365629Sraf error = ENOTRECOVERABLE; 14375629Sraf goto done; 14384574Sraf } 14394574Sraf 14405629Sraf /* 14415629Sraf * Make one attempt to acquire the lock before 14425629Sraf * incurring the overhead of the spin loop. 14435629Sraf */ 14445629Sraf enter_critical(self); 14456057Sraf if (set_lock_byte64(lockp, udp->pid) == 0) { 14465629Sraf mp->mutex_owner = (uintptr_t)self; 14476057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 14485629Sraf exit_critical(self); 14495629Sraf error = 0; 14505629Sraf goto done; 14515629Sraf } 14525629Sraf exit_critical(self); 14535629Sraf if (!tryhard) 14545629Sraf goto done; 14554574Sraf if (ncpus == 0) 14564574Sraf ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN); 14575629Sraf if ((max_spinners = self->ul_max_spinners) >= ncpus) 14585629Sraf max_spinners = ncpus - 1; 14595629Sraf max_count = (max_spinners != 0)? 
self->ul_adaptive_spin : 0; 14605629Sraf if (max_count == 0) 14615629Sraf goto done; 14625629Sraf 14630Sstevel@tonic-gate /* 14640Sstevel@tonic-gate * This is a process-shared mutex. 14650Sstevel@tonic-gate * We cannot know if the owner is running on a processor. 14660Sstevel@tonic-gate * We just spin and hope that it is on a processor. 14670Sstevel@tonic-gate */ 14684574Sraf enter_critical(self); 14695629Sraf if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) { 14705629Sraf exit_critical(self); 14715629Sraf goto done; 14725629Sraf } 14735629Sraf DTRACE_PROBE1(plockstat, mutex__spin, mp); 14745629Sraf for (count = 1; ; count++) { 14756057Sraf if ((*lockp & LOCKMASK64) == 0 && 14766057Sraf set_lock_byte64(lockp, udp->pid) == 0) { 14774574Sraf mp->mutex_owner = (uintptr_t)self; 14786057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 14794574Sraf error = 0; 14804574Sraf break; 14814574Sraf } 14825629Sraf if (count == max_count) 14835629Sraf break; 14844574Sraf SMT_PAUSE(); 14854574Sraf } 14865629Sraf new_lockword = spinners_decr(&mp->mutex_lockword); 14875629Sraf if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) { 14885629Sraf /* 14895629Sraf * We haven't yet acquired the lock, the lock 14905629Sraf * is free, and there are no other spinners. 14915629Sraf * Make one final attempt to acquire the lock. 14925629Sraf * 14935629Sraf * This isn't strictly necessary since mutex_lock_kernel() 14945629Sraf * (the next action this thread will take if it doesn't 14955629Sraf * acquire the lock here) makes one attempt to acquire 14965629Sraf * the lock before putting the thread to sleep. 14975629Sraf * 14985629Sraf * If the next action for this thread (on failure here) 14995629Sraf * were not to call mutex_lock_kernel(), this would be 15005629Sraf * necessary for correctness, to avoid ending up with an 15015629Sraf * unheld mutex with waiters but no one to wake them up. 15025629Sraf */ 15036057Sraf if (set_lock_byte64(lockp, udp->pid) == 0) { 15045629Sraf mp->mutex_owner = (uintptr_t)self; 15056057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 15065629Sraf error = 0; 15075629Sraf } 15085629Sraf count++; 15095629Sraf } 15104574Sraf exit_critical(self); 15114574Sraf 15125629Sraf done: 15134574Sraf if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { 15144574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 15154574Sraf /* 15166057Sraf * We shouldn't own the mutex. 15176057Sraf * Just clear the lock; everyone has already been waked up. 
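 * (Editorial note, added here and not in the original text: this follows
 * the usual robust-mutex convention. Once LOCK_NOTRECOVERABLE is set,
 * the state can no longer be repaired with the mutex_consistent()
 * interface defined later in this file, so the only sensible action is
 * to drop the lock byte, which also clears mutex_ownerpid, and hand
 * ENOTRECOVERABLE back to the caller.)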
15184574Sraf */ 15194574Sraf mp->mutex_owner = 0; 15206057Sraf /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */ 15216057Sraf (void) clear_lockbyte64(&mp->mutex_lockword64); 15224574Sraf error = ENOTRECOVERABLE; 15230Sstevel@tonic-gate } 15240Sstevel@tonic-gate 15254574Sraf if (error) { 15265629Sraf if (count) { 15275629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 0, count); 15285629Sraf } 15294574Sraf if (error != EBUSY) { 15304574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 15314574Sraf } 15324574Sraf } else { 15335629Sraf if (count) { 15345629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 15355629Sraf } 15364574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 15374574Sraf if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) { 15384574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 15394574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) 15404574Sraf error = EOWNERDEAD; 15414574Sraf else if (mp->mutex_type & USYNC_PROCESS_ROBUST) 15424574Sraf error = ELOCKUNMAPPED; 15434574Sraf else 15444574Sraf error = EOWNERDEAD; 15454574Sraf } 15464574Sraf } 15474574Sraf 15484574Sraf return (error); 15490Sstevel@tonic-gate } 15500Sstevel@tonic-gate 15510Sstevel@tonic-gate /* 15520Sstevel@tonic-gate * Mutex wakeup code for releasing a USYNC_THREAD mutex. 15530Sstevel@tonic-gate * Returns the lwpid of the thread that was dequeued, if any. 15540Sstevel@tonic-gate * The caller of mutex_wakeup() must call __lwp_unpark(lwpid) 15550Sstevel@tonic-gate * to wake up the specified lwp. 15560Sstevel@tonic-gate */ 15574574Sraf static lwpid_t 15580Sstevel@tonic-gate mutex_wakeup(mutex_t *mp) 15590Sstevel@tonic-gate { 15600Sstevel@tonic-gate lwpid_t lwpid = 0; 15616247Sraf int more; 15620Sstevel@tonic-gate queue_head_t *qp; 15630Sstevel@tonic-gate ulwp_t *ulwp; 15640Sstevel@tonic-gate 15650Sstevel@tonic-gate /* 15660Sstevel@tonic-gate * Dequeue a waiter from the sleep queue. Don't touch the mutex 15670Sstevel@tonic-gate * waiters bit if no one was found on the queue because the mutex 15680Sstevel@tonic-gate * might have been deallocated or reallocated for another purpose. 15690Sstevel@tonic-gate */ 15700Sstevel@tonic-gate qp = queue_lock(mp, MX); 15716247Sraf if ((ulwp = dequeue(qp, &more)) != NULL) { 15720Sstevel@tonic-gate lwpid = ulwp->ul_lwpid; 15736247Sraf mp->mutex_waiters = more; 15740Sstevel@tonic-gate } 15750Sstevel@tonic-gate queue_unlock(qp); 15760Sstevel@tonic-gate return (lwpid); 15770Sstevel@tonic-gate } 15780Sstevel@tonic-gate 15790Sstevel@tonic-gate /* 15804574Sraf * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex. 15814574Sraf */ 15824574Sraf static void 15834574Sraf mutex_wakeup_all(mutex_t *mp) 15844574Sraf { 15854574Sraf queue_head_t *qp; 15866247Sraf queue_root_t *qrp; 15874574Sraf int nlwpid = 0; 15884574Sraf int maxlwps = MAXLWPS; 15894574Sraf ulwp_t *ulwp; 15904574Sraf lwpid_t buffer[MAXLWPS]; 15914574Sraf lwpid_t *lwpid = buffer; 15924574Sraf 15934574Sraf /* 15944574Sraf * Walk the list of waiters and prepare to wake up all of them. 15954574Sraf * The waiters flag has already been cleared from the mutex. 15964574Sraf * 15974574Sraf * We keep track of lwpids that are to be unparked in lwpid[]. 15984574Sraf * __lwp_unpark_all() is called to unpark all of them after 15994574Sraf * they have been removed from the sleep queue and the sleep 16004574Sraf * queue lock has been dropped. 
If we run out of space in our 16014574Sraf * on-stack buffer, we need to allocate more but we can't call 16024574Sraf * lmalloc() because we are holding a queue lock when the overflow 16034574Sraf * occurs and lmalloc() acquires a lock. We can't use alloca() 16044574Sraf * either because the application may have allocated a small 16054574Sraf * stack and we don't want to overrun the stack. So we call 16064574Sraf * alloc_lwpids() to allocate a bigger buffer using the mmap() 16074574Sraf * system call directly since that path acquires no locks. 16084574Sraf */ 16094574Sraf qp = queue_lock(mp, MX); 16106247Sraf for (;;) { 16116247Sraf if ((qrp = qp->qh_root) == NULL || 16126247Sraf (ulwp = qrp->qr_head) == NULL) 16136247Sraf break; 16146247Sraf ASSERT(ulwp->ul_wchan == mp); 16156247Sraf queue_unlink(qp, &qrp->qr_head, NULL); 16166247Sraf ulwp->ul_sleepq = NULL; 16176247Sraf ulwp->ul_wchan = NULL; 16186247Sraf if (nlwpid == maxlwps) 16196247Sraf lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 16206247Sraf lwpid[nlwpid++] = ulwp->ul_lwpid; 16214574Sraf } 16224574Sraf 16234574Sraf if (nlwpid == 0) { 16244574Sraf queue_unlock(qp); 16254574Sraf } else { 16265629Sraf mp->mutex_waiters = 0; 16274574Sraf no_preempt(curthread); 16284574Sraf queue_unlock(qp); 16294574Sraf if (nlwpid == 1) 16304574Sraf (void) __lwp_unpark(lwpid[0]); 16314574Sraf else 16324574Sraf (void) __lwp_unpark_all(lwpid, nlwpid); 16334574Sraf preempt(curthread); 16344574Sraf } 16354574Sraf 16364574Sraf if (lwpid != buffer) 1637*6515Sraf (void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t)); 16384574Sraf } 16394574Sraf 16404574Sraf /* 16415629Sraf * Release a process-private mutex. 16425629Sraf * As an optimization, if there are waiters but there are also spinners 16435629Sraf * attempting to acquire the mutex, then don't bother waking up a waiter; 16445629Sraf * one of the spinners will acquire the mutex soon and it would be a waste 16455629Sraf * of resources to wake up some thread just to have it spin for a while 16465629Sraf * and then possibly go back to sleep. See mutex_trylock_adaptive(). 16470Sstevel@tonic-gate */ 16484574Sraf static lwpid_t 16494574Sraf mutex_unlock_queue(mutex_t *mp, int release_all) 16500Sstevel@tonic-gate { 16515629Sraf lwpid_t lwpid = 0; 16525629Sraf uint32_t old_lockword; 16535629Sraf 16546057Sraf DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 16555629Sraf mp->mutex_owner = 0; 16565629Sraf old_lockword = clear_lockbyte(&mp->mutex_lockword); 16575629Sraf if ((old_lockword & WAITERMASK) && 16585629Sraf (release_all || (old_lockword & SPINNERMASK) == 0)) { 16595629Sraf ulwp_t *self = curthread; 16600Sstevel@tonic-gate no_preempt(self); /* ensure a prompt wakeup */ 16615629Sraf if (release_all) 16625629Sraf mutex_wakeup_all(mp); 16635629Sraf else 16645629Sraf lwpid = mutex_wakeup(mp); 16655629Sraf if (lwpid == 0) 16665629Sraf preempt(self); 16674574Sraf } 16680Sstevel@tonic-gate return (lwpid); 16690Sstevel@tonic-gate } 16700Sstevel@tonic-gate 16710Sstevel@tonic-gate /* 16720Sstevel@tonic-gate * Like mutex_unlock_queue(), but for process-shared mutexes. 
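 * (Editorial note, not in the original: for a process-shared mutex the
 * owner pid shares the 64-bit mutex_lockword64 with the lock byte, so
 * the clear_lockbyte64() call below drops the lock and clears
 * mutex_ownerpid in one atomic operation, and any waiters are woken
 * through the kernel with ___lwp_mutex_wakeup() since they may be
 * threads in other processes.)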
16730Sstevel@tonic-gate */ 16744574Sraf static void 16754574Sraf mutex_unlock_process(mutex_t *mp, int release_all) 16760Sstevel@tonic-gate { 16776057Sraf uint64_t old_lockword64; 16786057Sraf 16796057Sraf DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 16800Sstevel@tonic-gate mp->mutex_owner = 0; 16816057Sraf /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */ 16826057Sraf old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64); 16836057Sraf if ((old_lockword64 & WAITERMASK64) && 16846057Sraf (release_all || (old_lockword64 & SPINNERMASK64) == 0)) { 16855629Sraf ulwp_t *self = curthread; 16865629Sraf no_preempt(self); /* ensure a prompt wakeup */ 16875629Sraf (void) ___lwp_mutex_wakeup(mp, release_all); 16885629Sraf preempt(self); 16890Sstevel@tonic-gate } 16900Sstevel@tonic-gate } 16910Sstevel@tonic-gate 16920Sstevel@tonic-gate void 16930Sstevel@tonic-gate stall(void) 16940Sstevel@tonic-gate { 16950Sstevel@tonic-gate for (;;) 16960Sstevel@tonic-gate (void) mutex_lock_kernel(&stall_mutex, NULL, NULL); 16970Sstevel@tonic-gate } 16980Sstevel@tonic-gate 16990Sstevel@tonic-gate /* 17000Sstevel@tonic-gate * Acquire a USYNC_THREAD mutex via user-level sleep queues. 17010Sstevel@tonic-gate * We failed set_lock_byte(&mp->mutex_lockw) before coming here. 17024574Sraf * If successful, returns with mutex_owner set correctly. 17030Sstevel@tonic-gate */ 17040Sstevel@tonic-gate int 17050Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp, 17060Sstevel@tonic-gate timespec_t *tsp) 17070Sstevel@tonic-gate { 17080Sstevel@tonic-gate uberdata_t *udp = curthread->ul_uberdata; 17090Sstevel@tonic-gate queue_head_t *qp; 17100Sstevel@tonic-gate hrtime_t begin_sleep; 17110Sstevel@tonic-gate int error = 0; 17120Sstevel@tonic-gate 17130Sstevel@tonic-gate self->ul_sp = stkptr(); 17140Sstevel@tonic-gate if (__td_event_report(self, TD_SLEEP, udp)) { 17150Sstevel@tonic-gate self->ul_wchan = mp; 17160Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_SLEEP; 17170Sstevel@tonic-gate self->ul_td_evbuf.eventdata = mp; 17180Sstevel@tonic-gate tdb_event(TD_SLEEP, udp); 17190Sstevel@tonic-gate } 17200Sstevel@tonic-gate if (msp) { 17210Sstevel@tonic-gate tdb_incr(msp->mutex_sleep); 17220Sstevel@tonic-gate begin_sleep = gethrtime(); 17230Sstevel@tonic-gate } 17240Sstevel@tonic-gate 17250Sstevel@tonic-gate DTRACE_PROBE1(plockstat, mutex__block, mp); 17260Sstevel@tonic-gate 17270Sstevel@tonic-gate /* 17280Sstevel@tonic-gate * Put ourself on the sleep queue, and while we are 17290Sstevel@tonic-gate * unable to grab the lock, go park in the kernel. 17300Sstevel@tonic-gate * Take ourself off the sleep queue after we acquire the lock. 17310Sstevel@tonic-gate * The waiter bit can be set/cleared only while holding the queue lock. 17320Sstevel@tonic-gate */ 17330Sstevel@tonic-gate qp = queue_lock(mp, MX); 17346247Sraf enqueue(qp, self, 0); 17350Sstevel@tonic-gate mp->mutex_waiters = 1; 17360Sstevel@tonic-gate for (;;) { 17370Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 17380Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 17396247Sraf mp->mutex_waiters = dequeue_self(qp); 17400Sstevel@tonic-gate break; 17410Sstevel@tonic-gate } 17420Sstevel@tonic-gate set_parking_flag(self, 1); 17430Sstevel@tonic-gate queue_unlock(qp); 17440Sstevel@tonic-gate /* 17450Sstevel@tonic-gate * __lwp_park() will return the residual time in tsp 17460Sstevel@tonic-gate * if we are unparked before the timeout expires. 
17470Sstevel@tonic-gate */ 17485629Sraf error = __lwp_park(tsp, 0); 17490Sstevel@tonic-gate set_parking_flag(self, 0); 17500Sstevel@tonic-gate /* 17510Sstevel@tonic-gate * We could have taken a signal or suspended ourself. 17520Sstevel@tonic-gate * If we did, then we removed ourself from the queue. 17530Sstevel@tonic-gate * Someone else may have removed us from the queue 17540Sstevel@tonic-gate * as a consequence of mutex_unlock(). We may have 17550Sstevel@tonic-gate * gotten a timeout from __lwp_park(). Or we may still 17560Sstevel@tonic-gate * be on the queue and this is just a spurious wakeup. 17570Sstevel@tonic-gate */ 17580Sstevel@tonic-gate qp = queue_lock(mp, MX); 17590Sstevel@tonic-gate if (self->ul_sleepq == NULL) { 17605629Sraf if (error) { 17616247Sraf mp->mutex_waiters = queue_waiter(qp)? 1 : 0; 17625629Sraf if (error != EINTR) 17635629Sraf break; 17645629Sraf error = 0; 17655629Sraf } 17660Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 17670Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 17680Sstevel@tonic-gate break; 17690Sstevel@tonic-gate } 17706247Sraf enqueue(qp, self, 0); 17710Sstevel@tonic-gate mp->mutex_waiters = 1; 17720Sstevel@tonic-gate } 17730Sstevel@tonic-gate ASSERT(self->ul_sleepq == qp && 17740Sstevel@tonic-gate self->ul_qtype == MX && 17750Sstevel@tonic-gate self->ul_wchan == mp); 17760Sstevel@tonic-gate if (error) { 17775629Sraf if (error != EINTR) { 17786247Sraf mp->mutex_waiters = dequeue_self(qp); 17795629Sraf break; 17805629Sraf } 17815629Sraf error = 0; 17820Sstevel@tonic-gate } 17830Sstevel@tonic-gate } 17840Sstevel@tonic-gate ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 17850Sstevel@tonic-gate self->ul_wchan == NULL); 17860Sstevel@tonic-gate self->ul_sp = 0; 17870Sstevel@tonic-gate queue_unlock(qp); 17884574Sraf 17890Sstevel@tonic-gate if (msp) 17900Sstevel@tonic-gate msp->mutex_sleep_time += gethrtime() - begin_sleep; 17910Sstevel@tonic-gate 17920Sstevel@tonic-gate ASSERT(error == 0 || error == EINVAL || error == ETIME); 17934574Sraf 17944574Sraf if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { 17954574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 17964574Sraf /* 17976057Sraf * We shouldn't own the mutex. 17986057Sraf * Just clear the lock; everyone has already been waked up. 
17994574Sraf */ 18004574Sraf mp->mutex_owner = 0; 18016057Sraf (void) clear_lockbyte(&mp->mutex_lockword); 18024574Sraf error = ENOTRECOVERABLE; 18034574Sraf } 18044574Sraf 18054574Sraf if (error) { 18064574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); 18074574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 18084574Sraf } else { 18094574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 18104574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 18114574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 18124574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 18134574Sraf error = EOWNERDEAD; 18144574Sraf } 18154574Sraf } 18164574Sraf 18170Sstevel@tonic-gate return (error); 18180Sstevel@tonic-gate } 18190Sstevel@tonic-gate 18204574Sraf static int 18214574Sraf mutex_recursion(mutex_t *mp, int mtype, int try) 18224574Sraf { 18234574Sraf ASSERT(mutex_is_held(mp)); 18244574Sraf ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)); 18254574Sraf ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 18264574Sraf 18274574Sraf if (mtype & LOCK_RECURSIVE) { 18284574Sraf if (mp->mutex_rcount == RECURSION_MAX) { 18294574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN); 18304574Sraf return (EAGAIN); 18314574Sraf } 18324574Sraf mp->mutex_rcount++; 18334574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0); 18344574Sraf return (0); 18354574Sraf } 18364574Sraf if (try == MUTEX_LOCK) { 18374574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 18384574Sraf return (EDEADLK); 18394574Sraf } 18404574Sraf return (EBUSY); 18414574Sraf } 18424574Sraf 18434574Sraf /* 18444574Sraf * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so 18454574Sraf * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary. 18464574Sraf * We use tdb_hash_lock here and in the synch object tracking code in 18474574Sraf * the tdb_agent.c file. There is no conflict between these two usages. 18484574Sraf */ 18494574Sraf void 18504574Sraf register_lock(mutex_t *mp) 18514574Sraf { 18524574Sraf uberdata_t *udp = curthread->ul_uberdata; 18534574Sraf uint_t hash = LOCK_HASH(mp); 18544574Sraf robust_t *rlp; 18554574Sraf robust_t **rlpp; 18564574Sraf robust_t **table; 18574574Sraf 18584574Sraf if ((table = udp->robustlocks) == NULL) { 18594574Sraf lmutex_lock(&udp->tdb_hash_lock); 18604574Sraf if ((table = udp->robustlocks) == NULL) { 18614574Sraf table = lmalloc(LOCKHASHSZ * sizeof (robust_t *)); 18624574Sraf _membar_producer(); 18634574Sraf udp->robustlocks = table; 18644574Sraf } 18654574Sraf lmutex_unlock(&udp->tdb_hash_lock); 18664574Sraf } 18674574Sraf _membar_consumer(); 18684574Sraf 18694574Sraf /* 18704574Sraf * First search the registered table with no locks held. 18714574Sraf * This is safe because the table never shrinks 18724574Sraf * and we can only get a false negative. 18734574Sraf */ 18744574Sraf for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) { 18754574Sraf if (rlp->robust_lock == mp) /* already registered */ 18764574Sraf return; 18774574Sraf } 18784574Sraf 18794574Sraf /* 18804574Sraf * The lock was not found. 18814574Sraf * Repeat the operation with tdb_hash_lock held. 
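 * (Editorial note, not part of the original comment: this is the usual
 * double-checked pattern. The unlocked scan above is safe because the
 * table and its chains only ever grow, so the worst case is a false
 * negative, and the _membar_producer()/_membar_consumer() pairs order
 * each new entry's contents against the pointer that publishes it. The
 * miss is then re-verified here under tdb_hash_lock before anything is
 * allocated or handed to ___lwp_mutex_register().)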
18824574Sraf */ 18834574Sraf lmutex_lock(&udp->tdb_hash_lock); 18844574Sraf 18854574Sraf for (rlpp = &table[hash]; 18864574Sraf (rlp = *rlpp) != NULL; 18874574Sraf rlpp = &rlp->robust_next) { 18884574Sraf if (rlp->robust_lock == mp) { /* already registered */ 18894574Sraf lmutex_unlock(&udp->tdb_hash_lock); 18904574Sraf return; 18914574Sraf } 18924574Sraf } 18934574Sraf 18944574Sraf /* 18954574Sraf * The lock has never been registered. 18964574Sraf * Register it now and add it to the table. 18974574Sraf */ 18984574Sraf (void) ___lwp_mutex_register(mp); 18994574Sraf rlp = lmalloc(sizeof (*rlp)); 19004574Sraf rlp->robust_lock = mp; 19014574Sraf _membar_producer(); 19024574Sraf *rlpp = rlp; 19034574Sraf 19044574Sraf lmutex_unlock(&udp->tdb_hash_lock); 19054574Sraf } 19064574Sraf 19074574Sraf /* 19084574Sraf * This is called in the child of fork()/forkall() to start over 19094574Sraf * with a clean slate. (Each process must register its own locks.) 19104574Sraf * No locks are needed because all other threads are suspended or gone. 19114574Sraf */ 19124574Sraf void 19134574Sraf unregister_locks(void) 19144574Sraf { 19154574Sraf uberdata_t *udp = curthread->ul_uberdata; 19164574Sraf uint_t hash; 19174574Sraf robust_t **table; 19184574Sraf robust_t *rlp; 19194574Sraf robust_t *next; 19204574Sraf 19214574Sraf if ((table = udp->robustlocks) != NULL) { 19224574Sraf for (hash = 0; hash < LOCKHASHSZ; hash++) { 19234574Sraf rlp = table[hash]; 19244574Sraf while (rlp != NULL) { 19254574Sraf next = rlp->robust_next; 19264574Sraf lfree(rlp, sizeof (*rlp)); 19274574Sraf rlp = next; 19284574Sraf } 19294574Sraf } 19304574Sraf lfree(table, LOCKHASHSZ * sizeof (robust_t *)); 19314574Sraf udp->robustlocks = NULL; 19324574Sraf } 19334574Sraf } 19344574Sraf 19350Sstevel@tonic-gate /* 19360Sstevel@tonic-gate * Returns with mutex_owner set correctly. 19370Sstevel@tonic-gate */ 19386247Sraf int 19390Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try) 19400Sstevel@tonic-gate { 19410Sstevel@tonic-gate ulwp_t *self = curthread; 19420Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 19430Sstevel@tonic-gate int mtype = mp->mutex_type; 19440Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 19450Sstevel@tonic-gate int error = 0; 19466247Sraf int noceil = try & MUTEX_NOCEIL; 19474574Sraf uint8_t ceil; 19484574Sraf int myprio; 19490Sstevel@tonic-gate 19506247Sraf try &= ~MUTEX_NOCEIL; 19510Sstevel@tonic-gate ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 19520Sstevel@tonic-gate 19530Sstevel@tonic-gate if (!self->ul_schedctl_called) 19540Sstevel@tonic-gate (void) setup_schedctl(); 19550Sstevel@tonic-gate 19560Sstevel@tonic-gate if (msp && try == MUTEX_TRY) 19570Sstevel@tonic-gate tdb_incr(msp->mutex_try); 19580Sstevel@tonic-gate 19594574Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_is_held(mp)) 19604574Sraf return (mutex_recursion(mp, mtype, try)); 19610Sstevel@tonic-gate 19620Sstevel@tonic-gate if (self->ul_error_detection && try == MUTEX_LOCK && 19630Sstevel@tonic-gate tsp == NULL && mutex_is_held(mp)) 19640Sstevel@tonic-gate lock_error(mp, "mutex_lock", NULL, NULL); 19650Sstevel@tonic-gate 19666247Sraf if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) { 19676247Sraf update_sched(self); 19686247Sraf if (self->ul_cid != self->ul_rtclassid) { 19696247Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM); 19706247Sraf return (EPERM); 19716247Sraf } 19724574Sraf ceil = mp->mutex_ceiling; 19736247Sraf myprio = self->ul_epri? 
self->ul_epri : self->ul_pri; 19744574Sraf if (myprio > ceil) { 19754574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL); 19764574Sraf return (EINVAL); 19774574Sraf } 19784574Sraf if ((error = _ceil_mylist_add(mp)) != 0) { 19794574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 19804574Sraf return (error); 19810Sstevel@tonic-gate } 19824574Sraf if (myprio < ceil) 19834574Sraf _ceil_prio_inherit(ceil); 19844574Sraf } 19854574Sraf 19864574Sraf if ((mtype & (USYNC_PROCESS | LOCK_ROBUST)) 19874574Sraf == (USYNC_PROCESS | LOCK_ROBUST)) 19884574Sraf register_lock(mp); 19894574Sraf 19904574Sraf if (mtype & LOCK_PRIO_INHERIT) { 19914574Sraf /* go straight to the kernel */ 19924574Sraf if (try == MUTEX_TRY) 19934574Sraf error = mutex_trylock_kernel(mp); 19944574Sraf else /* MUTEX_LOCK */ 19954574Sraf error = mutex_lock_kernel(mp, tsp, msp); 19964574Sraf /* 19974574Sraf * The kernel never sets or clears the lock byte 19984574Sraf * for LOCK_PRIO_INHERIT mutexes. 19994574Sraf * Set it here for consistency. 20004574Sraf */ 20014574Sraf switch (error) { 20024574Sraf case 0: 20036247Sraf self->ul_pilocks++; 20044574Sraf mp->mutex_lockw = LOCKSET; 20054574Sraf break; 20064574Sraf case EOWNERDEAD: 20074574Sraf case ELOCKUNMAPPED: 20086247Sraf self->ul_pilocks++; 20094574Sraf mp->mutex_lockw = LOCKSET; 20104574Sraf /* FALLTHROUGH */ 20114574Sraf case ENOTRECOVERABLE: 20124574Sraf ASSERT(mtype & LOCK_ROBUST); 20134574Sraf break; 20144574Sraf case EDEADLK: 20154574Sraf if (try == MUTEX_LOCK) 20164574Sraf stall(); 20174574Sraf error = EBUSY; 20184574Sraf break; 20190Sstevel@tonic-gate } 20200Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 20214613Sraf error = mutex_trylock_process(mp, try == MUTEX_LOCK); 20224574Sraf if (error == EBUSY && try == MUTEX_LOCK) 20230Sstevel@tonic-gate error = mutex_lock_kernel(mp, tsp, msp); 20245629Sraf } else { /* USYNC_THREAD */ 20254613Sraf error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK); 20264574Sraf if (error == EBUSY && try == MUTEX_LOCK) 20274574Sraf error = mutex_lock_queue(self, msp, mp, tsp); 20280Sstevel@tonic-gate } 20290Sstevel@tonic-gate 20300Sstevel@tonic-gate switch (error) { 20314574Sraf case 0: 20320Sstevel@tonic-gate case EOWNERDEAD: 20330Sstevel@tonic-gate case ELOCKUNMAPPED: 20344574Sraf if (mtype & LOCK_ROBUST) 20354574Sraf remember_lock(mp); 20360Sstevel@tonic-gate if (msp) 20370Sstevel@tonic-gate record_begin_hold(msp); 20380Sstevel@tonic-gate break; 20390Sstevel@tonic-gate default: 20406247Sraf if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) { 20414574Sraf (void) _ceil_mylist_del(mp); 20424574Sraf if (myprio < ceil) 20434574Sraf _ceil_prio_waive(); 20444574Sraf } 20450Sstevel@tonic-gate if (try == MUTEX_TRY) { 20460Sstevel@tonic-gate if (msp) 20470Sstevel@tonic-gate tdb_incr(msp->mutex_try_fail); 20480Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 20490Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 20500Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 20510Sstevel@tonic-gate } 20520Sstevel@tonic-gate } 20530Sstevel@tonic-gate break; 20540Sstevel@tonic-gate } 20550Sstevel@tonic-gate 20560Sstevel@tonic-gate return (error); 20570Sstevel@tonic-gate } 20580Sstevel@tonic-gate 20590Sstevel@tonic-gate int 20600Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try) 20610Sstevel@tonic-gate { 20620Sstevel@tonic-gate ulwp_t *self = curthread; 20630Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 20640Sstevel@tonic-gate 20650Sstevel@tonic-gate /* 
20660Sstevel@tonic-gate * We know that USYNC_PROCESS is set in mtype and that 20670Sstevel@tonic-gate * zero, one, or both of the flags LOCK_RECURSIVE and 20680Sstevel@tonic-gate * LOCK_ERRORCHECK are set, and that no other flags are set. 20690Sstevel@tonic-gate */ 20704574Sraf ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0); 20710Sstevel@tonic-gate enter_critical(self); 20726057Sraf if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) { 20730Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 20746057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 20750Sstevel@tonic-gate exit_critical(self); 20760Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 20770Sstevel@tonic-gate return (0); 20780Sstevel@tonic-gate } 20790Sstevel@tonic-gate exit_critical(self); 20800Sstevel@tonic-gate 20814574Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp)) 20824574Sraf return (mutex_recursion(mp, mtype, try)); 20834574Sraf 20844613Sraf if (try == MUTEX_LOCK) { 20854613Sraf if (mutex_trylock_process(mp, 1) == 0) 20864613Sraf return (0); 20870Sstevel@tonic-gate return (mutex_lock_kernel(mp, tsp, NULL)); 20884613Sraf } 20890Sstevel@tonic-gate 20900Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 20910Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 20920Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 20930Sstevel@tonic-gate } 20940Sstevel@tonic-gate return (EBUSY); 20950Sstevel@tonic-gate } 20960Sstevel@tonic-gate 20970Sstevel@tonic-gate static int 20980Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp) 20990Sstevel@tonic-gate { 21000Sstevel@tonic-gate ulwp_t *self = curthread; 21016247Sraf int mtype = mp->mutex_type; 21020Sstevel@tonic-gate uberflags_t *gflags; 21030Sstevel@tonic-gate 21040Sstevel@tonic-gate /* 21050Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 21060Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 21070Sstevel@tonic-gate * no error detection, no lock statistics, 21080Sstevel@tonic-gate * and the process has only a single thread. 21090Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 21100Sstevel@tonic-gate */ 21116247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 21126247Sraf self->ul_uberdata->uberflags.uf_all) == 0) { 21130Sstevel@tonic-gate /* 21140Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 21150Sstevel@tonic-gate */ 21160Sstevel@tonic-gate if (mp->mutex_lockw == 0) { 21170Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 21180Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 21190Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 21200Sstevel@tonic-gate return (0); 21210Sstevel@tonic-gate } 21224574Sraf if (mtype && MUTEX_OWNER(mp) == self) 21234574Sraf return (mutex_recursion(mp, mtype, MUTEX_LOCK)); 21240Sstevel@tonic-gate /* 21250Sstevel@tonic-gate * We have reached a deadlock, probably because the 21260Sstevel@tonic-gate * process is executing non-async-signal-safe code in 21270Sstevel@tonic-gate * a signal handler and is attempting to acquire a lock 21280Sstevel@tonic-gate * that it already owns. This is not surprising, given 21290Sstevel@tonic-gate * bad programming practices over the years that has 21300Sstevel@tonic-gate * resulted in applications calling printf() and such 21310Sstevel@tonic-gate * in their signal handlers. 
Unless the user has told 21320Sstevel@tonic-gate * us that the signal handlers are safe by setting: 21330Sstevel@tonic-gate * export _THREAD_ASYNC_SAFE=1 21340Sstevel@tonic-gate * we return EDEADLK rather than actually deadlocking. 21350Sstevel@tonic-gate */ 21360Sstevel@tonic-gate if (tsp == NULL && 21370Sstevel@tonic-gate MUTEX_OWNER(mp) == self && !self->ul_async_safe) { 21380Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 21390Sstevel@tonic-gate return (EDEADLK); 21400Sstevel@tonic-gate } 21410Sstevel@tonic-gate } 21420Sstevel@tonic-gate 21430Sstevel@tonic-gate /* 21440Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 21450Sstevel@tonic-gate * no error detection, and no lock statistics. 21460Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 21470Sstevel@tonic-gate */ 21480Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 21490Sstevel@tonic-gate (gflags->uf_trs_ted | 21500Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 21510Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 21520Sstevel@tonic-gate return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK)); 21530Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 21540Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 21550Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 21560Sstevel@tonic-gate return (0); 21570Sstevel@tonic-gate } 21584574Sraf if (mtype && MUTEX_OWNER(mp) == self) 21594574Sraf return (mutex_recursion(mp, mtype, MUTEX_LOCK)); 21604613Sraf if (mutex_trylock_adaptive(mp, 1) != 0) 21614574Sraf return (mutex_lock_queue(self, NULL, mp, tsp)); 21624574Sraf return (0); 21630Sstevel@tonic-gate } 21640Sstevel@tonic-gate 21650Sstevel@tonic-gate /* else do it the long way */ 21660Sstevel@tonic-gate return (mutex_lock_internal(mp, tsp, MUTEX_LOCK)); 21670Sstevel@tonic-gate } 21680Sstevel@tonic-gate 21690Sstevel@tonic-gate #pragma weak mutex_lock = __mutex_lock 21700Sstevel@tonic-gate #pragma weak _mutex_lock = __mutex_lock 21710Sstevel@tonic-gate #pragma weak pthread_mutex_lock = __mutex_lock 21720Sstevel@tonic-gate #pragma weak _pthread_mutex_lock = __mutex_lock 21730Sstevel@tonic-gate int 21740Sstevel@tonic-gate __mutex_lock(mutex_t *mp) 21750Sstevel@tonic-gate { 21760Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 21770Sstevel@tonic-gate return (mutex_lock_impl(mp, NULL)); 21780Sstevel@tonic-gate } 21790Sstevel@tonic-gate 21800Sstevel@tonic-gate #pragma weak pthread_mutex_timedlock = _pthread_mutex_timedlock 21810Sstevel@tonic-gate int 21820Sstevel@tonic-gate _pthread_mutex_timedlock(mutex_t *mp, const timespec_t *abstime) 21830Sstevel@tonic-gate { 21840Sstevel@tonic-gate timespec_t tslocal; 21850Sstevel@tonic-gate int error; 21860Sstevel@tonic-gate 21870Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 21880Sstevel@tonic-gate abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal); 21890Sstevel@tonic-gate error = mutex_lock_impl(mp, &tslocal); 21900Sstevel@tonic-gate if (error == ETIME) 21910Sstevel@tonic-gate error = ETIMEDOUT; 21920Sstevel@tonic-gate return (error); 21930Sstevel@tonic-gate } 21940Sstevel@tonic-gate 21950Sstevel@tonic-gate #pragma weak pthread_mutex_reltimedlock_np = _pthread_mutex_reltimedlock_np 21960Sstevel@tonic-gate int 21970Sstevel@tonic-gate _pthread_mutex_reltimedlock_np(mutex_t *mp, const timespec_t *reltime) 21980Sstevel@tonic-gate { 21990Sstevel@tonic-gate timespec_t tslocal; 
22000Sstevel@tonic-gate int error; 22010Sstevel@tonic-gate 22020Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 22030Sstevel@tonic-gate tslocal = *reltime; 22040Sstevel@tonic-gate error = mutex_lock_impl(mp, &tslocal); 22050Sstevel@tonic-gate if (error == ETIME) 22060Sstevel@tonic-gate error = ETIMEDOUT; 22070Sstevel@tonic-gate return (error); 22080Sstevel@tonic-gate } 22090Sstevel@tonic-gate 22100Sstevel@tonic-gate #pragma weak mutex_trylock = __mutex_trylock 22110Sstevel@tonic-gate #pragma weak _mutex_trylock = __mutex_trylock 22120Sstevel@tonic-gate #pragma weak pthread_mutex_trylock = __mutex_trylock 22130Sstevel@tonic-gate #pragma weak _pthread_mutex_trylock = __mutex_trylock 22140Sstevel@tonic-gate int 22150Sstevel@tonic-gate __mutex_trylock(mutex_t *mp) 22160Sstevel@tonic-gate { 22170Sstevel@tonic-gate ulwp_t *self = curthread; 22180Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 22196247Sraf int mtype = mp->mutex_type; 22200Sstevel@tonic-gate uberflags_t *gflags; 22210Sstevel@tonic-gate 22220Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 22236247Sraf 22240Sstevel@tonic-gate /* 22250Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 22260Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 22270Sstevel@tonic-gate * no error detection, no lock statistics, 22280Sstevel@tonic-gate * and the process has only a single thread. 22290Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 22300Sstevel@tonic-gate */ 22316247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 22320Sstevel@tonic-gate udp->uberflags.uf_all) == 0) { 22330Sstevel@tonic-gate /* 22340Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 22350Sstevel@tonic-gate */ 22360Sstevel@tonic-gate if (mp->mutex_lockw == 0) { 22370Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 22380Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 22390Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 22400Sstevel@tonic-gate return (0); 22410Sstevel@tonic-gate } 22424574Sraf if (mtype && MUTEX_OWNER(mp) == self) 22434574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 22440Sstevel@tonic-gate return (EBUSY); 22450Sstevel@tonic-gate } 22460Sstevel@tonic-gate 22470Sstevel@tonic-gate /* 22480Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 22490Sstevel@tonic-gate * no error detection, and no lock statistics. 22500Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 
22510Sstevel@tonic-gate */ 22520Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 22530Sstevel@tonic-gate (gflags->uf_trs_ted | 22540Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 22550Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 22560Sstevel@tonic-gate return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY)); 22570Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 22580Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 22590Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 22600Sstevel@tonic-gate return (0); 22610Sstevel@tonic-gate } 22624574Sraf if (mtype && MUTEX_OWNER(mp) == self) 22634574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 22644613Sraf if (__td_event_report(self, TD_LOCK_TRY, udp)) { 22654613Sraf self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 22664613Sraf tdb_event(TD_LOCK_TRY, udp); 22670Sstevel@tonic-gate } 22684613Sraf return (EBUSY); 22690Sstevel@tonic-gate } 22700Sstevel@tonic-gate 22710Sstevel@tonic-gate /* else do it the long way */ 22720Sstevel@tonic-gate return (mutex_lock_internal(mp, NULL, MUTEX_TRY)); 22730Sstevel@tonic-gate } 22740Sstevel@tonic-gate 22750Sstevel@tonic-gate int 22764574Sraf mutex_unlock_internal(mutex_t *mp, int retain_robust_flags) 22770Sstevel@tonic-gate { 22780Sstevel@tonic-gate ulwp_t *self = curthread; 22790Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 22800Sstevel@tonic-gate int mtype = mp->mutex_type; 22810Sstevel@tonic-gate tdb_mutex_stats_t *msp; 22824574Sraf int error = 0; 22834574Sraf int release_all; 22840Sstevel@tonic-gate lwpid_t lwpid; 22850Sstevel@tonic-gate 22860Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !mutex_is_held(mp)) 22870Sstevel@tonic-gate return (EPERM); 22880Sstevel@tonic-gate 22890Sstevel@tonic-gate if (self->ul_error_detection && !mutex_is_held(mp)) 22900Sstevel@tonic-gate lock_error(mp, "mutex_unlock", NULL, NULL); 22910Sstevel@tonic-gate 22920Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 22930Sstevel@tonic-gate mp->mutex_rcount--; 22940Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 22950Sstevel@tonic-gate return (0); 22960Sstevel@tonic-gate } 22970Sstevel@tonic-gate 22980Sstevel@tonic-gate if ((msp = MUTEX_STATS(mp, udp)) != NULL) 22990Sstevel@tonic-gate (void) record_hold_time(msp); 23000Sstevel@tonic-gate 23014574Sraf if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) && 23024574Sraf (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 23034574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 23044574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 23054574Sraf mp->mutex_flag |= LOCK_NOTRECOVERABLE; 23064574Sraf } 23074574Sraf release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); 23084574Sraf 23094574Sraf if (mtype & LOCK_PRIO_INHERIT) { 23100Sstevel@tonic-gate no_preempt(self); 23110Sstevel@tonic-gate mp->mutex_owner = 0; 23126057Sraf /* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */ 23130Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 23144574Sraf mp->mutex_lockw = LOCKCLEAR; 23156247Sraf self->ul_pilocks--; 23164574Sraf error = ___lwp_mutex_unlock(mp); 23170Sstevel@tonic-gate preempt(self); 23180Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 23195629Sraf mutex_unlock_process(mp, release_all); 23200Sstevel@tonic-gate } else { /* USYNC_THREAD */ 23214574Sraf if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) { 23220Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 23230Sstevel@tonic-gate 
preempt(self); 23240Sstevel@tonic-gate } 23250Sstevel@tonic-gate } 23260Sstevel@tonic-gate 23274574Sraf if (mtype & LOCK_ROBUST) 23284574Sraf forget_lock(mp); 23294574Sraf 23304574Sraf if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) 23314574Sraf _ceil_prio_waive(); 23324574Sraf 23330Sstevel@tonic-gate return (error); 23340Sstevel@tonic-gate } 23350Sstevel@tonic-gate 23360Sstevel@tonic-gate #pragma weak mutex_unlock = __mutex_unlock 23370Sstevel@tonic-gate #pragma weak _mutex_unlock = __mutex_unlock 23380Sstevel@tonic-gate #pragma weak pthread_mutex_unlock = __mutex_unlock 23390Sstevel@tonic-gate #pragma weak _pthread_mutex_unlock = __mutex_unlock 23400Sstevel@tonic-gate int 23410Sstevel@tonic-gate __mutex_unlock(mutex_t *mp) 23420Sstevel@tonic-gate { 23430Sstevel@tonic-gate ulwp_t *self = curthread; 23446247Sraf int mtype = mp->mutex_type; 23450Sstevel@tonic-gate uberflags_t *gflags; 23460Sstevel@tonic-gate lwpid_t lwpid; 23470Sstevel@tonic-gate short el; 23480Sstevel@tonic-gate 23490Sstevel@tonic-gate /* 23500Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 23510Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 23520Sstevel@tonic-gate * no error detection, no lock statistics, 23530Sstevel@tonic-gate * and the process has only a single thread. 23540Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 23550Sstevel@tonic-gate */ 23566247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 23576247Sraf self->ul_uberdata->uberflags.uf_all) == 0) { 23580Sstevel@tonic-gate if (mtype) { 23590Sstevel@tonic-gate /* 23600Sstevel@tonic-gate * At this point we know that one or both of the 23610Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 23620Sstevel@tonic-gate */ 23630Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 23640Sstevel@tonic-gate return (EPERM); 23650Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 23660Sstevel@tonic-gate mp->mutex_rcount--; 23670Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 23680Sstevel@tonic-gate return (0); 23690Sstevel@tonic-gate } 23700Sstevel@tonic-gate } 23710Sstevel@tonic-gate /* 23720Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 23730Sstevel@tonic-gate * Also, there can be no waiters. 23740Sstevel@tonic-gate */ 23750Sstevel@tonic-gate mp->mutex_owner = 0; 23760Sstevel@tonic-gate mp->mutex_lockword = 0; 23770Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 23780Sstevel@tonic-gate return (0); 23790Sstevel@tonic-gate } 23800Sstevel@tonic-gate 23810Sstevel@tonic-gate /* 23820Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 23830Sstevel@tonic-gate * no error detection, and no lock statistics. 23840Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 
23850Sstevel@tonic-gate */ 23860Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL) { 23870Sstevel@tonic-gate if (((el = gflags->uf_trs_ted) | mtype) == 0) { 23880Sstevel@tonic-gate fast_unlock: 23895629Sraf if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 23900Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 23910Sstevel@tonic-gate preempt(self); 23920Sstevel@tonic-gate } 23930Sstevel@tonic-gate return (0); 23940Sstevel@tonic-gate } 23950Sstevel@tonic-gate if (el) /* error detection or lock statistics */ 23960Sstevel@tonic-gate goto slow_unlock; 23970Sstevel@tonic-gate if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 23980Sstevel@tonic-gate /* 23990Sstevel@tonic-gate * At this point we know that one or both of the 24000Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 24010Sstevel@tonic-gate */ 24020Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 24030Sstevel@tonic-gate return (EPERM); 24040Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 24050Sstevel@tonic-gate mp->mutex_rcount--; 24060Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 24070Sstevel@tonic-gate return (0); 24080Sstevel@tonic-gate } 24090Sstevel@tonic-gate goto fast_unlock; 24100Sstevel@tonic-gate } 24110Sstevel@tonic-gate if ((mtype & 24120Sstevel@tonic-gate ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 24130Sstevel@tonic-gate /* 24140Sstevel@tonic-gate * At this point we know that zero, one, or both of the 24150Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and 24160Sstevel@tonic-gate * that the USYNC_PROCESS flag is set. 24170Sstevel@tonic-gate */ 24180Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp)) 24190Sstevel@tonic-gate return (EPERM); 24200Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 24210Sstevel@tonic-gate mp->mutex_rcount--; 24220Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 24230Sstevel@tonic-gate return (0); 24240Sstevel@tonic-gate } 24255629Sraf mutex_unlock_process(mp, 0); 24260Sstevel@tonic-gate return (0); 24270Sstevel@tonic-gate } 24280Sstevel@tonic-gate } 24290Sstevel@tonic-gate 24300Sstevel@tonic-gate /* else do it the long way */ 24310Sstevel@tonic-gate slow_unlock: 24324574Sraf return (mutex_unlock_internal(mp, 0)); 24330Sstevel@tonic-gate } 24340Sstevel@tonic-gate 24350Sstevel@tonic-gate /* 24360Sstevel@tonic-gate * Internally to the library, almost all mutex lock/unlock actions 24370Sstevel@tonic-gate * go through these lmutex_ functions, to protect critical regions. 24380Sstevel@tonic-gate * We replicate a bit of code from __mutex_lock() and __mutex_unlock() 24390Sstevel@tonic-gate * to make these functions faster since we know that the mutex type 24400Sstevel@tonic-gate * of all internal locks is USYNC_THREAD. We also know that internal 24410Sstevel@tonic-gate * locking can never fail, so we panic if it does. 24420Sstevel@tonic-gate */ 24430Sstevel@tonic-gate void 24440Sstevel@tonic-gate lmutex_lock(mutex_t *mp) 24450Sstevel@tonic-gate { 24460Sstevel@tonic-gate ulwp_t *self = curthread; 24470Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 24480Sstevel@tonic-gate 24490Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 24500Sstevel@tonic-gate 24510Sstevel@tonic-gate enter_critical(self); 24520Sstevel@tonic-gate /* 24530Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 
24540Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 24550Sstevel@tonic-gate */ 24560Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 24570Sstevel@tonic-gate /* 24580Sstevel@tonic-gate * Only one thread exists; the mutex must be free. 24590Sstevel@tonic-gate */ 24600Sstevel@tonic-gate ASSERT(mp->mutex_lockw == 0); 24610Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 24620Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 24630Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 24640Sstevel@tonic-gate } else { 24650Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 24660Sstevel@tonic-gate 24670Sstevel@tonic-gate if (!self->ul_schedctl_called) 24680Sstevel@tonic-gate (void) setup_schedctl(); 24690Sstevel@tonic-gate 24700Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 24710Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 24720Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 24734613Sraf } else if (mutex_trylock_adaptive(mp, 1) != 0) { 24740Sstevel@tonic-gate (void) mutex_lock_queue(self, msp, mp, NULL); 24750Sstevel@tonic-gate } 24760Sstevel@tonic-gate 24770Sstevel@tonic-gate if (msp) 24780Sstevel@tonic-gate record_begin_hold(msp); 24790Sstevel@tonic-gate } 24800Sstevel@tonic-gate } 24810Sstevel@tonic-gate 24820Sstevel@tonic-gate void 24830Sstevel@tonic-gate lmutex_unlock(mutex_t *mp) 24840Sstevel@tonic-gate { 24850Sstevel@tonic-gate ulwp_t *self = curthread; 24860Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 24870Sstevel@tonic-gate 24880Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 24890Sstevel@tonic-gate 24900Sstevel@tonic-gate /* 24910Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 24920Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 24930Sstevel@tonic-gate */ 24940Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 24950Sstevel@tonic-gate /* 24960Sstevel@tonic-gate * Only one thread exists so there can be no waiters. 24970Sstevel@tonic-gate */ 24980Sstevel@tonic-gate mp->mutex_owner = 0; 24990Sstevel@tonic-gate mp->mutex_lockword = 0; 25000Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 25010Sstevel@tonic-gate } else { 25020Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 25030Sstevel@tonic-gate lwpid_t lwpid; 25040Sstevel@tonic-gate 25050Sstevel@tonic-gate if (msp) 25060Sstevel@tonic-gate (void) record_hold_time(msp); 25074574Sraf if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 25080Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 25090Sstevel@tonic-gate preempt(self); 25100Sstevel@tonic-gate } 25110Sstevel@tonic-gate } 25120Sstevel@tonic-gate exit_critical(self); 25130Sstevel@tonic-gate } 25140Sstevel@tonic-gate 25152248Sraf /* 25162248Sraf * For specialized code in libc, like the asynchronous i/o code, 25172248Sraf * the following sig_*() locking primitives are used in order 25182248Sraf * to make the code asynchronous signal safe. Signals are 25192248Sraf * deferred while locks acquired by these functions are held. 
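 * (Editorial addition: a hedged usage sketch, not text from the original
 * file. The lock name aio_lock is hypothetical.)
 *
 *	sig_mutex_lock(&aio_lock);	sigoff() defers signals first,
 *					then the mutex is taken
 *	... update state that a signal handler might also touch ...
 *	sig_mutex_unlock(&aio_lock);	the mutex is dropped, then sigon()
 *					lets any deferred signal in
 *
 * Deferring before locking and undeferring only after unlocking is what
 * keeps a handler from ever running while one of these locks is held.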
25202248Sraf */ 25212248Sraf void 25222248Sraf sig_mutex_lock(mutex_t *mp) 25232248Sraf { 25242248Sraf sigoff(curthread); 2525*6515Sraf (void) mutex_lock(mp); 25262248Sraf } 25272248Sraf 25282248Sraf void 25292248Sraf sig_mutex_unlock(mutex_t *mp) 25302248Sraf { 2531*6515Sraf (void) mutex_unlock(mp); 25322248Sraf sigon(curthread); 25332248Sraf } 25342248Sraf 25352248Sraf int 25362248Sraf sig_mutex_trylock(mutex_t *mp) 25372248Sraf { 25382248Sraf int error; 25392248Sraf 25402248Sraf sigoff(curthread); 2541*6515Sraf if ((error = mutex_trylock(mp)) != 0) 25422248Sraf sigon(curthread); 25432248Sraf return (error); 25442248Sraf } 25452248Sraf 25462248Sraf /* 25472248Sraf * sig_cond_wait() is a cancellation point. 25482248Sraf */ 25492248Sraf int 25502248Sraf sig_cond_wait(cond_t *cv, mutex_t *mp) 25512248Sraf { 25522248Sraf int error; 25532248Sraf 25542248Sraf ASSERT(curthread->ul_sigdefer != 0); 2555*6515Sraf pthread_testcancel(); 25565891Sraf error = __cond_wait(cv, mp); 25572248Sraf if (error == EINTR && curthread->ul_cursig) { 25582248Sraf sig_mutex_unlock(mp); 25592248Sraf /* take the deferred signal here */ 25602248Sraf sig_mutex_lock(mp); 25612248Sraf } 2562*6515Sraf pthread_testcancel(); 25632248Sraf return (error); 25642248Sraf } 25652248Sraf 25662248Sraf /* 25672248Sraf * sig_cond_reltimedwait() is a cancellation point. 25682248Sraf */ 25692248Sraf int 25702248Sraf sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts) 25712248Sraf { 25722248Sraf int error; 25732248Sraf 25742248Sraf ASSERT(curthread->ul_sigdefer != 0); 2575*6515Sraf pthread_testcancel(); 25765891Sraf error = __cond_reltimedwait(cv, mp, ts); 25772248Sraf if (error == EINTR && curthread->ul_cursig) { 25782248Sraf sig_mutex_unlock(mp); 25792248Sraf /* take the deferred signal here */ 25802248Sraf sig_mutex_lock(mp); 25812248Sraf } 2582*6515Sraf pthread_testcancel(); 25832248Sraf return (error); 25842248Sraf } 25852248Sraf 25865891Sraf /* 25875891Sraf * For specialized code in libc, like the stdio code. 25885891Sraf * the following cancel_safe_*() locking primitives are used in 25895891Sraf * order to make the code cancellation-safe. Cancellation is 25905891Sraf * deferred while locks acquired by these functions are held. 25915891Sraf */ 25925891Sraf void 25935891Sraf cancel_safe_mutex_lock(mutex_t *mp) 25945891Sraf { 2595*6515Sraf (void) mutex_lock(mp); 25965891Sraf curthread->ul_libc_locks++; 25975891Sraf } 25985891Sraf 25995891Sraf int 26005891Sraf cancel_safe_mutex_trylock(mutex_t *mp) 26015891Sraf { 26025891Sraf int error; 26035891Sraf 2604*6515Sraf if ((error = mutex_trylock(mp)) == 0) 26055891Sraf curthread->ul_libc_locks++; 26065891Sraf return (error); 26075891Sraf } 26085891Sraf 26095891Sraf void 26105891Sraf cancel_safe_mutex_unlock(mutex_t *mp) 26115891Sraf { 26125891Sraf ulwp_t *self = curthread; 26135891Sraf 26145891Sraf ASSERT(self->ul_libc_locks != 0); 26155891Sraf 2616*6515Sraf (void) mutex_unlock(mp); 26175891Sraf 26185891Sraf /* 26195891Sraf * Decrement the count of locks held by cancel_safe_mutex_lock(). 26205891Sraf * If we are then in a position to terminate cleanly and 26215891Sraf * if there is a pending cancellation and cancellation 26225891Sraf * is not disabled and we received EINTR from a recent 26235891Sraf * system call then perform the cancellation action now. 
26245891Sraf */ 26255891Sraf if (--self->ul_libc_locks == 0 && 26265891Sraf !(self->ul_vfork | self->ul_nocancel | 26275891Sraf self->ul_critical | self->ul_sigdefer) && 26285891Sraf cancel_active()) 26295891Sraf _pthread_exit(PTHREAD_CANCELED); 26305891Sraf } 26315891Sraf 26320Sstevel@tonic-gate static int 26330Sstevel@tonic-gate shared_mutex_held(mutex_t *mparg) 26340Sstevel@tonic-gate { 26350Sstevel@tonic-gate /* 26364574Sraf * The 'volatile' is necessary to make sure the compiler doesn't 26374574Sraf * reorder the tests of the various components of the mutex. 26384574Sraf * They must be tested in this order: 26394574Sraf * mutex_lockw 26404574Sraf * mutex_owner 26414574Sraf * mutex_ownerpid 26424574Sraf * This relies on the fact that everywhere mutex_lockw is cleared, 26434574Sraf * mutex_owner and mutex_ownerpid are cleared before mutex_lockw 26444574Sraf * is cleared, and that everywhere mutex_lockw is set, mutex_owner 26454574Sraf * and mutex_ownerpid are set after mutex_lockw is set, and that 26464574Sraf * mutex_lockw is set or cleared with a memory barrier. 26470Sstevel@tonic-gate */ 26480Sstevel@tonic-gate volatile mutex_t *mp = (volatile mutex_t *)mparg; 26490Sstevel@tonic-gate ulwp_t *self = curthread; 26500Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 26510Sstevel@tonic-gate 26524574Sraf return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid); 26530Sstevel@tonic-gate } 26540Sstevel@tonic-gate 26550Sstevel@tonic-gate /* 26560Sstevel@tonic-gate * Some crufty old programs define their own version of _mutex_held() 26570Sstevel@tonic-gate * to be simply return(1). This breaks internal libc logic, so we 26580Sstevel@tonic-gate * define a private version for exclusive use by libc, mutex_is_held(), 26590Sstevel@tonic-gate * and also a new public function, __mutex_held(), to be used in new 26600Sstevel@tonic-gate * code to circumvent these crufty old programs. 
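 * (Editorial illustration, not in the original: the kind of override the
 * comment above describes is simply
 *
 *	int
 *	_mutex_held(mutex_t *mp)
 *	{
 *		return (1);
 *	}
 *
 * in application code. If libc's internal held-lock checks went through
 * that public symbol, they would all pass trivially; binding them to the
 * private mutex_is_held() defined below avoids that.)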
26610Sstevel@tonic-gate */ 26620Sstevel@tonic-gate #pragma weak mutex_held = mutex_is_held 26630Sstevel@tonic-gate #pragma weak _mutex_held = mutex_is_held 26640Sstevel@tonic-gate #pragma weak __mutex_held = mutex_is_held 26650Sstevel@tonic-gate int 26664574Sraf mutex_is_held(mutex_t *mparg) 26670Sstevel@tonic-gate { 26684574Sraf volatile mutex_t *mp = (volatile mutex_t *)mparg; 26694574Sraf 26704574Sraf if (mparg->mutex_type & USYNC_PROCESS) 26714574Sraf return (shared_mutex_held(mparg)); 26720Sstevel@tonic-gate return (MUTEX_OWNED(mp, curthread)); 26730Sstevel@tonic-gate } 26740Sstevel@tonic-gate 26750Sstevel@tonic-gate #pragma weak mutex_destroy = __mutex_destroy 26760Sstevel@tonic-gate #pragma weak _mutex_destroy = __mutex_destroy 26770Sstevel@tonic-gate #pragma weak pthread_mutex_destroy = __mutex_destroy 26780Sstevel@tonic-gate #pragma weak _pthread_mutex_destroy = __mutex_destroy 26790Sstevel@tonic-gate int 26800Sstevel@tonic-gate __mutex_destroy(mutex_t *mp) 26810Sstevel@tonic-gate { 26824574Sraf if (mp->mutex_type & USYNC_PROCESS) 26834574Sraf forget_lock(mp); 2684*6515Sraf (void) memset(mp, 0, sizeof (*mp)); 26850Sstevel@tonic-gate tdb_sync_obj_deregister(mp); 26860Sstevel@tonic-gate return (0); 26870Sstevel@tonic-gate } 26880Sstevel@tonic-gate 26894574Sraf #pragma weak mutex_consistent = __mutex_consistent 26904574Sraf #pragma weak _mutex_consistent = __mutex_consistent 26914574Sraf #pragma weak pthread_mutex_consistent_np = __mutex_consistent 26924574Sraf #pragma weak _pthread_mutex_consistent_np = __mutex_consistent 26934574Sraf int 26944574Sraf __mutex_consistent(mutex_t *mp) 26954574Sraf { 26964574Sraf /* 26974574Sraf * Do this only for an inconsistent, initialized robust lock 26984574Sraf * that we hold. For all other cases, return EINVAL. 26994574Sraf */ 27004574Sraf if (mutex_is_held(mp) && 27014574Sraf (mp->mutex_type & LOCK_ROBUST) && 27024574Sraf (mp->mutex_flag & LOCK_INITED) && 27034574Sraf (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 27044574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 27054574Sraf mp->mutex_rcount = 0; 27064574Sraf return (0); 27074574Sraf } 27084574Sraf return (EINVAL); 27094574Sraf } 27104574Sraf 27110Sstevel@tonic-gate /* 27120Sstevel@tonic-gate * Spin locks are separate from ordinary mutexes, 27130Sstevel@tonic-gate * but we use the same data structure for them. 
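 * (Editorial addition: a hedged example of the POSIX interface
 * implemented below; the variable name sl is illustrative and not from
 * this file.)
 *
 *	pthread_spinlock_t sl;
 *
 *	(void) pthread_spin_init(&sl, PTHREAD_PROCESS_PRIVATE);
 *	(void) pthread_spin_lock(&sl);
 *	... short critical section; a spin lock never sleeps ...
 *	(void) pthread_spin_unlock(&sl);
 *	(void) pthread_spin_destroy(&sl);
 *
 * Passing PTHREAD_PROCESS_SHARED to pthread_spin_init() simply selects
 * USYNC_PROCESS in the reused mutex_t, as _pthread_spin_init() shows
 * below.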
27140Sstevel@tonic-gate */ 27150Sstevel@tonic-gate 27160Sstevel@tonic-gate #pragma weak pthread_spin_init = _pthread_spin_init 27170Sstevel@tonic-gate int 27180Sstevel@tonic-gate _pthread_spin_init(pthread_spinlock_t *lock, int pshared) 27190Sstevel@tonic-gate { 27200Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 27210Sstevel@tonic-gate 2722*6515Sraf (void) memset(mp, 0, sizeof (*mp)); 27230Sstevel@tonic-gate if (pshared == PTHREAD_PROCESS_SHARED) 27240Sstevel@tonic-gate mp->mutex_type = USYNC_PROCESS; 27250Sstevel@tonic-gate else 27260Sstevel@tonic-gate mp->mutex_type = USYNC_THREAD; 27270Sstevel@tonic-gate mp->mutex_flag = LOCK_INITED; 27280Sstevel@tonic-gate mp->mutex_magic = MUTEX_MAGIC; 27290Sstevel@tonic-gate return (0); 27300Sstevel@tonic-gate } 27310Sstevel@tonic-gate 27320Sstevel@tonic-gate #pragma weak pthread_spin_destroy = _pthread_spin_destroy 27330Sstevel@tonic-gate int 27340Sstevel@tonic-gate _pthread_spin_destroy(pthread_spinlock_t *lock) 27350Sstevel@tonic-gate { 2736*6515Sraf (void) memset(lock, 0, sizeof (*lock)); 27370Sstevel@tonic-gate return (0); 27380Sstevel@tonic-gate } 27390Sstevel@tonic-gate 27400Sstevel@tonic-gate #pragma weak pthread_spin_trylock = _pthread_spin_trylock 27410Sstevel@tonic-gate int 27420Sstevel@tonic-gate _pthread_spin_trylock(pthread_spinlock_t *lock) 27430Sstevel@tonic-gate { 27440Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 27450Sstevel@tonic-gate ulwp_t *self = curthread; 27460Sstevel@tonic-gate int error = 0; 27470Sstevel@tonic-gate 27480Sstevel@tonic-gate no_preempt(self); 27490Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) != 0) 27500Sstevel@tonic-gate error = EBUSY; 27510Sstevel@tonic-gate else { 27520Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 27530Sstevel@tonic-gate if (mp->mutex_type == USYNC_PROCESS) 27540Sstevel@tonic-gate mp->mutex_ownerpid = self->ul_uberdata->pid; 27550Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 27560Sstevel@tonic-gate } 27570Sstevel@tonic-gate preempt(self); 27580Sstevel@tonic-gate return (error); 27590Sstevel@tonic-gate } 27600Sstevel@tonic-gate 27610Sstevel@tonic-gate #pragma weak pthread_spin_lock = _pthread_spin_lock 27620Sstevel@tonic-gate int 27630Sstevel@tonic-gate _pthread_spin_lock(pthread_spinlock_t *lock) 27640Sstevel@tonic-gate { 27654574Sraf mutex_t *mp = (mutex_t *)lock; 27664574Sraf ulwp_t *self = curthread; 27674574Sraf volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw; 27684574Sraf int count = 0; 27694574Sraf 27704574Sraf ASSERT(!self->ul_critical || self->ul_bindflags); 27714574Sraf 27724574Sraf DTRACE_PROBE1(plockstat, mutex__spin, mp); 27734574Sraf 27740Sstevel@tonic-gate /* 27750Sstevel@tonic-gate * We don't care whether the owner is running on a processor. 27760Sstevel@tonic-gate * We just spin because that's what this interface requires. 
27770Sstevel@tonic-gate */ 27780Sstevel@tonic-gate for (;;) { 27790Sstevel@tonic-gate if (*lockp == 0) { /* lock byte appears to be clear */ 27804574Sraf no_preempt(self); 27814574Sraf if (set_lock_byte(lockp) == 0) 27824574Sraf break; 27834574Sraf preempt(self); 27840Sstevel@tonic-gate } 27855629Sraf if (count < INT_MAX) 27865629Sraf count++; 27870Sstevel@tonic-gate SMT_PAUSE(); 27880Sstevel@tonic-gate } 27894574Sraf mp->mutex_owner = (uintptr_t)self; 27904574Sraf if (mp->mutex_type == USYNC_PROCESS) 27914574Sraf mp->mutex_ownerpid = self->ul_uberdata->pid; 27924574Sraf preempt(self); 27935629Sraf if (count) { 27945629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 27955629Sraf } 27964574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 27974574Sraf return (0); 27980Sstevel@tonic-gate } 27990Sstevel@tonic-gate 28000Sstevel@tonic-gate #pragma weak pthread_spin_unlock = _pthread_spin_unlock 28010Sstevel@tonic-gate int 28020Sstevel@tonic-gate _pthread_spin_unlock(pthread_spinlock_t *lock) 28030Sstevel@tonic-gate { 28040Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 28050Sstevel@tonic-gate ulwp_t *self = curthread; 28060Sstevel@tonic-gate 28070Sstevel@tonic-gate no_preempt(self); 28080Sstevel@tonic-gate mp->mutex_owner = 0; 28090Sstevel@tonic-gate mp->mutex_ownerpid = 0; 28100Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 28114570Sraf (void) atomic_swap_32(&mp->mutex_lockword, 0); 28120Sstevel@tonic-gate preempt(self); 28130Sstevel@tonic-gate return (0); 28140Sstevel@tonic-gate } 28150Sstevel@tonic-gate 28165629Sraf #define INITIAL_LOCKS 8 /* initial size of ul_heldlocks.array */ 28174574Sraf 28184574Sraf /* 28194574Sraf * Find/allocate an entry for 'lock' in our array of held locks. 28204574Sraf */ 28214574Sraf static mutex_t ** 28224574Sraf find_lock_entry(mutex_t *lock) 28234574Sraf { 28244574Sraf ulwp_t *self = curthread; 28254574Sraf mutex_t **remembered = NULL; 28264574Sraf mutex_t **lockptr; 28274574Sraf uint_t nlocks; 28284574Sraf 28294574Sraf if ((nlocks = self->ul_heldlockcnt) != 0) 28304574Sraf lockptr = self->ul_heldlocks.array; 28314574Sraf else { 28324574Sraf nlocks = 1; 28334574Sraf lockptr = &self->ul_heldlocks.single; 28344574Sraf } 28354574Sraf 28364574Sraf for (; nlocks; nlocks--, lockptr++) { 28374574Sraf if (*lockptr == lock) 28384574Sraf return (lockptr); 28394574Sraf if (*lockptr == NULL && remembered == NULL) 28404574Sraf remembered = lockptr; 28414574Sraf } 28424574Sraf if (remembered != NULL) { 28434574Sraf *remembered = lock; 28444574Sraf return (remembered); 28454574Sraf } 28464574Sraf 28474574Sraf /* 28484574Sraf * No entry available. Allocate more space, converting 28494574Sraf * the single entry into an array of entries if necessary. 28504574Sraf */ 28514574Sraf if ((nlocks = self->ul_heldlockcnt) == 0) { 28524574Sraf /* 28534574Sraf * Initial allocation of the array. 28544574Sraf * Convert the single entry into an array. 28554574Sraf */ 28564574Sraf self->ul_heldlockcnt = nlocks = INITIAL_LOCKS; 28574574Sraf lockptr = lmalloc(nlocks * sizeof (mutex_t *)); 28584574Sraf /* 28594574Sraf * The single entry becomes the first entry in the array. 28604574Sraf */ 28614574Sraf *lockptr = self->ul_heldlocks.single; 28624574Sraf self->ul_heldlocks.array = lockptr; 28634574Sraf /* 28644574Sraf * Return the next available entry in the array. 28654574Sraf */ 28664574Sraf *++lockptr = lock; 28674574Sraf return (lockptr); 28684574Sraf } 28694574Sraf /* 28704574Sraf * Reallocate the array, double the size each time. 
28714574Sraf */ 28724574Sraf lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *)); 2873*6515Sraf (void) memcpy(lockptr, self->ul_heldlocks.array, 28744574Sraf nlocks * sizeof (mutex_t *)); 28754574Sraf lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *)); 28764574Sraf self->ul_heldlocks.array = lockptr; 28774574Sraf self->ul_heldlockcnt *= 2; 28784574Sraf /* 28794574Sraf * Return the next available entry in the newly allocated array. 28804574Sraf */ 28814574Sraf *(lockptr += nlocks) = lock; 28824574Sraf return (lockptr); 28834574Sraf } 28844574Sraf 28854574Sraf /* 28864574Sraf * Insert 'lock' into our list of held locks. 28874574Sraf * Currently only used for LOCK_ROBUST mutexes. 28884574Sraf */ 28894574Sraf void 28904574Sraf remember_lock(mutex_t *lock) 28914574Sraf { 28924574Sraf (void) find_lock_entry(lock); 28934574Sraf } 28944574Sraf 28954574Sraf /* 28964574Sraf * Remove 'lock' from our list of held locks. 28974574Sraf * Currently only used for LOCK_ROBUST mutexes. 28984574Sraf */ 28994574Sraf void 29004574Sraf forget_lock(mutex_t *lock) 29014574Sraf { 29024574Sraf *find_lock_entry(lock) = NULL; 29034574Sraf } 29044574Sraf 29054574Sraf /* 29064574Sraf * Free the array of held locks. 29074574Sraf */ 29084574Sraf void 29094574Sraf heldlock_free(ulwp_t *ulwp) 29104574Sraf { 29114574Sraf uint_t nlocks; 29124574Sraf 29134574Sraf if ((nlocks = ulwp->ul_heldlockcnt) != 0) 29144574Sraf lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *)); 29154574Sraf ulwp->ul_heldlockcnt = 0; 29164574Sraf ulwp->ul_heldlocks.array = NULL; 29174574Sraf } 29184574Sraf 29194574Sraf /* 29204574Sraf * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD. 29214574Sraf * Called from _thrp_exit() to deal with abandoned locks. 29224574Sraf */ 29234574Sraf void 29244574Sraf heldlock_exit(void) 29254574Sraf { 29264574Sraf ulwp_t *self = curthread; 29274574Sraf mutex_t **lockptr; 29284574Sraf uint_t nlocks; 29294574Sraf mutex_t *mp; 29304574Sraf 29314574Sraf if ((nlocks = self->ul_heldlockcnt) != 0) 29324574Sraf lockptr = self->ul_heldlocks.array; 29334574Sraf else { 29344574Sraf nlocks = 1; 29354574Sraf lockptr = &self->ul_heldlocks.single; 29364574Sraf } 29374574Sraf 29384574Sraf for (; nlocks; nlocks--, lockptr++) { 29394574Sraf /* 29404574Sraf * The kernel takes care of transitioning held 29414574Sraf * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD. 29424574Sraf * We avoid that case here. 
29434574Sraf */ 29444574Sraf if ((mp = *lockptr) != NULL && 29454574Sraf mutex_is_held(mp) && 29464574Sraf (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) == 29474574Sraf LOCK_ROBUST) { 29484574Sraf mp->mutex_rcount = 0; 29494574Sraf if (!(mp->mutex_flag & LOCK_UNMAPPED)) 29504574Sraf mp->mutex_flag |= LOCK_OWNERDEAD; 29514574Sraf (void) mutex_unlock_internal(mp, 1); 29524574Sraf } 29534574Sraf } 29544574Sraf 29554574Sraf heldlock_free(self); 29564574Sraf } 29574574Sraf 29580Sstevel@tonic-gate #pragma weak cond_init = _cond_init 29590Sstevel@tonic-gate /* ARGSUSED2 */ 29600Sstevel@tonic-gate int 29610Sstevel@tonic-gate _cond_init(cond_t *cvp, int type, void *arg) 29620Sstevel@tonic-gate { 29630Sstevel@tonic-gate if (type != USYNC_THREAD && type != USYNC_PROCESS) 29640Sstevel@tonic-gate return (EINVAL); 2965*6515Sraf (void) memset(cvp, 0, sizeof (*cvp)); 29660Sstevel@tonic-gate cvp->cond_type = (uint16_t)type; 29670Sstevel@tonic-gate cvp->cond_magic = COND_MAGIC; 29680Sstevel@tonic-gate return (0); 29690Sstevel@tonic-gate } 29700Sstevel@tonic-gate 29710Sstevel@tonic-gate /* 29720Sstevel@tonic-gate * cond_sleep_queue(): utility function for cond_wait_queue(). 29730Sstevel@tonic-gate * 29740Sstevel@tonic-gate * Go to sleep on a condvar sleep queue, expect to be waked up 29750Sstevel@tonic-gate * by someone calling cond_signal() or cond_broadcast() or due 29760Sstevel@tonic-gate * to receiving a UNIX signal or being cancelled, or just simply 29770Sstevel@tonic-gate * due to a spurious wakeup (like someone calling forkall()). 29780Sstevel@tonic-gate * 29790Sstevel@tonic-gate * The associated mutex is *not* reacquired before returning. 29800Sstevel@tonic-gate * That must be done by the caller of cond_sleep_queue(). 29810Sstevel@tonic-gate */ 29824574Sraf static int 29830Sstevel@tonic-gate cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 29840Sstevel@tonic-gate { 29850Sstevel@tonic-gate ulwp_t *self = curthread; 29860Sstevel@tonic-gate queue_head_t *qp; 29870Sstevel@tonic-gate queue_head_t *mqp; 29880Sstevel@tonic-gate lwpid_t lwpid; 29890Sstevel@tonic-gate int signalled; 29900Sstevel@tonic-gate int error; 29916247Sraf int cv_wake; 29924574Sraf int release_all; 29930Sstevel@tonic-gate 29940Sstevel@tonic-gate /* 29950Sstevel@tonic-gate * Put ourself on the CV sleep queue, unlock the mutex, then 29960Sstevel@tonic-gate * park ourself and unpark a candidate lwp to grab the mutex. 29970Sstevel@tonic-gate * We must go onto the CV sleep queue before dropping the 29980Sstevel@tonic-gate * mutex in order to guarantee atomicity of the operation. 
29990Sstevel@tonic-gate */ 30000Sstevel@tonic-gate self->ul_sp = stkptr(); 30010Sstevel@tonic-gate qp = queue_lock(cvp, CV); 30026247Sraf enqueue(qp, self, 0); 30030Sstevel@tonic-gate cvp->cond_waiters_user = 1; 30040Sstevel@tonic-gate self->ul_cvmutex = mp; 30056247Sraf self->ul_cv_wake = cv_wake = (tsp != NULL); 30060Sstevel@tonic-gate self->ul_signalled = 0; 30074574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 30084574Sraf mp->mutex_flag &= ~LOCK_OWNERDEAD; 30094574Sraf mp->mutex_flag |= LOCK_NOTRECOVERABLE; 30104574Sraf } 30114574Sraf release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); 30124574Sraf lwpid = mutex_unlock_queue(mp, release_all); 30130Sstevel@tonic-gate for (;;) { 30140Sstevel@tonic-gate set_parking_flag(self, 1); 30150Sstevel@tonic-gate queue_unlock(qp); 30160Sstevel@tonic-gate if (lwpid != 0) { 30170Sstevel@tonic-gate lwpid = preempt_unpark(self, lwpid); 30180Sstevel@tonic-gate preempt(self); 30190Sstevel@tonic-gate } 30200Sstevel@tonic-gate /* 30210Sstevel@tonic-gate * We may have a deferred signal present, 30220Sstevel@tonic-gate * in which case we should return EINTR. 30230Sstevel@tonic-gate * Also, we may have received a SIGCANCEL; if so 30240Sstevel@tonic-gate * and we are cancelable we should return EINTR. 30250Sstevel@tonic-gate * We force an immediate EINTR return from 30260Sstevel@tonic-gate * __lwp_park() by turning our parking flag off. 30270Sstevel@tonic-gate */ 30280Sstevel@tonic-gate if (self->ul_cursig != 0 || 30290Sstevel@tonic-gate (self->ul_cancelable && self->ul_cancel_pending)) 30300Sstevel@tonic-gate set_parking_flag(self, 0); 30310Sstevel@tonic-gate /* 30320Sstevel@tonic-gate * __lwp_park() will return the residual time in tsp 30330Sstevel@tonic-gate * if we are unparked before the timeout expires. 30340Sstevel@tonic-gate */ 30350Sstevel@tonic-gate error = __lwp_park(tsp, lwpid); 30360Sstevel@tonic-gate set_parking_flag(self, 0); 30370Sstevel@tonic-gate lwpid = 0; /* unpark the other lwp only once */ 30380Sstevel@tonic-gate /* 30390Sstevel@tonic-gate * We were waked up by cond_signal(), cond_broadcast(), 30400Sstevel@tonic-gate * by an interrupt or timeout (EINTR or ETIME), 30410Sstevel@tonic-gate * or we may just have gotten a spurious wakeup. 30420Sstevel@tonic-gate */ 30430Sstevel@tonic-gate qp = queue_lock(cvp, CV); 30446247Sraf if (!cv_wake) 30456247Sraf mqp = queue_lock(mp, MX); 30460Sstevel@tonic-gate if (self->ul_sleepq == NULL) 30470Sstevel@tonic-gate break; 30480Sstevel@tonic-gate /* 30490Sstevel@tonic-gate * We are on either the condvar sleep queue or the 30501893Sraf * mutex sleep queue. Break out of the sleep if we 30511893Sraf * were interrupted or we timed out (EINTR or ETIME). 30520Sstevel@tonic-gate * Else this is a spurious wakeup; continue the loop. 30530Sstevel@tonic-gate */ 30546247Sraf if (!cv_wake && self->ul_sleepq == mqp) { /* mutex queue */ 30551893Sraf if (error) { 30566247Sraf mp->mutex_waiters = dequeue_self(mqp); 30571893Sraf break; 30581893Sraf } 30591893Sraf tsp = NULL; /* no more timeout */ 30601893Sraf } else if (self->ul_sleepq == qp) { /* condvar queue */ 30610Sstevel@tonic-gate if (error) { 30626247Sraf cvp->cond_waiters_user = dequeue_self(qp); 30630Sstevel@tonic-gate break; 30640Sstevel@tonic-gate } 30650Sstevel@tonic-gate /* 30660Sstevel@tonic-gate * Else a spurious wakeup on the condvar queue. 30670Sstevel@tonic-gate * __lwp_park() has already adjusted the timeout. 
30680Sstevel@tonic-gate */ 30690Sstevel@tonic-gate } else { 30700Sstevel@tonic-gate thr_panic("cond_sleep_queue(): thread not on queue"); 30710Sstevel@tonic-gate } 30726247Sraf if (!cv_wake) 30736247Sraf queue_unlock(mqp); 30740Sstevel@tonic-gate } 30750Sstevel@tonic-gate 30760Sstevel@tonic-gate self->ul_sp = 0; 30776247Sraf self->ul_cv_wake = 0; 30786247Sraf ASSERT(self->ul_cvmutex == NULL); 30790Sstevel@tonic-gate ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 30800Sstevel@tonic-gate self->ul_wchan == NULL); 30810Sstevel@tonic-gate 30820Sstevel@tonic-gate signalled = self->ul_signalled; 30830Sstevel@tonic-gate self->ul_signalled = 0; 30840Sstevel@tonic-gate queue_unlock(qp); 30856247Sraf if (!cv_wake) 30866247Sraf queue_unlock(mqp); 30870Sstevel@tonic-gate 30880Sstevel@tonic-gate /* 30890Sstevel@tonic-gate * If we were concurrently cond_signal()d and any of: 30900Sstevel@tonic-gate * received a UNIX signal, were cancelled, or got a timeout, 30910Sstevel@tonic-gate * then perform another cond_signal() to avoid consuming it. 30920Sstevel@tonic-gate */ 30930Sstevel@tonic-gate if (error && signalled) 30940Sstevel@tonic-gate (void) cond_signal_internal(cvp); 30950Sstevel@tonic-gate 30960Sstevel@tonic-gate return (error); 30970Sstevel@tonic-gate } 30980Sstevel@tonic-gate 30990Sstevel@tonic-gate int 31005629Sraf cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 31010Sstevel@tonic-gate { 31020Sstevel@tonic-gate ulwp_t *self = curthread; 31030Sstevel@tonic-gate int error; 31044574Sraf int merror; 31050Sstevel@tonic-gate 31060Sstevel@tonic-gate /* 31070Sstevel@tonic-gate * The old thread library was programmed to defer signals 31080Sstevel@tonic-gate * while in cond_wait() so that the associated mutex would 31090Sstevel@tonic-gate * be guaranteed to be held when the application signal 31100Sstevel@tonic-gate * handler was invoked. 31110Sstevel@tonic-gate * 31120Sstevel@tonic-gate * We do not behave this way by default; the state of the 31130Sstevel@tonic-gate * associated mutex in the signal handler is undefined. 31140Sstevel@tonic-gate * 31150Sstevel@tonic-gate * To accommodate applications that depend on the old 31160Sstevel@tonic-gate * behavior, the _THREAD_COND_WAIT_DEFER environment 31170Sstevel@tonic-gate * variable can be set to 1 and we will behave in the 31180Sstevel@tonic-gate * old way with respect to cond_wait(). 31190Sstevel@tonic-gate */ 31200Sstevel@tonic-gate if (self->ul_cond_wait_defer) 31210Sstevel@tonic-gate sigoff(self); 31220Sstevel@tonic-gate 31230Sstevel@tonic-gate error = cond_sleep_queue(cvp, mp, tsp); 31240Sstevel@tonic-gate 31250Sstevel@tonic-gate /* 31260Sstevel@tonic-gate * Reacquire the mutex. 31270Sstevel@tonic-gate */ 31285629Sraf if ((merror = mutex_lock_impl(mp, NULL)) != 0) 31294574Sraf error = merror; 31300Sstevel@tonic-gate 31310Sstevel@tonic-gate /* 31320Sstevel@tonic-gate * Take any deferred signal now, after we have reacquired the mutex. 31330Sstevel@tonic-gate */ 31340Sstevel@tonic-gate if (self->ul_cond_wait_defer) 31350Sstevel@tonic-gate sigon(self); 31360Sstevel@tonic-gate 31370Sstevel@tonic-gate return (error); 31380Sstevel@tonic-gate } 31390Sstevel@tonic-gate 31400Sstevel@tonic-gate /* 31410Sstevel@tonic-gate * cond_sleep_kernel(): utility function for cond_wait_kernel(). 31420Sstevel@tonic-gate * See the comment ahead of cond_sleep_queue(), above. 
31430Sstevel@tonic-gate */ 31444574Sraf static int 31450Sstevel@tonic-gate cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 31460Sstevel@tonic-gate { 31470Sstevel@tonic-gate int mtype = mp->mutex_type; 31480Sstevel@tonic-gate ulwp_t *self = curthread; 31490Sstevel@tonic-gate int error; 31500Sstevel@tonic-gate 31514574Sraf if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) 31524574Sraf _ceil_prio_waive(); 31530Sstevel@tonic-gate 31540Sstevel@tonic-gate self->ul_sp = stkptr(); 31550Sstevel@tonic-gate self->ul_wchan = cvp; 31560Sstevel@tonic-gate mp->mutex_owner = 0; 31576057Sraf /* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */ 31586247Sraf if (mtype & LOCK_PRIO_INHERIT) { 31590Sstevel@tonic-gate mp->mutex_lockw = LOCKCLEAR; 31606247Sraf self->ul_pilocks--; 31616247Sraf } 31620Sstevel@tonic-gate /* 31630Sstevel@tonic-gate * ___lwp_cond_wait() returns immediately with EINTR if 31640Sstevel@tonic-gate * set_parking_flag(self,0) is called on this lwp before it 31650Sstevel@tonic-gate * goes to sleep in the kernel. sigacthandler() calls this 31660Sstevel@tonic-gate * when a deferred signal is noted. This assures that we don't 31670Sstevel@tonic-gate * get stuck in ___lwp_cond_wait() with all signals blocked 31680Sstevel@tonic-gate * due to taking a deferred signal before going to sleep. 31690Sstevel@tonic-gate */ 31700Sstevel@tonic-gate set_parking_flag(self, 1); 31710Sstevel@tonic-gate if (self->ul_cursig != 0 || 31720Sstevel@tonic-gate (self->ul_cancelable && self->ul_cancel_pending)) 31730Sstevel@tonic-gate set_parking_flag(self, 0); 31740Sstevel@tonic-gate error = ___lwp_cond_wait(cvp, mp, tsp, 1); 31750Sstevel@tonic-gate set_parking_flag(self, 0); 31760Sstevel@tonic-gate self->ul_sp = 0; 31770Sstevel@tonic-gate self->ul_wchan = NULL; 31780Sstevel@tonic-gate return (error); 31790Sstevel@tonic-gate } 31800Sstevel@tonic-gate 31810Sstevel@tonic-gate int 31820Sstevel@tonic-gate cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 31830Sstevel@tonic-gate { 31840Sstevel@tonic-gate ulwp_t *self = curthread; 31850Sstevel@tonic-gate int error; 31860Sstevel@tonic-gate int merror; 31870Sstevel@tonic-gate 31880Sstevel@tonic-gate /* 31890Sstevel@tonic-gate * See the large comment in cond_wait_queue(), above. 31900Sstevel@tonic-gate */ 31910Sstevel@tonic-gate if (self->ul_cond_wait_defer) 31920Sstevel@tonic-gate sigoff(self); 31930Sstevel@tonic-gate 31940Sstevel@tonic-gate error = cond_sleep_kernel(cvp, mp, tsp); 31950Sstevel@tonic-gate 31960Sstevel@tonic-gate /* 31970Sstevel@tonic-gate * Override the return code from ___lwp_cond_wait() 31980Sstevel@tonic-gate * with any non-zero return code from mutex_lock(). 31990Sstevel@tonic-gate * This addresses robust lock failures in particular; 32000Sstevel@tonic-gate * the caller must see the EOWNERDEAD or ENOTRECOVERABLE 32010Sstevel@tonic-gate * errors in order to take corrective action. 32020Sstevel@tonic-gate */ 32035629Sraf if ((merror = mutex_lock_impl(mp, NULL)) != 0) 32040Sstevel@tonic-gate error = merror; 32050Sstevel@tonic-gate 32060Sstevel@tonic-gate /* 32070Sstevel@tonic-gate * Take any deferred signal now, after we have reacquired the mutex. 
32080Sstevel@tonic-gate */ 32090Sstevel@tonic-gate if (self->ul_cond_wait_defer) 32100Sstevel@tonic-gate sigon(self); 32110Sstevel@tonic-gate 32120Sstevel@tonic-gate return (error); 32130Sstevel@tonic-gate } 32140Sstevel@tonic-gate 32150Sstevel@tonic-gate /* 32160Sstevel@tonic-gate * Common code for _cond_wait() and _cond_timedwait() 32170Sstevel@tonic-gate */ 32180Sstevel@tonic-gate int 32190Sstevel@tonic-gate cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 32200Sstevel@tonic-gate { 32210Sstevel@tonic-gate int mtype = mp->mutex_type; 32220Sstevel@tonic-gate hrtime_t begin_sleep = 0; 32230Sstevel@tonic-gate ulwp_t *self = curthread; 32240Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 32250Sstevel@tonic-gate tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 32260Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 32270Sstevel@tonic-gate uint8_t rcount; 32280Sstevel@tonic-gate int error = 0; 32290Sstevel@tonic-gate 32300Sstevel@tonic-gate /* 32310Sstevel@tonic-gate * The SUSV3 Posix spec for pthread_cond_timedwait() states: 32320Sstevel@tonic-gate * Except in the case of [ETIMEDOUT], all these error checks 32330Sstevel@tonic-gate * shall act as if they were performed immediately at the 32340Sstevel@tonic-gate * beginning of processing for the function and shall cause 32350Sstevel@tonic-gate * an error return, in effect, prior to modifying the state 32360Sstevel@tonic-gate * of the mutex specified by mutex or the condition variable 32370Sstevel@tonic-gate * specified by cond. 32380Sstevel@tonic-gate * Therefore, we must return EINVAL now if the timeout is invalid. 32390Sstevel@tonic-gate */ 32400Sstevel@tonic-gate if (tsp != NULL && 32410Sstevel@tonic-gate (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC)) 32420Sstevel@tonic-gate return (EINVAL); 32430Sstevel@tonic-gate 32440Sstevel@tonic-gate if (__td_event_report(self, TD_SLEEP, udp)) { 32450Sstevel@tonic-gate self->ul_sp = stkptr(); 32460Sstevel@tonic-gate self->ul_wchan = cvp; 32470Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_SLEEP; 32480Sstevel@tonic-gate self->ul_td_evbuf.eventdata = cvp; 32490Sstevel@tonic-gate tdb_event(TD_SLEEP, udp); 32500Sstevel@tonic-gate self->ul_sp = 0; 32510Sstevel@tonic-gate } 32520Sstevel@tonic-gate if (csp) { 32530Sstevel@tonic-gate if (tsp) 32540Sstevel@tonic-gate tdb_incr(csp->cond_timedwait); 32550Sstevel@tonic-gate else 32560Sstevel@tonic-gate tdb_incr(csp->cond_wait); 32570Sstevel@tonic-gate } 32580Sstevel@tonic-gate if (msp) 32590Sstevel@tonic-gate begin_sleep = record_hold_time(msp); 32600Sstevel@tonic-gate else if (csp) 32610Sstevel@tonic-gate begin_sleep = gethrtime(); 32620Sstevel@tonic-gate 32630Sstevel@tonic-gate if (self->ul_error_detection) { 32640Sstevel@tonic-gate if (!mutex_is_held(mp)) 32650Sstevel@tonic-gate lock_error(mp, "cond_wait", cvp, NULL); 32660Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) 32670Sstevel@tonic-gate lock_error(mp, "recursive mutex in cond_wait", 32685629Sraf cvp, NULL); 32690Sstevel@tonic-gate if (cvp->cond_type & USYNC_PROCESS) { 32704574Sraf if (!(mtype & USYNC_PROCESS)) 32710Sstevel@tonic-gate lock_error(mp, "cond_wait", cvp, 32725629Sraf "condvar process-shared, " 32735629Sraf "mutex process-private"); 32740Sstevel@tonic-gate } else { 32754574Sraf if (mtype & USYNC_PROCESS) 32760Sstevel@tonic-gate lock_error(mp, "cond_wait", cvp, 32775629Sraf "condvar process-private, " 32785629Sraf "mutex process-shared"); 32790Sstevel@tonic-gate } 32800Sstevel@tonic-gate } 32810Sstevel@tonic-gate 
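	/*
	 * Illustrative sketch (not part of libc, compiled out): a matched
	 * process-shared initialization that satisfies the error-detection
	 * checks above.  Mixing a process-shared condvar with a
	 * process-private mutex (or vice versa) is exactly what lock_error()
	 * reports here.  Assumes <pthread.h>; in real use the mutex and
	 * condvar would live in memory shared by the cooperating processes.
	 */
#if 0
	{
		pthread_mutexattr_t ma;
		pthread_condattr_t ca;
		pthread_mutex_t m;
		pthread_cond_t c;

		(void) pthread_mutexattr_init(&ma);
		(void) pthread_mutexattr_setpshared(&ma, PTHREAD_PROCESS_SHARED);
		(void) pthread_mutex_init(&m, &ma);

		(void) pthread_condattr_init(&ca);
		(void) pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED);
		(void) pthread_cond_init(&c, &ca);

		/* pthread_cond_wait(&c, &m) now passes the checks above */
	}
#endif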
32820Sstevel@tonic-gate /* 32830Sstevel@tonic-gate * We deal with recursive mutexes by completely 32840Sstevel@tonic-gate * dropping the lock and restoring the recursion 32850Sstevel@tonic-gate * count after waking up. This is arguably wrong, 32860Sstevel@tonic-gate * but it obeys the principle of least astonishment. 32870Sstevel@tonic-gate */ 32880Sstevel@tonic-gate rcount = mp->mutex_rcount; 32890Sstevel@tonic-gate mp->mutex_rcount = 0; 32904574Sraf if ((mtype & 32914574Sraf (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) | 32920Sstevel@tonic-gate (cvp->cond_type & USYNC_PROCESS)) 32930Sstevel@tonic-gate error = cond_wait_kernel(cvp, mp, tsp); 32940Sstevel@tonic-gate else 32955629Sraf error = cond_wait_queue(cvp, mp, tsp); 32960Sstevel@tonic-gate mp->mutex_rcount = rcount; 32970Sstevel@tonic-gate 32980Sstevel@tonic-gate if (csp) { 32990Sstevel@tonic-gate hrtime_t lapse = gethrtime() - begin_sleep; 33000Sstevel@tonic-gate if (tsp == NULL) 33010Sstevel@tonic-gate csp->cond_wait_sleep_time += lapse; 33020Sstevel@tonic-gate else { 33030Sstevel@tonic-gate csp->cond_timedwait_sleep_time += lapse; 33040Sstevel@tonic-gate if (error == ETIME) 33050Sstevel@tonic-gate tdb_incr(csp->cond_timedwait_timeout); 33060Sstevel@tonic-gate } 33070Sstevel@tonic-gate } 33080Sstevel@tonic-gate return (error); 33090Sstevel@tonic-gate } 33100Sstevel@tonic-gate 33110Sstevel@tonic-gate /* 33125891Sraf * cond_wait() and _cond_wait() are cancellation points but __cond_wait() 33135891Sraf * is not. Internally, libc calls the non-cancellation version. 33145891Sraf * Other libraries need to use pthread_setcancelstate(), as appropriate, 33155891Sraf * since __cond_wait() is not exported from libc. 33160Sstevel@tonic-gate */ 33170Sstevel@tonic-gate int 33185891Sraf __cond_wait(cond_t *cvp, mutex_t *mp) 33190Sstevel@tonic-gate { 33200Sstevel@tonic-gate ulwp_t *self = curthread; 33210Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 33220Sstevel@tonic-gate uberflags_t *gflags; 33230Sstevel@tonic-gate 33240Sstevel@tonic-gate /* 33250Sstevel@tonic-gate * Optimize the common case of USYNC_THREAD plus 33260Sstevel@tonic-gate * no error detection, no lock statistics, and no event tracing. 33270Sstevel@tonic-gate */ 33280Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 33290Sstevel@tonic-gate (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted | 33300Sstevel@tonic-gate self->ul_td_events_enable | 33310Sstevel@tonic-gate udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0) 33325629Sraf return (cond_wait_queue(cvp, mp, NULL)); 33330Sstevel@tonic-gate 33340Sstevel@tonic-gate /* 33350Sstevel@tonic-gate * Else do it the long way. 33360Sstevel@tonic-gate */ 33370Sstevel@tonic-gate return (cond_wait_common(cvp, mp, NULL)); 33380Sstevel@tonic-gate } 33390Sstevel@tonic-gate 33405891Sraf #pragma weak cond_wait = _cond_wait 33410Sstevel@tonic-gate int 33425891Sraf _cond_wait(cond_t *cvp, mutex_t *mp) 33430Sstevel@tonic-gate { 33440Sstevel@tonic-gate int error; 33450Sstevel@tonic-gate 33460Sstevel@tonic-gate _cancelon(); 33475891Sraf error = __cond_wait(cvp, mp); 33480Sstevel@tonic-gate if (error == EINTR) 33490Sstevel@tonic-gate _canceloff(); 33500Sstevel@tonic-gate else 33510Sstevel@tonic-gate _canceloff_nocancel(); 33520Sstevel@tonic-gate return (error); 33530Sstevel@tonic-gate } 33540Sstevel@tonic-gate 33555891Sraf /* 33565891Sraf * pthread_cond_wait() is a cancellation point. 
33575891Sraf */ 33580Sstevel@tonic-gate #pragma weak pthread_cond_wait = _pthread_cond_wait 33590Sstevel@tonic-gate int 33600Sstevel@tonic-gate _pthread_cond_wait(cond_t *cvp, mutex_t *mp) 33610Sstevel@tonic-gate { 33620Sstevel@tonic-gate int error; 33630Sstevel@tonic-gate 33645891Sraf error = _cond_wait(cvp, mp); 33650Sstevel@tonic-gate return ((error == EINTR)? 0 : error); 33660Sstevel@tonic-gate } 33670Sstevel@tonic-gate 33680Sstevel@tonic-gate /* 33695891Sraf * cond_timedwait() and _cond_timedwait() are cancellation points 33705891Sraf * but __cond_timedwait() is not. 33710Sstevel@tonic-gate */ 33720Sstevel@tonic-gate int 33735891Sraf __cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 33740Sstevel@tonic-gate { 33750Sstevel@tonic-gate clockid_t clock_id = cvp->cond_clockid; 33760Sstevel@tonic-gate timespec_t reltime; 33770Sstevel@tonic-gate int error; 33780Sstevel@tonic-gate 33790Sstevel@tonic-gate if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES) 33800Sstevel@tonic-gate clock_id = CLOCK_REALTIME; 33810Sstevel@tonic-gate abstime_to_reltime(clock_id, abstime, &reltime); 33820Sstevel@tonic-gate error = cond_wait_common(cvp, mp, &reltime); 33830Sstevel@tonic-gate if (error == ETIME && clock_id == CLOCK_HIGHRES) { 33840Sstevel@tonic-gate /* 33850Sstevel@tonic-gate * Don't return ETIME if we didn't really get a timeout. 33860Sstevel@tonic-gate * This can happen if we return because someone resets 33870Sstevel@tonic-gate * the system clock. Just return zero in this case, 33880Sstevel@tonic-gate * giving a spurious wakeup but not a timeout. 33890Sstevel@tonic-gate */ 33900Sstevel@tonic-gate if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC + 33910Sstevel@tonic-gate abstime->tv_nsec > gethrtime()) 33920Sstevel@tonic-gate error = 0; 33930Sstevel@tonic-gate } 33940Sstevel@tonic-gate return (error); 33950Sstevel@tonic-gate } 33960Sstevel@tonic-gate 33975891Sraf #pragma weak cond_timedwait = _cond_timedwait 33980Sstevel@tonic-gate int 33995891Sraf _cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 34000Sstevel@tonic-gate { 34010Sstevel@tonic-gate int error; 34020Sstevel@tonic-gate 34030Sstevel@tonic-gate _cancelon(); 34045891Sraf error = __cond_timedwait(cvp, mp, abstime); 34050Sstevel@tonic-gate if (error == EINTR) 34060Sstevel@tonic-gate _canceloff(); 34070Sstevel@tonic-gate else 34080Sstevel@tonic-gate _canceloff_nocancel(); 34090Sstevel@tonic-gate return (error); 34100Sstevel@tonic-gate } 34110Sstevel@tonic-gate 34125891Sraf /* 34135891Sraf * pthread_cond_timedwait() is a cancellation point. 34145891Sraf */ 34150Sstevel@tonic-gate #pragma weak pthread_cond_timedwait = _pthread_cond_timedwait 34160Sstevel@tonic-gate int 34170Sstevel@tonic-gate _pthread_cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 34180Sstevel@tonic-gate { 34190Sstevel@tonic-gate int error; 34200Sstevel@tonic-gate 34215891Sraf error = _cond_timedwait(cvp, mp, abstime); 34220Sstevel@tonic-gate if (error == ETIME) 34230Sstevel@tonic-gate error = ETIMEDOUT; 34240Sstevel@tonic-gate else if (error == EINTR) 34250Sstevel@tonic-gate error = 0; 34260Sstevel@tonic-gate return (error); 34270Sstevel@tonic-gate } 34280Sstevel@tonic-gate 34290Sstevel@tonic-gate /* 34305891Sraf * cond_reltimedwait() and _cond_reltimedwait() are cancellation points 34315891Sraf * but __cond_reltimedwait() is not. 
34320Sstevel@tonic-gate */ 34330Sstevel@tonic-gate int 34345891Sraf __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) 34350Sstevel@tonic-gate { 34360Sstevel@tonic-gate timespec_t tslocal = *reltime; 34370Sstevel@tonic-gate 34380Sstevel@tonic-gate return (cond_wait_common(cvp, mp, &tslocal)); 34390Sstevel@tonic-gate } 34400Sstevel@tonic-gate 34415891Sraf #pragma weak cond_reltimedwait = _cond_reltimedwait 34420Sstevel@tonic-gate int 34435891Sraf _cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) 34440Sstevel@tonic-gate { 34450Sstevel@tonic-gate int error; 34460Sstevel@tonic-gate 34470Sstevel@tonic-gate _cancelon(); 34485891Sraf error = __cond_reltimedwait(cvp, mp, reltime); 34490Sstevel@tonic-gate if (error == EINTR) 34500Sstevel@tonic-gate _canceloff(); 34510Sstevel@tonic-gate else 34520Sstevel@tonic-gate _canceloff_nocancel(); 34530Sstevel@tonic-gate return (error); 34540Sstevel@tonic-gate } 34550Sstevel@tonic-gate 34560Sstevel@tonic-gate #pragma weak pthread_cond_reltimedwait_np = _pthread_cond_reltimedwait_np 34570Sstevel@tonic-gate int 34580Sstevel@tonic-gate _pthread_cond_reltimedwait_np(cond_t *cvp, mutex_t *mp, 34590Sstevel@tonic-gate const timespec_t *reltime) 34600Sstevel@tonic-gate { 34610Sstevel@tonic-gate int error; 34620Sstevel@tonic-gate 34635891Sraf error = _cond_reltimedwait(cvp, mp, reltime); 34640Sstevel@tonic-gate if (error == ETIME) 34650Sstevel@tonic-gate error = ETIMEDOUT; 34660Sstevel@tonic-gate else if (error == EINTR) 34670Sstevel@tonic-gate error = 0; 34680Sstevel@tonic-gate return (error); 34690Sstevel@tonic-gate } 34700Sstevel@tonic-gate 34710Sstevel@tonic-gate #pragma weak pthread_cond_signal = cond_signal_internal 34720Sstevel@tonic-gate #pragma weak _pthread_cond_signal = cond_signal_internal 34730Sstevel@tonic-gate #pragma weak cond_signal = cond_signal_internal 34740Sstevel@tonic-gate #pragma weak _cond_signal = cond_signal_internal 34750Sstevel@tonic-gate int 34760Sstevel@tonic-gate cond_signal_internal(cond_t *cvp) 34770Sstevel@tonic-gate { 34780Sstevel@tonic-gate ulwp_t *self = curthread; 34790Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 34800Sstevel@tonic-gate tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 34810Sstevel@tonic-gate int error = 0; 34826247Sraf int more; 34836247Sraf lwpid_t lwpid; 34840Sstevel@tonic-gate queue_head_t *qp; 34850Sstevel@tonic-gate mutex_t *mp; 34860Sstevel@tonic-gate queue_head_t *mqp; 34870Sstevel@tonic-gate ulwp_t **ulwpp; 34880Sstevel@tonic-gate ulwp_t *ulwp; 34896247Sraf ulwp_t *prev; 34900Sstevel@tonic-gate 34910Sstevel@tonic-gate if (csp) 34920Sstevel@tonic-gate tdb_incr(csp->cond_signal); 34930Sstevel@tonic-gate 34940Sstevel@tonic-gate if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */ 34950Sstevel@tonic-gate error = __lwp_cond_signal(cvp); 34960Sstevel@tonic-gate 34970Sstevel@tonic-gate if (!cvp->cond_waiters_user) /* no one sleeping at user-level */ 34980Sstevel@tonic-gate return (error); 34990Sstevel@tonic-gate 35000Sstevel@tonic-gate /* 35010Sstevel@tonic-gate * Move someone from the condvar sleep queue to the mutex sleep 35020Sstevel@tonic-gate * queue for the mutex that he will acquire on being waked up. 35030Sstevel@tonic-gate * We can do this only if we own the mutex he will acquire. 35040Sstevel@tonic-gate * If we do not own the mutex, or if his ul_cv_wake flag 35050Sstevel@tonic-gate * is set, just dequeue and unpark him. 
35060Sstevel@tonic-gate */ 35070Sstevel@tonic-gate qp = queue_lock(cvp, CV); 35086247Sraf ulwpp = queue_slot(qp, &prev, &more); 35096247Sraf cvp->cond_waiters_user = more; 35106247Sraf if (ulwpp == NULL) { /* no one on the sleep queue */ 35110Sstevel@tonic-gate queue_unlock(qp); 35120Sstevel@tonic-gate return (error); 35130Sstevel@tonic-gate } 35146247Sraf ulwp = *ulwpp; 35150Sstevel@tonic-gate 35160Sstevel@tonic-gate /* 35170Sstevel@tonic-gate * Inform the thread that he was the recipient of a cond_signal(). 35180Sstevel@tonic-gate * This lets him deal with cond_signal() and, concurrently, 35190Sstevel@tonic-gate * one or more of a cancellation, a UNIX signal, or a timeout. 35200Sstevel@tonic-gate * These latter conditions must not consume a cond_signal(). 35210Sstevel@tonic-gate */ 35220Sstevel@tonic-gate ulwp->ul_signalled = 1; 35230Sstevel@tonic-gate 35240Sstevel@tonic-gate /* 35250Sstevel@tonic-gate * Dequeue the waiter but leave his ul_sleepq non-NULL 35260Sstevel@tonic-gate * while we move him to the mutex queue so that he can 35270Sstevel@tonic-gate * deal properly with spurious wakeups. 35280Sstevel@tonic-gate */ 35296247Sraf queue_unlink(qp, ulwpp, prev); 35300Sstevel@tonic-gate 35310Sstevel@tonic-gate mp = ulwp->ul_cvmutex; /* the mutex he will acquire */ 35320Sstevel@tonic-gate ulwp->ul_cvmutex = NULL; 35330Sstevel@tonic-gate ASSERT(mp != NULL); 35340Sstevel@tonic-gate 35350Sstevel@tonic-gate if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { 35366247Sraf /* just wake him up */ 35376247Sraf lwpid = ulwp->ul_lwpid; 35380Sstevel@tonic-gate no_preempt(self); 35390Sstevel@tonic-gate ulwp->ul_sleepq = NULL; 35400Sstevel@tonic-gate ulwp->ul_wchan = NULL; 35410Sstevel@tonic-gate queue_unlock(qp); 35420Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 35430Sstevel@tonic-gate preempt(self); 35440Sstevel@tonic-gate } else { 35456247Sraf /* move him to the mutex queue */ 35460Sstevel@tonic-gate mqp = queue_lock(mp, MX); 35476247Sraf enqueue(mqp, ulwp, 0); 35480Sstevel@tonic-gate mp->mutex_waiters = 1; 35490Sstevel@tonic-gate queue_unlock(mqp); 35500Sstevel@tonic-gate queue_unlock(qp); 35510Sstevel@tonic-gate } 35520Sstevel@tonic-gate 35530Sstevel@tonic-gate return (error); 35540Sstevel@tonic-gate } 35550Sstevel@tonic-gate 35564570Sraf /* 35574574Sraf * Utility function called by mutex_wakeup_all(), cond_broadcast(), 35584574Sraf * and rw_queue_release() to (re)allocate a big buffer to hold the 35594574Sraf * lwpids of all the threads to be set running after they are removed 35604574Sraf * from their sleep queues. Since we are holding a queue lock, we 35614574Sraf * cannot call any function that might acquire a lock. mmap(), munmap(), 35624574Sraf * lwp_unpark_all() are simple system calls and are safe in this regard. 35634570Sraf */ 35644570Sraf lwpid_t * 35654570Sraf alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr) 35664570Sraf { 35674570Sraf /* 35684570Sraf * Allocate NEWLWPS ids on the first overflow. 35694570Sraf * Double the allocation each time after that. 35704570Sraf */ 35714570Sraf int nlwpid = *nlwpid_ptr; 35724570Sraf int maxlwps = *maxlwps_ptr; 35734570Sraf int first_allocation; 35744570Sraf int newlwps; 35754570Sraf void *vaddr; 35764570Sraf 35774570Sraf ASSERT(nlwpid == maxlwps); 35784570Sraf 35794570Sraf first_allocation = (maxlwps == MAXLWPS); 35804570Sraf newlwps = first_allocation? 
NEWLWPS : 2 * maxlwps; 3581*6515Sraf vaddr = mmap(NULL, newlwps * sizeof (lwpid_t), 35824570Sraf PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0); 35834570Sraf 35844570Sraf if (vaddr == MAP_FAILED) { 35854570Sraf /* 35864570Sraf * Let's hope this never happens. 35874570Sraf * If it does, then we have a terrible 35884570Sraf * thundering herd on our hands. 35894570Sraf */ 35904570Sraf (void) __lwp_unpark_all(lwpid, nlwpid); 35914570Sraf *nlwpid_ptr = 0; 35924570Sraf } else { 3593*6515Sraf (void) memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t)); 35944570Sraf if (!first_allocation) 3595*6515Sraf (void) munmap((caddr_t)lwpid, 35964570Sraf maxlwps * sizeof (lwpid_t)); 35974570Sraf lwpid = vaddr; 35984570Sraf *maxlwps_ptr = newlwps; 35994570Sraf } 36004570Sraf 36014570Sraf return (lwpid); 36024570Sraf } 36030Sstevel@tonic-gate 36040Sstevel@tonic-gate #pragma weak pthread_cond_broadcast = cond_broadcast_internal 36050Sstevel@tonic-gate #pragma weak _pthread_cond_broadcast = cond_broadcast_internal 36060Sstevel@tonic-gate #pragma weak cond_broadcast = cond_broadcast_internal 36070Sstevel@tonic-gate #pragma weak _cond_broadcast = cond_broadcast_internal 36080Sstevel@tonic-gate int 36090Sstevel@tonic-gate cond_broadcast_internal(cond_t *cvp) 36100Sstevel@tonic-gate { 36110Sstevel@tonic-gate ulwp_t *self = curthread; 36120Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 36130Sstevel@tonic-gate tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 36140Sstevel@tonic-gate int error = 0; 36150Sstevel@tonic-gate queue_head_t *qp; 36166247Sraf queue_root_t *qrp; 36170Sstevel@tonic-gate mutex_t *mp; 36180Sstevel@tonic-gate mutex_t *mp_cache = NULL; 36194570Sraf queue_head_t *mqp = NULL; 36200Sstevel@tonic-gate ulwp_t *ulwp; 36214570Sraf int nlwpid = 0; 36224570Sraf int maxlwps = MAXLWPS; 36230Sstevel@tonic-gate lwpid_t buffer[MAXLWPS]; 36240Sstevel@tonic-gate lwpid_t *lwpid = buffer; 36250Sstevel@tonic-gate 36260Sstevel@tonic-gate if (csp) 36270Sstevel@tonic-gate tdb_incr(csp->cond_broadcast); 36280Sstevel@tonic-gate 36290Sstevel@tonic-gate if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */ 36300Sstevel@tonic-gate error = __lwp_cond_broadcast(cvp); 36310Sstevel@tonic-gate 36320Sstevel@tonic-gate if (!cvp->cond_waiters_user) /* no one sleeping at user-level */ 36330Sstevel@tonic-gate return (error); 36340Sstevel@tonic-gate 36350Sstevel@tonic-gate /* 36360Sstevel@tonic-gate * Move everyone from the condvar sleep queue to the mutex sleep 36370Sstevel@tonic-gate * queue for the mutex that they will acquire on being waked up. 36380Sstevel@tonic-gate * We can do this only if we own the mutex they will acquire. 36390Sstevel@tonic-gate * If we do not own the mutex, or if their ul_cv_wake flag 36400Sstevel@tonic-gate * is set, just dequeue and unpark them. 36410Sstevel@tonic-gate * 36420Sstevel@tonic-gate * We keep track of lwpids that are to be unparked in lwpid[]. 36430Sstevel@tonic-gate * __lwp_unpark_all() is called to unpark all of them after 36440Sstevel@tonic-gate * they have been removed from the sleep queue and the sleep 36450Sstevel@tonic-gate * queue lock has been dropped. If we run out of space in our 36460Sstevel@tonic-gate * on-stack buffer, we need to allocate more but we can't call 36470Sstevel@tonic-gate * lmalloc() because we are holding a queue lock when the overflow 36480Sstevel@tonic-gate * occurs and lmalloc() acquires a lock. 
We can't use alloca() 36494570Sraf * either because the application may have allocated a small 36504570Sraf * stack and we don't want to overrun the stack. So we call 36514570Sraf * alloc_lwpids() to allocate a bigger buffer using the mmap() 36520Sstevel@tonic-gate * system call directly since that path acquires no locks. 36530Sstevel@tonic-gate */ 36540Sstevel@tonic-gate qp = queue_lock(cvp, CV); 36550Sstevel@tonic-gate cvp->cond_waiters_user = 0; 36566247Sraf for (;;) { 36576247Sraf if ((qrp = qp->qh_root) == NULL || 36586247Sraf (ulwp = qrp->qr_head) == NULL) 36596247Sraf break; 36606247Sraf ASSERT(ulwp->ul_wchan == cvp); 36616247Sraf queue_unlink(qp, &qrp->qr_head, NULL); 36620Sstevel@tonic-gate mp = ulwp->ul_cvmutex; /* his mutex */ 36630Sstevel@tonic-gate ulwp->ul_cvmutex = NULL; 36640Sstevel@tonic-gate ASSERT(mp != NULL); 36650Sstevel@tonic-gate if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { 36666247Sraf /* just wake him up */ 36670Sstevel@tonic-gate ulwp->ul_sleepq = NULL; 36680Sstevel@tonic-gate ulwp->ul_wchan = NULL; 36694570Sraf if (nlwpid == maxlwps) 36704570Sraf lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 36710Sstevel@tonic-gate lwpid[nlwpid++] = ulwp->ul_lwpid; 36720Sstevel@tonic-gate } else { 36736247Sraf /* move him to the mutex queue */ 36740Sstevel@tonic-gate if (mp != mp_cache) { 36750Sstevel@tonic-gate mp_cache = mp; 36764570Sraf if (mqp != NULL) 36774570Sraf queue_unlock(mqp); 36784570Sraf mqp = queue_lock(mp, MX); 36790Sstevel@tonic-gate } 36806247Sraf enqueue(mqp, ulwp, 0); 36810Sstevel@tonic-gate mp->mutex_waiters = 1; 36820Sstevel@tonic-gate } 36830Sstevel@tonic-gate } 36844570Sraf if (mqp != NULL) 36854570Sraf queue_unlock(mqp); 36864570Sraf if (nlwpid == 0) { 36874570Sraf queue_unlock(qp); 36884570Sraf } else { 36894570Sraf no_preempt(self); 36904570Sraf queue_unlock(qp); 36910Sstevel@tonic-gate if (nlwpid == 1) 36920Sstevel@tonic-gate (void) __lwp_unpark(lwpid[0]); 36930Sstevel@tonic-gate else 36940Sstevel@tonic-gate (void) __lwp_unpark_all(lwpid, nlwpid); 36954570Sraf preempt(self); 36960Sstevel@tonic-gate } 36970Sstevel@tonic-gate if (lwpid != buffer) 3698*6515Sraf (void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t)); 36990Sstevel@tonic-gate return (error); 37000Sstevel@tonic-gate } 37010Sstevel@tonic-gate 37020Sstevel@tonic-gate #pragma weak pthread_cond_destroy = _cond_destroy 37030Sstevel@tonic-gate #pragma weak _pthread_cond_destroy = _cond_destroy 37040Sstevel@tonic-gate #pragma weak cond_destroy = _cond_destroy 37050Sstevel@tonic-gate int 37060Sstevel@tonic-gate _cond_destroy(cond_t *cvp) 37070Sstevel@tonic-gate { 37080Sstevel@tonic-gate cvp->cond_magic = 0; 37090Sstevel@tonic-gate tdb_sync_obj_deregister(cvp); 37100Sstevel@tonic-gate return (0); 37110Sstevel@tonic-gate } 37120Sstevel@tonic-gate 37130Sstevel@tonic-gate #if defined(THREAD_DEBUG) 37140Sstevel@tonic-gate void 37150Sstevel@tonic-gate assert_no_libc_locks_held(void) 37160Sstevel@tonic-gate { 37170Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 37180Sstevel@tonic-gate } 37190Sstevel@tonic-gate 37200Sstevel@tonic-gate /* protected by link_lock */ 37210Sstevel@tonic-gate uint64_t spin_lock_spin; 37220Sstevel@tonic-gate uint64_t spin_lock_spin2; 37230Sstevel@tonic-gate uint64_t spin_lock_sleep; 37240Sstevel@tonic-gate uint64_t spin_lock_wakeup; 37250Sstevel@tonic-gate 37260Sstevel@tonic-gate /* 37270Sstevel@tonic-gate * Record spin lock statistics. 37280Sstevel@tonic-gate * Called by a thread exiting itself in thrp_exit(). 
37290Sstevel@tonic-gate * Also called via atexit() from the thread calling 37300Sstevel@tonic-gate * exit() to do all the other threads as well. 37310Sstevel@tonic-gate */ 37320Sstevel@tonic-gate void 37330Sstevel@tonic-gate record_spin_locks(ulwp_t *ulwp) 37340Sstevel@tonic-gate { 37350Sstevel@tonic-gate spin_lock_spin += ulwp->ul_spin_lock_spin; 37360Sstevel@tonic-gate spin_lock_spin2 += ulwp->ul_spin_lock_spin2; 37370Sstevel@tonic-gate spin_lock_sleep += ulwp->ul_spin_lock_sleep; 37380Sstevel@tonic-gate spin_lock_wakeup += ulwp->ul_spin_lock_wakeup; 37390Sstevel@tonic-gate ulwp->ul_spin_lock_spin = 0; 37400Sstevel@tonic-gate ulwp->ul_spin_lock_spin2 = 0; 37410Sstevel@tonic-gate ulwp->ul_spin_lock_sleep = 0; 37420Sstevel@tonic-gate ulwp->ul_spin_lock_wakeup = 0; 37430Sstevel@tonic-gate } 37440Sstevel@tonic-gate 37450Sstevel@tonic-gate /* 37460Sstevel@tonic-gate * atexit function: dump the queue statistics to stderr. 37470Sstevel@tonic-gate */ 37481219Sraf #if !defined(__lint) 37491219Sraf #define fprintf _fprintf 37501219Sraf #endif 37510Sstevel@tonic-gate #include <stdio.h> 37520Sstevel@tonic-gate void 37530Sstevel@tonic-gate dump_queue_statistics(void) 37540Sstevel@tonic-gate { 37550Sstevel@tonic-gate uberdata_t *udp = curthread->ul_uberdata; 37560Sstevel@tonic-gate queue_head_t *qp; 37570Sstevel@tonic-gate int qn; 37580Sstevel@tonic-gate uint64_t spin_lock_total = 0; 37590Sstevel@tonic-gate 37600Sstevel@tonic-gate if (udp->queue_head == NULL || thread_queue_dump == 0) 37610Sstevel@tonic-gate return; 37620Sstevel@tonic-gate 37630Sstevel@tonic-gate if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 || 37646247Sraf fprintf(stderr, "queue# lockcount max qlen max hlen\n") < 0) 37650Sstevel@tonic-gate return; 37660Sstevel@tonic-gate for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) { 37670Sstevel@tonic-gate if (qp->qh_lockcount == 0) 37680Sstevel@tonic-gate continue; 37690Sstevel@tonic-gate spin_lock_total += qp->qh_lockcount; 37706247Sraf if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn, 37716247Sraf (u_longlong_t)qp->qh_lockcount, 37726247Sraf qp->qh_qmax, qp->qh_hmax) < 0) 37735629Sraf return; 37740Sstevel@tonic-gate } 37750Sstevel@tonic-gate 37760Sstevel@tonic-gate if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 || 37776247Sraf fprintf(stderr, "queue# lockcount max qlen max hlen\n") < 0) 37780Sstevel@tonic-gate return; 37790Sstevel@tonic-gate for (qn = 0; qn < QHASHSIZE; qn++, qp++) { 37800Sstevel@tonic-gate if (qp->qh_lockcount == 0) 37810Sstevel@tonic-gate continue; 37820Sstevel@tonic-gate spin_lock_total += qp->qh_lockcount; 37836247Sraf if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn, 37846247Sraf (u_longlong_t)qp->qh_lockcount, 37856247Sraf qp->qh_qmax, qp->qh_hmax) < 0) 37865629Sraf return; 37870Sstevel@tonic-gate } 37880Sstevel@tonic-gate 37890Sstevel@tonic-gate (void) fprintf(stderr, "\n spin_lock_total = %10llu\n", 37905629Sraf (u_longlong_t)spin_lock_total); 37910Sstevel@tonic-gate (void) fprintf(stderr, " spin_lock_spin = %10llu\n", 37925629Sraf (u_longlong_t)spin_lock_spin); 37930Sstevel@tonic-gate (void) fprintf(stderr, " spin_lock_spin2 = %10llu\n", 37945629Sraf (u_longlong_t)spin_lock_spin2); 37950Sstevel@tonic-gate (void) fprintf(stderr, " spin_lock_sleep = %10llu\n", 37965629Sraf (u_longlong_t)spin_lock_sleep); 37970Sstevel@tonic-gate (void) fprintf(stderr, " spin_lock_wakeup = %10llu\n", 37985629Sraf (u_longlong_t)spin_lock_wakeup); 37990Sstevel@tonic-gate } 38006247Sraf #endif 3801
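/*
 * Illustrative usage sketch (not part of libc, compiled out): how an
 * application might use the robust-mutex and condvar interfaces
 * implemented above.  A lock attempt that returns EOWNERDEAD repairs the
 * shared state and calls pthread_mutex_consistent_np() (__mutex_consistent
 * above) before proceeding; pthread_cond_timedwait() reports ETIMEDOUT,
 * as mapped in _pthread_cond_timedwait() above.  The function name
 * example_robust_wait and the 'ready' flag are hypothetical; assumes
 * <pthread.h> and <errno.h>, and a mutex initialized with the robust
 * attribute.
 */
#if 0
static int
example_robust_wait(pthread_mutex_t *mp, pthread_cond_t *cvp,
	volatile int *ready, const struct timespec *abstime)
{
	int error = pthread_mutex_lock(mp);

	if (error == EOWNERDEAD) {
		*ready = 0;		/* repair the protected state */
		(void) pthread_mutex_consistent_np(mp);
		error = 0;
	} else if (error != 0) {
		return (error);		/* e.g. ENOTRECOVERABLE */
	}

	while (*ready == 0) {
		error = pthread_cond_timedwait(cvp, mp, abstime);
		if (error != 0)		/* ETIMEDOUT, EOWNERDEAD, ... */
			break;
	}

	(void) pthread_mutex_unlock(mp);
	return (error);
}
#endif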