/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#define atomic_cas_64 _atomic_cas_64

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/sdt.h>
#include <atomic.h>

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);

/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
        tdb_incr(msp->mutex_lock);
        msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
        hrtime_t now = gethrtime();

        if (msp->mutex_begin_hold)
                msp->mutex_hold_time += now - msp->mutex_begin_hold;
        msp->mutex_begin_hold = 0;
        return (now);
}
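
/*
 * Illustrative sketch (not part of the original source): the two helpers
 * above are meant to bracket a lock's hold interval.  A caller that owns
 * a mutex with associated statistics would pair them roughly like this
 * (the MUTEX_STATS() lookup macro is assumed to come from thr_uberdata.h):
 *
 *      tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
 *      record_begin_hold(msp);                 // at acquisition
 *      ... critical section ...
 *      (void) record_hold_time(msp);           // at release
 */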

/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
        if (set_lock_byte(&stall_mutex.mutex_lockw))
                thr_panic("mutex_setup() cannot acquire stall_mutex");
        stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin count of 1000 is experimentally determined.
 * On sun4u machines with any number of processors it could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variable:
 *      _THREAD_ADAPTIVE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int thread_adaptive_spin = 1000;
uint_t thread_max_spinners = 100;
int thread_queue_verify = 0;
static int ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * We try harder to acquire queue locks by spinning.
 * The environment variable:
 *      _THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int thread_queue_spin = 10000;

#define ALL_ATTRIBUTES \
        (LOCK_RECURSIVE | LOCK_ERRORCHECK | \
        LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT | \
        LOCK_ROBUST)

/*
 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
 *      LOCK_RECURSIVE
 *      LOCK_ERRORCHECK
 *      LOCK_PRIO_INHERIT
 *      LOCK_PRIO_PROTECT
 *      LOCK_ROBUST
 */
#pragma weak _private_mutex_init = __mutex_init
#pragma weak mutex_init = __mutex_init
#pragma weak _mutex_init = __mutex_init
/* ARGSUSED2 */
int
__mutex_init(mutex_t *mp, int type, void *arg)
{
        int basetype = (type & ~ALL_ATTRIBUTES);
        int error = 0;

        if (basetype == USYNC_PROCESS_ROBUST) {
                /*
                 * USYNC_PROCESS_ROBUST is a deprecated historical type.
                 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
                 * retain the USYNC_PROCESS_ROBUST flag so we can return
                 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
                 * mutexes will ever draw ELOCKUNMAPPED).
                 */
                type |= (USYNC_PROCESS | LOCK_ROBUST);
                basetype = USYNC_PROCESS;
        }

        if (!(basetype == USYNC_THREAD || basetype == USYNC_PROCESS) ||
            (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
            == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) {
                error = EINVAL;
        } else if (type & LOCK_ROBUST) {
                /*
                 * Callers of mutex_init() with the LOCK_ROBUST attribute
                 * are required to pass an initially all-zero mutex.
                 * Multiple calls to mutex_init() are allowed; all but
                 * the first return EBUSY.  A call to mutex_init() is
                 * allowed to make an inconsistent robust lock consistent
                 * (for historical usage, even though the proper interface
                 * for this is mutex_consistent()).  Note that we use
                 * atomic_or_16() to set the LOCK_INITED flag so as
                 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
                 */
                extern void _atomic_or_16(volatile uint16_t *, uint16_t);
                if (!(mp->mutex_flag & LOCK_INITED)) {
                        mp->mutex_type = (uint8_t)type;
                        _atomic_or_16(&mp->mutex_flag, LOCK_INITED);
                        mp->mutex_magic = MUTEX_MAGIC;
                } else if (type != mp->mutex_type ||
                    ((type & LOCK_PRIO_PROTECT) &&
                    mp->mutex_ceiling != (*(int *)arg))) {
                        error = EINVAL;
                } else if (__mutex_consistent(mp) != 0) {
                        error = EBUSY;
                }
                /* register a process robust mutex with the kernel */
                if (basetype == USYNC_PROCESS)
                        register_lock(mp);
        } else {
                (void) _memset(mp, 0, sizeof (*mp));
                mp->mutex_type = (uint8_t)type;
                mp->mutex_flag = LOCK_INITED;
                mp->mutex_magic = MUTEX_MAGIC;
        }

        if (error == 0 && (type & LOCK_PRIO_PROTECT))
                mp->mutex_ceiling = (uint8_t)(*(int *)arg);

        return (error);
}
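
/*
 * Illustrative sketch (not part of the original source): initializing a
 * process-shared robust mutex under the rules described above.  The mutex
 * memory must start out all-zero; once LOCK_INITED is set, a second
 * initializer gets EBUSY rather than a reinitialized lock.
 *
 *      mutex_t *mp = ...;      // points into zero-filled shared memory
 *      int error = mutex_init(mp, USYNC_PROCESS | LOCK_ROBUST, NULL);
 *      if (error != 0 && error != EBUSY)
 *              abort();        // EBUSY just means we lost the init race
 */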

/*
 * Delete mp from list of ceil mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
        ulwp_t *self = curthread;
        mxchain_t **mcpp;
        mxchain_t *mcp;

        mcpp = &self->ul_mxchain;
        while ((*mcpp)->mxchain_mx != mp)
                mcpp = &(*mcpp)->mxchain_next;
        mcp = *mcpp;
        *mcpp = mcp->mxchain_next;
        lfree(mcp, sizeof (*mcp));
        return (mcpp == &self->ul_mxchain);
}

/*
 * Add mp to head of list of ceil mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
        ulwp_t *self = curthread;
        mxchain_t *mcp;

        if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
                return (ENOMEM);
        mcp->mxchain_mx = mp;
        mcp->mxchain_next = self->ul_mxchain;
        self->ul_mxchain = mcp;
        return (0);
}

/*
 * Inherit priority from ceiling.  The inheritance impacts the effective
 * priority, not the assigned priority.  See _thread_setschedparam_main().
 */
void
_ceil_prio_inherit(int ceil)
{
        ulwp_t *self = curthread;
        struct sched_param param;

        (void) _memset(&param, 0, sizeof (param));
        param.sched_priority = ceil;
        if (_thread_setschedparam_main(self->ul_lwpid,
            self->ul_policy, &param, PRIO_INHERIT)) {
                /*
                 * Panic, since it is unclear what error code to return.
                 * If we do return the error codes returned by the routine
                 * called above, update the man page...
                 */
                thr_panic("_thread_setschedparam_main() fails");
        }
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
        ulwp_t *self = curthread;
        struct sched_param param;

        (void) _memset(&param, 0, sizeof (param));
        if (self->ul_mxchain == NULL) {
                /*
                 * No ceil locks held.  Zero the epri, revert back to ul_pri.
                 * Since the thread's hash lock is not held, one cannot just
                 * read ul_pri here...do it in the called routine...
                 */
                param.sched_priority = self->ul_pri;    /* ignored */
                if (_thread_setschedparam_main(self->ul_lwpid,
                    self->ul_policy, &param, PRIO_DISINHERIT))
                        thr_panic("_thread_setschedparam_main() fails");
        } else {
                /*
                 * Set priority to that of the mutex at the head
                 * of the ceilmutex chain.
                 */
                param.sched_priority =
                    self->ul_mxchain->mxchain_mx->mutex_ceiling;
                if (_thread_setschedparam_main(self->ul_lwpid,
                    self->ul_policy, &param, PRIO_INHERIT))
                        thr_panic("_thread_setschedparam_main() fails");
        }
}

/*
 * Clear the lock byte.  Retain the waiters byte and the spinners byte.
 * Return the old value of the lock word.
 */
static uint32_t
clear_lockbyte(volatile uint32_t *lockword)
{
        uint32_t old;
        uint32_t new;

        do {
                old = *lockword;
                new = old & ~LOCKMASK;
        } while (atomic_cas_32(lockword, old, new) != old);

        return (old);
}
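
/*
 * Illustrative note (not part of the original source): the CAS loop above
 * works because the 32-bit mutex_lockword packs three independent fields,
 * selected by masks defined in the included thr_uberdata.h (the byte order
 * within the word is endian-dependent):
 *
 *      lock byte       (LOCKMASK)      -- held or free
 *      waiters byte    (WAITERMASK)    -- sleeping waiters exist
 *      spinners count  (SPINNERMASK, shifted by SPINNERSHIFT)
 *
 * Clearing only LOCKMASK with atomic_cas_32() releases the lock without
 * losing a concurrent update to the waiters or spinners fields.
 */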

/*
 * Same as clear_lockbyte(), but operates on mutex_lockword64.
 * The mutex_ownerpid field is cleared along with the lock byte.
 */
static uint64_t
clear_lockbyte64(volatile uint64_t *lockword64)
{
        uint64_t old;
        uint64_t new;

        do {
                old = *lockword64;
                new = old & ~LOCKMASK64;
        } while (atomic_cas_64(lockword64, old, new) != old);

        return (old);
}

/*
 * Similar to set_lock_byte(), which only tries to set the lock byte.
 * Here, we attempt to set the lock byte AND the mutex_ownerpid,
 * keeping the remaining bytes constant.
 */
static int
set_lock_byte64(volatile uint64_t *lockword64, pid_t ownerpid)
{
        uint64_t old;
        uint64_t new;

        old = *lockword64 & ~LOCKMASK64;
        new = old | ((uint64_t)(uint_t)ownerpid << PIDSHIFT) | LOCKBYTE64;
        if (atomic_cas_64(lockword64, old, new) == old)
                return (LOCKCLEAR);

        return (LOCKSET);
}

/*
 * Increment the spinners count in the mutex lock word.
 * Return 0 on success.  Return -1 if the count would overflow.
 */
static int
spinners_incr(volatile uint32_t *lockword, uint8_t max_spinners)
{
        uint32_t old;
        uint32_t new;

        do {
                old = *lockword;
                if (((old & SPINNERMASK) >> SPINNERSHIFT) >= max_spinners)
                        return (-1);
                new = old + (1 << SPINNERSHIFT);
        } while (atomic_cas_32(lockword, old, new) != old);

        return (0);
}

/*
 * Decrement the spinners count in the mutex lock word.
 * Return the new value of the lock word.
 */
static uint32_t
spinners_decr(volatile uint32_t *lockword)
{
        uint32_t old;
        uint32_t new;

        do {
                new = old = *lockword;
                if (new & SPINNERMASK)
                        new -= (1 << SPINNERSHIFT);
        } while (atomic_cas_32(lockword, old, new) != old);

        return (new);
}
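
/*
 * Illustrative sketch (not part of the original source): spinners_incr()
 * and spinners_decr() are instances of the general lock-free
 * read-modify-write pattern over a shared word:
 *
 *      do {
 *              old = *word;
 *              if (!precondition(old))         // e.g. count at its bound
 *                      return (failure);
 *              new = transform(old);
 *      } while (atomic_cas_32(word, old, new) != old);
 *
 * The loop retries only if another thread changed the word between the
 * read and the compare-and-swap, so no thread ever blocks here.
 */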

/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 * No DTrace probes are provided for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
        ulwp_t *self = curthread;

        no_preempt(self);
        if (set_lock_byte(&mp->mutex_lockw) == 0) {
                mp->mutex_owner = (uintptr_t)self;
                return;
        }
        /*
         * Spin for a while, attempting to acquire the lock.
         */
        if (self->ul_spin_lock_spin != UINT_MAX)
                self->ul_spin_lock_spin++;
        if (mutex_queuelock_adaptive(mp) == 0 ||
            set_lock_byte(&mp->mutex_lockw) == 0) {
                mp->mutex_owner = (uintptr_t)self;
                return;
        }
        /*
         * Try harder if we were previously at a no-preemption level.
         */
        if (self->ul_preempt > 1) {
                if (self->ul_spin_lock_spin2 != UINT_MAX)
                        self->ul_spin_lock_spin2++;
                if (mutex_queuelock_adaptive(mp) == 0 ||
                    set_lock_byte(&mp->mutex_lockw) == 0) {
                        mp->mutex_owner = (uintptr_t)self;
                        return;
                }
        }
        /*
         * Give up and block in the kernel for the mutex.
         */
        if (self->ul_spin_lock_sleep != UINT_MAX)
                self->ul_spin_lock_sleep++;
        (void) ___lwp_mutex_timedlock(mp, NULL);
        mp->mutex_owner = (uintptr_t)self;
}

void
spin_lock_clear(mutex_t *mp)
{
        ulwp_t *self = curthread;

        mp->mutex_owner = 0;
        if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
                (void) ___lwp_mutex_wakeup(mp, 0);
                if (self->ul_spin_lock_wakeup != UINT_MAX)
                        self->ul_spin_lock_wakeup++;
        }
        preempt(self);
}

/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        mutex_t *mp;
        void *data;
        int i;

        /*
         * No locks are needed; we call here only when single-threaded.
         */
        ASSERT(self == udp->ulwp_one);
        ASSERT(!udp->uberflags.uf_mt);
        if ((data = _private_mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
            PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
            == MAP_FAILED)
                thr_panic("cannot allocate thread queue_head table");
        udp->queue_head = (queue_head_t *)data;
        for (i = 0; i < 2 * QHASHSIZE; i++) {
                mp = &udp->queue_head[i].qh_lock;
                mp->mutex_flag = LOCK_INITED;
                mp->mutex_magic = MUTEX_MAGIC;
        }
}
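
/*
 * Illustrative note (not part of the original source): the table holds
 * 2 * QHASHSIZE buckets.  QUEUE_HASH() maps mutex (MX) wait channels into
 * the first QHASHSIZE entries and condition-variable (CV) channels into
 * the second, so a mutex and a condvar at the same address never share a
 * bucket.  This is the invariant QVERIFY() recomputes below with:
 *
 *      qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
 */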

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        ulwp_t *ulwp;
        ulwp_t *prev;
        uint_t index;
        uint32_t cnt = 0;
        char qtype;
        void *wchan;

        ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
        ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
        ASSERT((qp->qh_head != NULL && qp->qh_tail != NULL) ||
            (qp->qh_head == NULL && qp->qh_tail == NULL));
        if (!thread_queue_verify)
                return;
        /* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
        qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
        for (prev = NULL, ulwp = qp->qh_head; ulwp != NULL;
            prev = ulwp, ulwp = ulwp->ul_link, cnt++) {
                ASSERT(ulwp->ul_qtype == qtype);
                ASSERT(ulwp->ul_wchan != NULL);
                ASSERT(ulwp->ul_sleepq == qp);
                wchan = ulwp->ul_wchan;
                index = QUEUE_HASH(wchan, qtype);
                ASSERT(&udp->queue_head[index] == qp);
        }
        ASSERT(qp->qh_tail == prev);
        ASSERT(qp->qh_qlen == cnt);
}

#else   /* THREAD_DEBUG */

#define QVERIFY(qp)

#endif  /* THREAD_DEBUG */

/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
        uberdata_t *udp = curthread->ul_uberdata;
        queue_head_t *qp;

        ASSERT(qtype == MX || qtype == CV);

        /*
         * It is possible that we could be called while still single-threaded.
         * If so, we call queue_alloc() to allocate the queue_head[] array.
         */
        if ((qp = udp->queue_head) == NULL) {
                queue_alloc();
                qp = udp->queue_head;
        }
        qp += QUEUE_HASH(wchan, qtype);
        spin_lock_set(&qp->qh_lock);
        /*
         * At once per nanosecond, qh_lockcount will wrap after 512 years.
         * Were we to care about this, we could peg the value at UINT64_MAX.
         */
        qp->qh_lockcount++;
        QVERIFY(qp);
        return (qp);
}
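
/*
 * Illustrative sketch (not part of the original source): queue heads are
 * always used bracket-style, with the pointer returned here handed back
 * to queue_unlock() below (see mutex_wakeup() later in this file):
 *
 *      queue_head_t *qp = queue_lock(mp, MX); // non-preemptive spin lock
 *      ... examine or modify the sleep queue ...
 *      queue_unlock(qp);
 */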

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
        QVERIFY(qp);
        spin_lock_clear(&qp->qh_lock);
}

/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.  We do this by making writers appear to have a half
 * point higher priority for purposes of priority comparisons below.
 */
#define CMP_PRIO(ulwp)  ((real_priority(ulwp) << 1) + (ulwp)->ul_writer)
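
/*
 * Illustrative example (not part of the original source): with priorities
 * doubled, a writer at real priority 10 compares as (10 << 1) + 1 = 21
 * while a reader at the same priority compares as 20, so the writer sorts
 * ahead of the reader without outranking anything at priority 11 (22).
 */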

void
enqueue(queue_head_t *qp, ulwp_t *ulwp, void *wchan, int qtype)
{
        ulwp_t **ulwpp;
        ulwp_t *next;
        int pri = CMP_PRIO(ulwp);
        int force_fifo = (qtype & FIFOQ);
        int do_fifo;

        qtype &= ~FIFOQ;
        ASSERT(qtype == MX || qtype == CV);
        ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
        ASSERT(ulwp->ul_sleepq != qp);

        /*
         * LIFO queue ordering is unfair and can lead to starvation,
         * but it gives better performance for heavily contended locks.
         * We use thread_queue_fifo (range is 0..8) to determine
         * the frequency of FIFO vs LIFO queuing:
         *      0 : every 256th time    (almost always LIFO)
         *      1 : every 128th time
         *      2 : every 64th time
         *      3 : every 32nd time
         *      4 : every 16th time     (the default value, mostly LIFO)
         *      5 : every 8th time
         *      6 : every 4th time
         *      7 : every 2nd time
         *      8 : every time          (never LIFO, always FIFO)
         * Note that there is always some degree of FIFO ordering.
         * This breaks livelock conditions that occur in applications
         * that are written assuming (incorrectly) that threads acquire
         * locks fairly, that is, in roughly round-robin order.
         * In any event, the queue is maintained in priority order.
         *
         * If we are given the FIFOQ flag in qtype, FIFO queueing is forced.
         * SUSV3 requires this for semaphores.
         */
        do_fifo = (force_fifo ||
            ((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0);

        if (qp->qh_head == NULL) {
                /*
                 * The queue is empty.  LIFO/FIFO doesn't matter.
                 */
                ASSERT(qp->qh_tail == NULL);
                ulwpp = &qp->qh_head;
        } else if (do_fifo) {
                /*
                 * Enqueue after the last thread whose priority is greater
                 * than or equal to the priority of the thread being queued.
                 * Attempt first to go directly onto the tail of the queue.
                 */
                if (pri <= CMP_PRIO(qp->qh_tail))
                        ulwpp = &qp->qh_tail->ul_link;
                else {
                        for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL;
                            ulwpp = &next->ul_link)
                                if (pri > CMP_PRIO(next))
                                        break;
                }
        } else {
                /*
                 * Enqueue before the first thread whose priority is less
                 * than or equal to the priority of the thread being queued.
                 * Hopefully we can go directly onto the head of the queue.
                 */
                for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL;
                    ulwpp = &next->ul_link)
                        if (pri >= CMP_PRIO(next))
                                break;
        }
        if ((ulwp->ul_link = *ulwpp) == NULL)
                qp->qh_tail = ulwp;
        *ulwpp = ulwp;

        ulwp->ul_sleepq = qp;
        ulwp->ul_wchan = wchan;
        ulwp->ul_qtype = qtype;
        if (qp->qh_qmax < ++qp->qh_qlen)
                qp->qh_qmax = qp->qh_qlen;
}
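
/*
 * Illustrative example (not part of the original source): the do_fifo
 * test above selects FIFO whenever (qh_qcnt << ul_queue_fifo) is 0 mod
 * 256.  At the default ul_queue_fifo value of 4, (qcnt << 4) & 0xff == 0
 * exactly when qcnt is a multiple of 16, giving FIFO ordering on every
 * 16th enqueue and LIFO ordering otherwise, as the table above describes.
 */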

/*
 * Return a pointer to the queue slot of the
 * highest priority thread on the queue.
 * On return, prevp, if not NULL, will contain a pointer
 * to the thread's predecessor on the queue.
 */
static ulwp_t **
queue_slot(queue_head_t *qp, void *wchan, int *more, ulwp_t **prevp)
{
        ulwp_t **ulwpp;
        ulwp_t *ulwp;
        ulwp_t *prev = NULL;
        ulwp_t **suspp = NULL;
        ulwp_t *susprev;

        ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

        /*
         * Find a waiter on the sleep queue.
         */
        for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
            prev = ulwp, ulwpp = &ulwp->ul_link) {
                if (ulwp->ul_wchan == wchan) {
                        if (!ulwp->ul_stop)
                                break;
                        /*
                         * Try not to return a suspended thread.
                         * This mimics the old libthread's behavior.
                         */
                        if (suspp == NULL) {
                                suspp = ulwpp;
                                susprev = prev;
                        }
                }
        }

        if (ulwp == NULL && suspp != NULL) {
                ulwp = *(ulwpp = suspp);
                prev = susprev;
                suspp = NULL;
        }
        if (ulwp == NULL) {
                if (more != NULL)
                        *more = 0;
                return (NULL);
        }

        if (prevp != NULL)
                *prevp = prev;
        if (more == NULL)
                return (ulwpp);

        /*
         * Scan the remainder of the queue for another waiter.
         */
        if (suspp != NULL) {
                *more = 1;
                return (ulwpp);
        }
        for (ulwp = ulwp->ul_link; ulwp != NULL; ulwp = ulwp->ul_link) {
                if (ulwp->ul_wchan == wchan) {
                        *more = 1;
                        return (ulwpp);
                }
        }

        *more = 0;
        return (ulwpp);
}

ulwp_t *
queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
{
        ulwp_t *ulwp;

        ulwp = *ulwpp;
        *ulwpp = ulwp->ul_link;
        ulwp->ul_link = NULL;
        if (qp->qh_tail == ulwp)
                qp->qh_tail = prev;
        qp->qh_qlen--;
        ulwp->ul_sleepq = NULL;
        ulwp->ul_wchan = NULL;

        return (ulwp);
}

ulwp_t *
dequeue(queue_head_t *qp, void *wchan, int *more)
{
        ulwp_t **ulwpp;
        ulwp_t *prev;

        if ((ulwpp = queue_slot(qp, wchan, more, &prev)) == NULL)
                return (NULL);
        return (queue_unlink(qp, ulwpp, prev));
}
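
/*
 * Illustrative sketch (not part of the original source): queue_slot() and
 * queue_unlink() rely on the classic pointer-to-pointer list idiom, which
 * unlinks a node without special-casing the head:
 *
 *      ulwp_t **ulwpp;
 *      for (ulwpp = &qp->qh_head; *ulwpp != NULL;
 *          ulwpp = &(*ulwpp)->ul_link)
 *              if (is_target(*ulwpp))
 *                      break;
 *      *ulwpp = (*ulwpp)->ul_link;     // works at head or in the middle
 *
 * Only qh_tail needs explicit repair, which is why queue_unlink() takes
 * the predecessor as an argument.
 */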

/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp, void *wchan)
{
        ulwp_t **ulwpp;

        if ((ulwpp = queue_slot(qp, wchan, NULL, NULL)) == NULL)
                return (NULL);
        return (*ulwpp);
}

uint8_t
dequeue_self(queue_head_t *qp, void *wchan)
{
        ulwp_t *self = curthread;
        ulwp_t **ulwpp;
        ulwp_t *ulwp;
        ulwp_t *prev = NULL;
        int found = 0;
        int more = 0;

        ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

        /* find self on the sleep queue */
        for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
            prev = ulwp, ulwpp = &ulwp->ul_link) {
                if (ulwp == self) {
                        /* dequeue ourself */
                        ASSERT(self->ul_wchan == wchan);
                        (void) queue_unlink(qp, ulwpp, prev);
                        self->ul_cvmutex = NULL;
                        self->ul_cv_wake = 0;
                        found = 1;
                        break;
                }
                if (ulwp->ul_wchan == wchan)
                        more = 1;
        }

        if (!found)
                thr_panic("dequeue_self(): curthread not found on queue");

        if (more)
                return (1);

        /* scan the remainder of the queue for another waiter */
        for (ulwp = *ulwpp; ulwp != NULL; ulwp = ulwp->ul_link) {
                if (ulwp->ul_wchan == wchan)
                        return (1);
        }

        return (0);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourself off of our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
        ulwp_t *self = curthread;
        queue_head_t *qp;

        /*
         * Calling enter_critical()/exit_critical() here would lead
         * to recursion.  Just manipulate self->ul_critical directly.
         */
        self->ul_critical++;
        while (self->ul_sleepq != NULL) {
                qp = queue_lock(self->ul_wchan, self->ul_qtype);
                /*
                 * We may have been moved from a CV queue to a
                 * mutex queue while we were attempting queue_lock().
                 * If so, just loop around and try again.
                 * dequeue_self() clears self->ul_sleepq.
                 */
                if (qp == self->ul_sleepq) {
                        (void) dequeue_self(qp, self->ul_wchan);
                        self->ul_writer = 0;
                }
                queue_unlock(qp);
        }
        self->ul_critical--;
}

/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
static int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        int mtype = mp->mutex_type;
        hrtime_t begin_sleep;
        int acquired;
        int error;

        self->ul_sp = stkptr();
        self->ul_wchan = mp;
        if (__td_event_report(self, TD_SLEEP, udp)) {
                self->ul_td_evbuf.eventnum = TD_SLEEP;
                self->ul_td_evbuf.eventdata = mp;
                tdb_event(TD_SLEEP, udp);
        }
        if (msp) {
                tdb_incr(msp->mutex_sleep);
                begin_sleep = gethrtime();
        }

        DTRACE_PROBE1(plockstat, mutex__block, mp);

        for (;;) {
                /*
                 * A return value of EOWNERDEAD or ELOCKUNMAPPED
                 * means we successfully acquired the lock.
                 */
                if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0 &&
                    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
                        acquired = 0;
                        break;
                }

                if (mtype & USYNC_PROCESS) {
                        /*
                         * Defend against forkall().  We may be the child,
                         * in which case we don't actually own the mutex.
                         */
                        enter_critical(self);
                        if (mp->mutex_ownerpid == udp->pid) {
                                mp->mutex_owner = (uintptr_t)self;
                                exit_critical(self);
                                acquired = 1;
                                break;
                        }
                        exit_critical(self);
                } else {
                        mp->mutex_owner = (uintptr_t)self;
                        acquired = 1;
                        break;
                }
        }
        if (msp)
                msp->mutex_sleep_time += gethrtime() - begin_sleep;
        self->ul_wchan = NULL;
        self->ul_sp = 0;

        if (acquired) {
                DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
                DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
        } else {
                DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
                DTRACE_PROBE2(plockstat, mutex__error, mp, error);
        }

        return (error);
}
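
/*
 * Illustrative sketch (not part of the original source): for robust
 * mutexes, EOWNERDEAD is a conditional success -- the caller now owns
 * the lock but must repair whatever the dead owner left behind.  A
 * typical consumer of the public interface looks like:
 *
 *      int error = mutex_lock(mp);
 *      if (error == EOWNERDEAD) {
 *              ... restore the data protected by mp ...
 *              (void) mutex_consistent(mp);    // clear LOCK_OWNERDEAD
 *      }
 */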

/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        int mtype = mp->mutex_type;
        int error;
        int acquired;

        for (;;) {
                /*
                 * A return value of EOWNERDEAD or ELOCKUNMAPPED
                 * means we successfully acquired the lock.
                 */
                if ((error = ___lwp_mutex_trylock(mp)) != 0 &&
                    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
                        acquired = 0;
                        break;
                }

                if (mtype & USYNC_PROCESS) {
                        /*
                         * Defend against forkall().  We may be the child,
                         * in which case we don't actually own the mutex.
                         */
                        enter_critical(self);
                        if (mp->mutex_ownerpid == udp->pid) {
                                mp->mutex_owner = (uintptr_t)self;
                                exit_critical(self);
                                acquired = 1;
                                break;
                        }
                        exit_critical(self);
                } else {
                        mp->mutex_owner = (uintptr_t)self;
                        acquired = 1;
                        break;
                }
        }

        if (acquired) {
                DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
        } else if (error != EBUSY) {
                DTRACE_PROBE2(plockstat, mutex__error, mp, error);
        }

        return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
        ulwp_t *self = curthread;
        volatile sc_shared_t *scp;
        sc_shared_t *tmp;

        if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
            !self->ul_vfork &&                  /* not a child of vfork() */
            !self->ul_schedctl_called) {        /* haven't been called before */
                enter_critical(self);
                self->ul_schedctl_called = &self->ul_uberdata->uberflags;
                if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
                        self->ul_schedctl = scp = tmp;
                exit_critical(self);
        }
        /*
         * Unless the call to setup_schedctl() is surrounded
         * by enter_critical()/exit_critical(), the address
         * we are returning could be invalid due to a forkall()
         * having occurred in another thread.
         */
        return (scp);
}
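
/*
 * Illustrative sketch (not part of the original source): the libsched
 * interfaces below hand sc_preemptctl to applications.  The documented
 * usage pattern (see the schedctl_init() man page) is:
 *
 *      schedctl_t *scp = schedctl_init();
 *      schedctl_start(scp);    // hint: don't preempt me
 *      ... short critical section ...
 *      schedctl_stop(scp);     // allow preemption, yield if asked to
 */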

/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = _schedctl_init
#pragma weak _schedctl_lookup = _schedctl_init
#pragma weak schedctl_init = _schedctl_init
schedctl_t *
_schedctl_init(void)
{
        volatile sc_shared_t *scp = setup_schedctl();
        return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}

#pragma weak schedctl_exit = _schedctl_exit
void
_schedctl_exit(void)
{
}

/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
        ulwp_t *self = curthread;
        volatile sc_shared_t *volatile *ptr;

        if (self->ul_vfork)
                return (NULL);
        if (*(ptr = &self->ul_schedctl) == NULL)
                (void) setup_schedctl();
        return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
        volatile sc_shared_t *scp;

        if (self->ul_preempt++ == 0) {
                enter_critical(self);
                if ((scp = self->ul_schedctl) != NULL ||
                    (scp = setup_schedctl()) != NULL) {
                        /*
                         * Save the pre-existing preempt value.
                         */
                        self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
                        scp->sc_preemptctl.sc_nopreempt = 1;
                }
        }
}
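
/*
 * Illustrative sketch (not part of the original source): no_preempt() and
 * preempt() nest like a recursive lock; only the outermost pair touches
 * the schedctl word (spin_lock_set()/spin_lock_clear() above bracket all
 * of their spinning with one such pair):
 *
 *      no_preempt(self);       // ul_preempt 0 -> 1, sc_nopreempt = 1
 *      no_preempt(self);       // ul_preempt 1 -> 2, schedctl untouched
 *      ...
 *      preempt(self);          // ul_preempt 2 -> 1
 *      preempt(self);          // ul_preempt 1 -> 0, may yield
 */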

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
        volatile sc_shared_t *scp;

        ASSERT(self->ul_preempt > 0);
        if (--self->ul_preempt == 0) {
                if ((scp = self->ul_schedctl) != NULL) {
                        /*
                         * Restore the pre-existing preempt value.
                         */
                        scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
                        if (scp->sc_preemptctl.sc_yield &&
                            scp->sc_preemptctl.sc_nopreempt == 0) {
                                lwp_yield();
                                if (scp->sc_preemptctl.sc_yield) {
                                        /*
                                         * Shouldn't happen.  This is either
                                         * a race condition or the thread
                                         * just entered the real-time class.
                                         */
                                        lwp_yield();
                                        scp->sc_preemptctl.sc_yield = 0;
                                }
                        }
                }
                exit_critical(self);
        }
}

/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
        volatile sc_shared_t *scp = self->ul_schedctl;

        ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
        if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
            (self->ul_curplease && self->ul_critical == 1)) {
                (void) __lwp_unpark(lwpid);
                lwpid = 0;
        }
        return (lwpid);
}

/*
 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
static int
mutex_trylock_adaptive(mutex_t *mp, int tryhard)
{
        ulwp_t *self = curthread;
        int error = EBUSY;
        ulwp_t *ulwp;
        volatile sc_shared_t *scp;
        volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
        volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
        uint32_t new_lockword;
        int count = 0;
        int max_count;
        uint8_t max_spinners;

        ASSERT(!(mp->mutex_type & USYNC_PROCESS));

        if (MUTEX_OWNER(mp) == self)
                return (EBUSY);

        /* short-cut, not definitive (see below) */
        if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
                ASSERT(mp->mutex_type & LOCK_ROBUST);
                error = ENOTRECOVERABLE;
                goto done;
        }

        /*
         * Make one attempt to acquire the lock before
         * incurring the overhead of the spin loop.
         */
        if (set_lock_byte(lockp) == 0) {
                *ownerp = (uintptr_t)self;
                error = 0;
                goto done;
        }
        if (!tryhard)
                goto done;
        if (ncpus == 0)
                ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
        if ((max_spinners = self->ul_max_spinners) >= ncpus)
                max_spinners = ncpus - 1;
        max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
        if (max_count == 0)
                goto done;

        /*
         * This spin loop is unfair to lwps that have already dropped into
         * the kernel to sleep.  They will starve on a highly-contended mutex.
         * This is just too bad.  The adaptive spin algorithm is intended
         * to allow programs with highly-contended locks (that is, broken
         * programs) to execute with reasonable speed despite their contention.
         * Being fair would reduce the speed of such programs and well-written
         * programs will not suffer in any case.
         */
        enter_critical(self);
        if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
                exit_critical(self);
                goto done;
        }
        DTRACE_PROBE1(plockstat, mutex__spin, mp);
        for (count = 1; ; count++) {
                if (*lockp == 0 && set_lock_byte(lockp) == 0) {
                        *ownerp = (uintptr_t)self;
                        error = 0;
                        break;
                }
                if (count == max_count)
                        break;
                SMT_PAUSE();
                /*
                 * Stop spinning if the mutex owner is not running on
                 * a processor; it will not drop the lock any time soon
                 * and we would just be wasting time to keep spinning.
                 *
                 * Note that we are looking at another thread (ulwp_t)
                 * without ensuring that the other thread does not exit.
                 * The scheme relies on ulwp_t structures never being
                 * deallocated by the library (the library employs a free
                 * list of ulwp_t structs that are reused when new threads
                 * are created) and on schedctl shared memory never being
                 * deallocated once created via __schedctl().
                 *
                 * Thus, the worst that can happen when the spinning thread
                 * looks at the owner's schedctl data is that it is looking
                 * at some other thread's schedctl data.  This almost never
                 * happens and is benign when it does.
                 */
                if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
                    ((scp = ulwp->ul_schedctl) == NULL ||
                    scp->sc_state != SC_ONPROC))
                        break;
        }
        new_lockword = spinners_decr(&mp->mutex_lockword);
        if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
                /*
                 * We haven't yet acquired the lock, the lock
                 * is free, and there are no other spinners.
                 * Make one final attempt to acquire the lock.
                 *
                 * This isn't strictly necessary since mutex_lock_queue()
                 * (the next action this thread will take if it doesn't
                 * acquire the lock here) makes one attempt to acquire
                 * the lock before putting the thread to sleep.
                 *
                 * If the next action for this thread (on failure here)
                 * were not to call mutex_lock_queue(), this would be
                 * necessary for correctness, to avoid ending up with an
                 * unheld mutex with waiters but no one to wake them up.
                 */
                if (set_lock_byte(lockp) == 0) {
                        *ownerp = (uintptr_t)self;
                        error = 0;
                }
                count++;
        }
        exit_critical(self);

done:
        if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
                ASSERT(mp->mutex_type & LOCK_ROBUST);
                /*
                 * We shouldn't own the mutex.
                 * Just clear the lock; everyone has already been woken up.
                 */
                mp->mutex_owner = 0;
                (void) clear_lockbyte(&mp->mutex_lockword);
                error = ENOTRECOVERABLE;
        }

        if (error) {
                if (count) {
                        DTRACE_PROBE2(plockstat, mutex__spun, 0, count);
                }
                if (error != EBUSY) {
                        DTRACE_PROBE2(plockstat, mutex__error, mp, error);
                }
        } else {
                if (count) {
                        DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
                }
                DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
                if (mp->mutex_flag & LOCK_OWNERDEAD) {
                        ASSERT(mp->mutex_type & LOCK_ROBUST);
                        error = EOWNERDEAD;
                }
        }

        return (error);
}
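
/*
 * Illustrative sketch (not part of the original source): a caller such as
 * the USYNC_THREAD path of mutex_lock_impl() (not shown in this excerpt)
 * combines the adaptive trylock with sleeping, roughly:
 *
 *      if ((error = mutex_trylock_adaptive(mp, tryhard)) != EBUSY)
 *              return (error);         // acquired, or a robust-lock error
 *      return (mutex_lock_queue(self, msp, mp, tsp));  // park on the queue
 */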

/*
 * Same as mutex_trylock_adaptive(), except specifically for queue locks.
 * The owner field is not set here; the caller (spin_lock_set()) sets it.
 */
static int
mutex_queuelock_adaptive(mutex_t *mp)
{
        ulwp_t *ulwp;
        volatile sc_shared_t *scp;
        volatile uint8_t *lockp;
        volatile uint64_t *ownerp;
        int count = curthread->ul_queue_spin;

        ASSERT(mp->mutex_type == USYNC_THREAD);

        if (count == 0)
                return (EBUSY);

        lockp = (volatile uint8_t *)&mp->mutex_lockw;
        ownerp = (volatile uint64_t *)&mp->mutex_owner;
        while (--count >= 0) {
                if (*lockp == 0 && set_lock_byte(lockp) == 0)
                        return (0);
                SMT_PAUSE();
                if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
                    ((scp = ulwp->ul_schedctl) == NULL ||
                    scp->sc_state != SC_ONPROC))
                        break;
        }

        return (EBUSY);
}

/*
 * Like mutex_trylock_adaptive(), but for process-shared mutexes.
 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread
 * and mutex_ownerpid set to the current pid.
 */
static int
mutex_trylock_process(mutex_t *mp, int tryhard)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        int error = EBUSY;
        volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64;
        uint32_t new_lockword;
        int count = 0;
        int max_count;
        uint8_t max_spinners;

        ASSERT(mp->mutex_type & USYNC_PROCESS);

        if (shared_mutex_held(mp))
                return (EBUSY);

        /* short-cut, not definitive (see below) */
        if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
                ASSERT(mp->mutex_type & LOCK_ROBUST);
                error = ENOTRECOVERABLE;
                goto done;
        }

        /*
         * Make one attempt to acquire the lock before
         * incurring the overhead of the spin loop.
         */
        enter_critical(self);
        if (set_lock_byte64(lockp, udp->pid) == 0) {
                mp->mutex_owner = (uintptr_t)self;
                /* mp->mutex_ownerpid was set by set_lock_byte64() */
                exit_critical(self);
                error = 0;
                goto done;
        }
        exit_critical(self);
        if (!tryhard)
                goto done;
        if (ncpus == 0)
                ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
        if ((max_spinners = self->ul_max_spinners) >= ncpus)
                max_spinners = ncpus - 1;
        max_count = (max_spinners != 0)?
	    self->ul_adaptive_spin : 0;
13255629Sraf	if (max_count == 0)
13265629Sraf		goto done;
13275629Sraf
13280Sstevel@tonic-gate	/*
13290Sstevel@tonic-gate	 * This is a process-shared mutex.
13300Sstevel@tonic-gate	 * We cannot know if the owner is running on a processor.
13310Sstevel@tonic-gate	 * We just spin and hope that it is on a processor.
13320Sstevel@tonic-gate	 */
13334574Sraf	enter_critical(self);
13345629Sraf	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
13355629Sraf		exit_critical(self);
13365629Sraf		goto done;
13375629Sraf	}
13385629Sraf	DTRACE_PROBE1(plockstat, mutex__spin, mp);
13395629Sraf	for (count = 1; ; count++) {
1340*6057Sraf		if ((*lockp & LOCKMASK64) == 0 &&
1341*6057Sraf		    set_lock_byte64(lockp, udp->pid) == 0) {
13424574Sraf			mp->mutex_owner = (uintptr_t)self;
1343*6057Sraf			/* mp->mutex_ownerpid was set by set_lock_byte64() */
13444574Sraf			error = 0;
13454574Sraf			break;
13464574Sraf		}
13475629Sraf		if (count == max_count)
13485629Sraf			break;
13494574Sraf		SMT_PAUSE();
13504574Sraf	}
13515629Sraf	new_lockword = spinners_decr(&mp->mutex_lockword);
13525629Sraf	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
13535629Sraf		/*
13545629Sraf		 * We haven't yet acquired the lock, the lock
13555629Sraf		 * is free, and there are no other spinners.
13565629Sraf		 * Make one final attempt to acquire the lock.
13575629Sraf		 *
13585629Sraf		 * This isn't strictly necessary since mutex_lock_kernel()
13595629Sraf		 * (the next action this thread will take if it doesn't
13605629Sraf		 * acquire the lock here) makes one attempt to acquire
13615629Sraf		 * the lock before putting the thread to sleep.
13625629Sraf		 *
13635629Sraf		 * If the next action for this thread (on failure here)
13645629Sraf		 * were not to call mutex_lock_kernel(), this would be
13655629Sraf		 * necessary for correctness, to avoid ending up with an
13665629Sraf		 * unheld mutex with waiters but no one to wake them up.
13675629Sraf		 */
1368*6057Sraf		if (set_lock_byte64(lockp, udp->pid) == 0) {
13695629Sraf			mp->mutex_owner = (uintptr_t)self;
1370*6057Sraf			/* mp->mutex_ownerpid was set by set_lock_byte64() */
13715629Sraf			error = 0;
13725629Sraf		}
13735629Sraf		count++;
13745629Sraf	}
13754574Sraf	exit_critical(self);
13764574Sraf
13775629Sraf done:
13784574Sraf	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
13794574Sraf		ASSERT(mp->mutex_type & LOCK_ROBUST);
13804574Sraf		/*
1381*6057Sraf		 * We shouldn't own the mutex.
1382*6057Sraf		 * Just clear the lock; everyone has already been woken up.
13834574Sraf */ 13844574Sraf mp->mutex_owner = 0; 1385*6057Sraf /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */ 1386*6057Sraf (void) clear_lockbyte64(&mp->mutex_lockword64); 13874574Sraf error = ENOTRECOVERABLE; 13880Sstevel@tonic-gate } 13890Sstevel@tonic-gate 13904574Sraf if (error) { 13915629Sraf if (count) { 13925629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 0, count); 13935629Sraf } 13944574Sraf if (error != EBUSY) { 13954574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 13964574Sraf } 13974574Sraf } else { 13985629Sraf if (count) { 13995629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 14005629Sraf } 14014574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 14024574Sraf if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) { 14034574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 14044574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) 14054574Sraf error = EOWNERDEAD; 14064574Sraf else if (mp->mutex_type & USYNC_PROCESS_ROBUST) 14074574Sraf error = ELOCKUNMAPPED; 14084574Sraf else 14094574Sraf error = EOWNERDEAD; 14104574Sraf } 14114574Sraf } 14124574Sraf 14134574Sraf return (error); 14140Sstevel@tonic-gate } 14150Sstevel@tonic-gate 14160Sstevel@tonic-gate /* 14170Sstevel@tonic-gate * Mutex wakeup code for releasing a USYNC_THREAD mutex. 14180Sstevel@tonic-gate * Returns the lwpid of the thread that was dequeued, if any. 14190Sstevel@tonic-gate * The caller of mutex_wakeup() must call __lwp_unpark(lwpid) 14200Sstevel@tonic-gate * to wake up the specified lwp. 14210Sstevel@tonic-gate */ 14224574Sraf static lwpid_t 14230Sstevel@tonic-gate mutex_wakeup(mutex_t *mp) 14240Sstevel@tonic-gate { 14250Sstevel@tonic-gate lwpid_t lwpid = 0; 14260Sstevel@tonic-gate queue_head_t *qp; 14270Sstevel@tonic-gate ulwp_t *ulwp; 14280Sstevel@tonic-gate int more; 14290Sstevel@tonic-gate 14300Sstevel@tonic-gate /* 14310Sstevel@tonic-gate * Dequeue a waiter from the sleep queue. Don't touch the mutex 14320Sstevel@tonic-gate * waiters bit if no one was found on the queue because the mutex 14330Sstevel@tonic-gate * might have been deallocated or reallocated for another purpose. 14340Sstevel@tonic-gate */ 14350Sstevel@tonic-gate qp = queue_lock(mp, MX); 14360Sstevel@tonic-gate if ((ulwp = dequeue(qp, mp, &more)) != NULL) { 14370Sstevel@tonic-gate lwpid = ulwp->ul_lwpid; 14380Sstevel@tonic-gate mp->mutex_waiters = (more? 1 : 0); 14390Sstevel@tonic-gate } 14400Sstevel@tonic-gate queue_unlock(qp); 14410Sstevel@tonic-gate return (lwpid); 14420Sstevel@tonic-gate } 14430Sstevel@tonic-gate 14440Sstevel@tonic-gate /* 14454574Sraf * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex. 14464574Sraf */ 14474574Sraf static void 14484574Sraf mutex_wakeup_all(mutex_t *mp) 14494574Sraf { 14504574Sraf queue_head_t *qp; 14514574Sraf int nlwpid = 0; 14524574Sraf int maxlwps = MAXLWPS; 14534574Sraf ulwp_t **ulwpp; 14544574Sraf ulwp_t *ulwp; 14554574Sraf ulwp_t *prev = NULL; 14564574Sraf lwpid_t buffer[MAXLWPS]; 14574574Sraf lwpid_t *lwpid = buffer; 14584574Sraf 14594574Sraf /* 14604574Sraf * Walk the list of waiters and prepare to wake up all of them. 14614574Sraf * The waiters flag has already been cleared from the mutex. 14624574Sraf * 14634574Sraf * We keep track of lwpids that are to be unparked in lwpid[]. 14644574Sraf * __lwp_unpark_all() is called to unpark all of them after 14654574Sraf * they have been removed from the sleep queue and the sleep 14664574Sraf * queue lock has been dropped. 
If we run out of space in our 14674574Sraf * on-stack buffer, we need to allocate more but we can't call 14684574Sraf * lmalloc() because we are holding a queue lock when the overflow 14694574Sraf * occurs and lmalloc() acquires a lock. We can't use alloca() 14704574Sraf * either because the application may have allocated a small 14714574Sraf * stack and we don't want to overrun the stack. So we call 14724574Sraf * alloc_lwpids() to allocate a bigger buffer using the mmap() 14734574Sraf * system call directly since that path acquires no locks. 14744574Sraf */ 14754574Sraf qp = queue_lock(mp, MX); 14764574Sraf ulwpp = &qp->qh_head; 14774574Sraf while ((ulwp = *ulwpp) != NULL) { 14784574Sraf if (ulwp->ul_wchan != mp) { 14794574Sraf prev = ulwp; 14804574Sraf ulwpp = &ulwp->ul_link; 14814574Sraf } else { 14824574Sraf if (nlwpid == maxlwps) 14834574Sraf lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 14844574Sraf (void) queue_unlink(qp, ulwpp, prev); 14854574Sraf lwpid[nlwpid++] = ulwp->ul_lwpid; 14864574Sraf } 14874574Sraf } 14884574Sraf 14894574Sraf if (nlwpid == 0) { 14904574Sraf queue_unlock(qp); 14914574Sraf } else { 14925629Sraf mp->mutex_waiters = 0; 14934574Sraf no_preempt(curthread); 14944574Sraf queue_unlock(qp); 14954574Sraf if (nlwpid == 1) 14964574Sraf (void) __lwp_unpark(lwpid[0]); 14974574Sraf else 14984574Sraf (void) __lwp_unpark_all(lwpid, nlwpid); 14994574Sraf preempt(curthread); 15004574Sraf } 15014574Sraf 15024574Sraf if (lwpid != buffer) 15034574Sraf (void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t)); 15044574Sraf } 15054574Sraf 15064574Sraf /* 15075629Sraf * Release a process-private mutex. 15085629Sraf * As an optimization, if there are waiters but there are also spinners 15095629Sraf * attempting to acquire the mutex, then don't bother waking up a waiter; 15105629Sraf * one of the spinners will acquire the mutex soon and it would be a waste 15115629Sraf * of resources to wake up some thread just to have it spin for a while 15125629Sraf * and then possibly go back to sleep. See mutex_trylock_adaptive(). 15130Sstevel@tonic-gate */ 15144574Sraf static lwpid_t 15154574Sraf mutex_unlock_queue(mutex_t *mp, int release_all) 15160Sstevel@tonic-gate { 15175629Sraf lwpid_t lwpid = 0; 15185629Sraf uint32_t old_lockword; 15195629Sraf 1520*6057Sraf DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 15215629Sraf mp->mutex_owner = 0; 15225629Sraf old_lockword = clear_lockbyte(&mp->mutex_lockword); 15235629Sraf if ((old_lockword & WAITERMASK) && 15245629Sraf (release_all || (old_lockword & SPINNERMASK) == 0)) { 15255629Sraf ulwp_t *self = curthread; 15260Sstevel@tonic-gate no_preempt(self); /* ensure a prompt wakeup */ 15275629Sraf if (release_all) 15285629Sraf mutex_wakeup_all(mp); 15295629Sraf else 15305629Sraf lwpid = mutex_wakeup(mp); 15315629Sraf if (lwpid == 0) 15325629Sraf preempt(self); 15334574Sraf } 15340Sstevel@tonic-gate return (lwpid); 15350Sstevel@tonic-gate } 15360Sstevel@tonic-gate 15370Sstevel@tonic-gate /* 15380Sstevel@tonic-gate * Like mutex_unlock_queue(), but for process-shared mutexes. 
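 * Here the owner's pid shares the 64-bit mutex_lockword64 with the
 * lock byte, so a single clear_lockbyte64() clears both the lock and
 * mutex_ownerpid atomically.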
15390Sstevel@tonic-gate */ 15404574Sraf static void 15414574Sraf mutex_unlock_process(mutex_t *mp, int release_all) 15420Sstevel@tonic-gate { 1543*6057Sraf uint64_t old_lockword64; 1544*6057Sraf 1545*6057Sraf DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 15460Sstevel@tonic-gate mp->mutex_owner = 0; 1547*6057Sraf /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */ 1548*6057Sraf old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64); 1549*6057Sraf if ((old_lockword64 & WAITERMASK64) && 1550*6057Sraf (release_all || (old_lockword64 & SPINNERMASK64) == 0)) { 15515629Sraf ulwp_t *self = curthread; 15525629Sraf no_preempt(self); /* ensure a prompt wakeup */ 15535629Sraf (void) ___lwp_mutex_wakeup(mp, release_all); 15545629Sraf preempt(self); 15550Sstevel@tonic-gate } 15560Sstevel@tonic-gate } 15570Sstevel@tonic-gate 15580Sstevel@tonic-gate /* 15590Sstevel@tonic-gate * Return the real priority of a thread. 15600Sstevel@tonic-gate */ 15610Sstevel@tonic-gate int 15620Sstevel@tonic-gate real_priority(ulwp_t *ulwp) 15630Sstevel@tonic-gate { 15640Sstevel@tonic-gate if (ulwp->ul_epri == 0) 15650Sstevel@tonic-gate return (ulwp->ul_mappedpri? ulwp->ul_mappedpri : ulwp->ul_pri); 15660Sstevel@tonic-gate return (ulwp->ul_emappedpri? ulwp->ul_emappedpri : ulwp->ul_epri); 15670Sstevel@tonic-gate } 15680Sstevel@tonic-gate 15690Sstevel@tonic-gate void 15700Sstevel@tonic-gate stall(void) 15710Sstevel@tonic-gate { 15720Sstevel@tonic-gate for (;;) 15730Sstevel@tonic-gate (void) mutex_lock_kernel(&stall_mutex, NULL, NULL); 15740Sstevel@tonic-gate } 15750Sstevel@tonic-gate 15760Sstevel@tonic-gate /* 15770Sstevel@tonic-gate * Acquire a USYNC_THREAD mutex via user-level sleep queues. 15780Sstevel@tonic-gate * We failed set_lock_byte(&mp->mutex_lockw) before coming here. 15794574Sraf * If successful, returns with mutex_owner set correctly. 15800Sstevel@tonic-gate */ 15810Sstevel@tonic-gate int 15820Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp, 15830Sstevel@tonic-gate timespec_t *tsp) 15840Sstevel@tonic-gate { 15850Sstevel@tonic-gate uberdata_t *udp = curthread->ul_uberdata; 15860Sstevel@tonic-gate queue_head_t *qp; 15870Sstevel@tonic-gate hrtime_t begin_sleep; 15880Sstevel@tonic-gate int error = 0; 15890Sstevel@tonic-gate 15900Sstevel@tonic-gate self->ul_sp = stkptr(); 15910Sstevel@tonic-gate if (__td_event_report(self, TD_SLEEP, udp)) { 15920Sstevel@tonic-gate self->ul_wchan = mp; 15930Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_SLEEP; 15940Sstevel@tonic-gate self->ul_td_evbuf.eventdata = mp; 15950Sstevel@tonic-gate tdb_event(TD_SLEEP, udp); 15960Sstevel@tonic-gate } 15970Sstevel@tonic-gate if (msp) { 15980Sstevel@tonic-gate tdb_incr(msp->mutex_sleep); 15990Sstevel@tonic-gate begin_sleep = gethrtime(); 16000Sstevel@tonic-gate } 16010Sstevel@tonic-gate 16020Sstevel@tonic-gate DTRACE_PROBE1(plockstat, mutex__block, mp); 16030Sstevel@tonic-gate 16040Sstevel@tonic-gate /* 16050Sstevel@tonic-gate * Put ourself on the sleep queue, and while we are 16060Sstevel@tonic-gate * unable to grab the lock, go park in the kernel. 16070Sstevel@tonic-gate * Take ourself off the sleep queue after we acquire the lock. 16080Sstevel@tonic-gate * The waiter bit can be set/cleared only while holding the queue lock. 
16090Sstevel@tonic-gate	 */
16100Sstevel@tonic-gate	qp = queue_lock(mp, MX);
16110Sstevel@tonic-gate	enqueue(qp, self, mp, MX);
16120Sstevel@tonic-gate	mp->mutex_waiters = 1;
16130Sstevel@tonic-gate	for (;;) {
16140Sstevel@tonic-gate		if (set_lock_byte(&mp->mutex_lockw) == 0) {
16150Sstevel@tonic-gate			mp->mutex_owner = (uintptr_t)self;
16160Sstevel@tonic-gate			mp->mutex_waiters = dequeue_self(qp, mp);
16170Sstevel@tonic-gate			break;
16180Sstevel@tonic-gate		}
16190Sstevel@tonic-gate		set_parking_flag(self, 1);
16200Sstevel@tonic-gate		queue_unlock(qp);
16210Sstevel@tonic-gate		/*
16220Sstevel@tonic-gate		 * __lwp_park() will return the residual time in tsp
16230Sstevel@tonic-gate		 * if we are unparked before the timeout expires.
16240Sstevel@tonic-gate		 */
16255629Sraf		error = __lwp_park(tsp, 0);
16260Sstevel@tonic-gate		set_parking_flag(self, 0);
16270Sstevel@tonic-gate		/*
16280Sstevel@tonic-gate		 * We could have taken a signal or suspended ourself.
16290Sstevel@tonic-gate		 * If we did, then we removed ourself from the queue.
16300Sstevel@tonic-gate		 * Someone else may have removed us from the queue
16310Sstevel@tonic-gate		 * as a consequence of mutex_unlock(). We may have
16320Sstevel@tonic-gate		 * gotten a timeout from __lwp_park(). Or we may still
16330Sstevel@tonic-gate		 * be on the queue and this is just a spurious wakeup.
16340Sstevel@tonic-gate		 */
16350Sstevel@tonic-gate		qp = queue_lock(mp, MX);
16360Sstevel@tonic-gate		if (self->ul_sleepq == NULL) {
16375629Sraf			if (error) {
16385629Sraf				mp->mutex_waiters = queue_waiter(qp, mp)? 1 : 0;
16395629Sraf				if (error != EINTR)
16405629Sraf					break;
16415629Sraf				error = 0;
16425629Sraf			}
16430Sstevel@tonic-gate			if (set_lock_byte(&mp->mutex_lockw) == 0) {
16440Sstevel@tonic-gate				mp->mutex_owner = (uintptr_t)self;
16450Sstevel@tonic-gate				break;
16460Sstevel@tonic-gate			}
16470Sstevel@tonic-gate			enqueue(qp, self, mp, MX);
16480Sstevel@tonic-gate			mp->mutex_waiters = 1;
16490Sstevel@tonic-gate		}
16500Sstevel@tonic-gate		ASSERT(self->ul_sleepq == qp &&
16510Sstevel@tonic-gate		    self->ul_qtype == MX &&
16520Sstevel@tonic-gate		    self->ul_wchan == mp);
16530Sstevel@tonic-gate		if (error) {
16545629Sraf			if (error != EINTR) {
16555629Sraf				mp->mutex_waiters = dequeue_self(qp, mp);
16565629Sraf				break;
16575629Sraf			}
16585629Sraf			error = 0;
16590Sstevel@tonic-gate		}
16600Sstevel@tonic-gate	}
16610Sstevel@tonic-gate	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
16620Sstevel@tonic-gate	    self->ul_wchan == NULL);
16630Sstevel@tonic-gate	self->ul_sp = 0;
16640Sstevel@tonic-gate	queue_unlock(qp);
16654574Sraf
16660Sstevel@tonic-gate	if (msp)
16670Sstevel@tonic-gate		msp->mutex_sleep_time += gethrtime() - begin_sleep;
16680Sstevel@tonic-gate
16690Sstevel@tonic-gate	ASSERT(error == 0 || error == EINVAL || error == ETIME);
16704574Sraf
16714574Sraf	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
16724574Sraf		ASSERT(mp->mutex_type & LOCK_ROBUST);
16734574Sraf		/*
1674*6057Sraf		 * We shouldn't own the mutex.
1675*6057Sraf		 * Just clear the lock; everyone has already been woken up.
16764574Sraf */ 16774574Sraf mp->mutex_owner = 0; 1678*6057Sraf (void) clear_lockbyte(&mp->mutex_lockword); 16794574Sraf error = ENOTRECOVERABLE; 16804574Sraf } 16814574Sraf 16824574Sraf if (error) { 16834574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); 16844574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 16854574Sraf } else { 16864574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 16874574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 16884574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 16894574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 16904574Sraf error = EOWNERDEAD; 16914574Sraf } 16924574Sraf } 16934574Sraf 16940Sstevel@tonic-gate return (error); 16950Sstevel@tonic-gate } 16960Sstevel@tonic-gate 16974574Sraf static int 16984574Sraf mutex_recursion(mutex_t *mp, int mtype, int try) 16994574Sraf { 17004574Sraf ASSERT(mutex_is_held(mp)); 17014574Sraf ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)); 17024574Sraf ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 17034574Sraf 17044574Sraf if (mtype & LOCK_RECURSIVE) { 17054574Sraf if (mp->mutex_rcount == RECURSION_MAX) { 17064574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN); 17074574Sraf return (EAGAIN); 17084574Sraf } 17094574Sraf mp->mutex_rcount++; 17104574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0); 17114574Sraf return (0); 17124574Sraf } 17134574Sraf if (try == MUTEX_LOCK) { 17144574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 17154574Sraf return (EDEADLK); 17164574Sraf } 17174574Sraf return (EBUSY); 17184574Sraf } 17194574Sraf 17204574Sraf /* 17214574Sraf * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so 17224574Sraf * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary. 17234574Sraf * We use tdb_hash_lock here and in the synch object tracking code in 17244574Sraf * the tdb_agent.c file. There is no conflict between these two usages. 17254574Sraf */ 17264574Sraf void 17274574Sraf register_lock(mutex_t *mp) 17284574Sraf { 17294574Sraf uberdata_t *udp = curthread->ul_uberdata; 17304574Sraf uint_t hash = LOCK_HASH(mp); 17314574Sraf robust_t *rlp; 17324574Sraf robust_t **rlpp; 17334574Sraf robust_t **table; 17344574Sraf 17354574Sraf if ((table = udp->robustlocks) == NULL) { 17364574Sraf lmutex_lock(&udp->tdb_hash_lock); 17374574Sraf if ((table = udp->robustlocks) == NULL) { 17384574Sraf table = lmalloc(LOCKHASHSZ * sizeof (robust_t *)); 17394574Sraf _membar_producer(); 17404574Sraf udp->robustlocks = table; 17414574Sraf } 17424574Sraf lmutex_unlock(&udp->tdb_hash_lock); 17434574Sraf } 17444574Sraf _membar_consumer(); 17454574Sraf 17464574Sraf /* 17474574Sraf * First search the registered table with no locks held. 17484574Sraf * This is safe because the table never shrinks 17494574Sraf * and we can only get a false negative. 17504574Sraf */ 17514574Sraf for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) { 17524574Sraf if (rlp->robust_lock == mp) /* already registered */ 17534574Sraf return; 17544574Sraf } 17554574Sraf 17564574Sraf /* 17574574Sraf * The lock was not found. 17584574Sraf * Repeat the operation with tdb_hash_lock held. 
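	 * Holding tdb_hash_lock closes the race with another thread
	 * trying to register the same mutex at the same time.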
17594574Sraf */ 17604574Sraf lmutex_lock(&udp->tdb_hash_lock); 17614574Sraf 17624574Sraf for (rlpp = &table[hash]; 17634574Sraf (rlp = *rlpp) != NULL; 17644574Sraf rlpp = &rlp->robust_next) { 17654574Sraf if (rlp->robust_lock == mp) { /* already registered */ 17664574Sraf lmutex_unlock(&udp->tdb_hash_lock); 17674574Sraf return; 17684574Sraf } 17694574Sraf } 17704574Sraf 17714574Sraf /* 17724574Sraf * The lock has never been registered. 17734574Sraf * Register it now and add it to the table. 17744574Sraf */ 17754574Sraf (void) ___lwp_mutex_register(mp); 17764574Sraf rlp = lmalloc(sizeof (*rlp)); 17774574Sraf rlp->robust_lock = mp; 17784574Sraf _membar_producer(); 17794574Sraf *rlpp = rlp; 17804574Sraf 17814574Sraf lmutex_unlock(&udp->tdb_hash_lock); 17824574Sraf } 17834574Sraf 17844574Sraf /* 17854574Sraf * This is called in the child of fork()/forkall() to start over 17864574Sraf * with a clean slate. (Each process must register its own locks.) 17874574Sraf * No locks are needed because all other threads are suspended or gone. 17884574Sraf */ 17894574Sraf void 17904574Sraf unregister_locks(void) 17914574Sraf { 17924574Sraf uberdata_t *udp = curthread->ul_uberdata; 17934574Sraf uint_t hash; 17944574Sraf robust_t **table; 17954574Sraf robust_t *rlp; 17964574Sraf robust_t *next; 17974574Sraf 17984574Sraf if ((table = udp->robustlocks) != NULL) { 17994574Sraf for (hash = 0; hash < LOCKHASHSZ; hash++) { 18004574Sraf rlp = table[hash]; 18014574Sraf while (rlp != NULL) { 18024574Sraf next = rlp->robust_next; 18034574Sraf lfree(rlp, sizeof (*rlp)); 18044574Sraf rlp = next; 18054574Sraf } 18064574Sraf } 18074574Sraf lfree(table, LOCKHASHSZ * sizeof (robust_t *)); 18084574Sraf udp->robustlocks = NULL; 18094574Sraf } 18104574Sraf } 18114574Sraf 18120Sstevel@tonic-gate /* 18130Sstevel@tonic-gate * Returns with mutex_owner set correctly. 
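 * This is the general (slow) path: it handles the priority-protect,
 * priority-inherit, robust, and statistics-gathering cases that the
 * fast paths in mutex_lock_impl() and __mutex_trylock() screen out.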
18140Sstevel@tonic-gate */ 18154574Sraf static int 18160Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try) 18170Sstevel@tonic-gate { 18180Sstevel@tonic-gate ulwp_t *self = curthread; 18190Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 18200Sstevel@tonic-gate int mtype = mp->mutex_type; 18210Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 18220Sstevel@tonic-gate int error = 0; 18234574Sraf uint8_t ceil; 18244574Sraf int myprio; 18250Sstevel@tonic-gate 18260Sstevel@tonic-gate ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 18270Sstevel@tonic-gate 18280Sstevel@tonic-gate if (!self->ul_schedctl_called) 18290Sstevel@tonic-gate (void) setup_schedctl(); 18300Sstevel@tonic-gate 18310Sstevel@tonic-gate if (msp && try == MUTEX_TRY) 18320Sstevel@tonic-gate tdb_incr(msp->mutex_try); 18330Sstevel@tonic-gate 18344574Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_is_held(mp)) 18354574Sraf return (mutex_recursion(mp, mtype, try)); 18360Sstevel@tonic-gate 18370Sstevel@tonic-gate if (self->ul_error_detection && try == MUTEX_LOCK && 18380Sstevel@tonic-gate tsp == NULL && mutex_is_held(mp)) 18390Sstevel@tonic-gate lock_error(mp, "mutex_lock", NULL, NULL); 18400Sstevel@tonic-gate 18414574Sraf if (mtype & LOCK_PRIO_PROTECT) { 18424574Sraf ceil = mp->mutex_ceiling; 18434574Sraf ASSERT(_validate_rt_prio(SCHED_FIFO, ceil) == 0); 18444574Sraf myprio = real_priority(self); 18454574Sraf if (myprio > ceil) { 18464574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL); 18474574Sraf return (EINVAL); 18484574Sraf } 18494574Sraf if ((error = _ceil_mylist_add(mp)) != 0) { 18504574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 18514574Sraf return (error); 18520Sstevel@tonic-gate } 18534574Sraf if (myprio < ceil) 18544574Sraf _ceil_prio_inherit(ceil); 18554574Sraf } 18564574Sraf 18574574Sraf if ((mtype & (USYNC_PROCESS | LOCK_ROBUST)) 18584574Sraf == (USYNC_PROCESS | LOCK_ROBUST)) 18594574Sraf register_lock(mp); 18604574Sraf 18614574Sraf if (mtype & LOCK_PRIO_INHERIT) { 18624574Sraf /* go straight to the kernel */ 18634574Sraf if (try == MUTEX_TRY) 18644574Sraf error = mutex_trylock_kernel(mp); 18654574Sraf else /* MUTEX_LOCK */ 18664574Sraf error = mutex_lock_kernel(mp, tsp, msp); 18674574Sraf /* 18684574Sraf * The kernel never sets or clears the lock byte 18694574Sraf * for LOCK_PRIO_INHERIT mutexes. 18704574Sraf * Set it here for consistency. 
18714574Sraf */ 18724574Sraf switch (error) { 18734574Sraf case 0: 18744574Sraf mp->mutex_lockw = LOCKSET; 18754574Sraf break; 18764574Sraf case EOWNERDEAD: 18774574Sraf case ELOCKUNMAPPED: 18784574Sraf mp->mutex_lockw = LOCKSET; 18794574Sraf /* FALLTHROUGH */ 18804574Sraf case ENOTRECOVERABLE: 18814574Sraf ASSERT(mtype & LOCK_ROBUST); 18824574Sraf break; 18834574Sraf case EDEADLK: 18844574Sraf if (try == MUTEX_LOCK) 18854574Sraf stall(); 18864574Sraf error = EBUSY; 18874574Sraf break; 18880Sstevel@tonic-gate } 18890Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 18904613Sraf error = mutex_trylock_process(mp, try == MUTEX_LOCK); 18914574Sraf if (error == EBUSY && try == MUTEX_LOCK) 18920Sstevel@tonic-gate error = mutex_lock_kernel(mp, tsp, msp); 18935629Sraf } else { /* USYNC_THREAD */ 18944613Sraf error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK); 18954574Sraf if (error == EBUSY && try == MUTEX_LOCK) 18964574Sraf error = mutex_lock_queue(self, msp, mp, tsp); 18970Sstevel@tonic-gate } 18980Sstevel@tonic-gate 18990Sstevel@tonic-gate switch (error) { 19004574Sraf case 0: 19010Sstevel@tonic-gate case EOWNERDEAD: 19020Sstevel@tonic-gate case ELOCKUNMAPPED: 19034574Sraf if (mtype & LOCK_ROBUST) 19044574Sraf remember_lock(mp); 19050Sstevel@tonic-gate if (msp) 19060Sstevel@tonic-gate record_begin_hold(msp); 19070Sstevel@tonic-gate break; 19080Sstevel@tonic-gate default: 19094574Sraf if (mtype & LOCK_PRIO_PROTECT) { 19104574Sraf (void) _ceil_mylist_del(mp); 19114574Sraf if (myprio < ceil) 19124574Sraf _ceil_prio_waive(); 19134574Sraf } 19140Sstevel@tonic-gate if (try == MUTEX_TRY) { 19150Sstevel@tonic-gate if (msp) 19160Sstevel@tonic-gate tdb_incr(msp->mutex_try_fail); 19170Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 19180Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 19190Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 19200Sstevel@tonic-gate } 19210Sstevel@tonic-gate } 19220Sstevel@tonic-gate break; 19230Sstevel@tonic-gate } 19240Sstevel@tonic-gate 19250Sstevel@tonic-gate return (error); 19260Sstevel@tonic-gate } 19270Sstevel@tonic-gate 19280Sstevel@tonic-gate int 19290Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try) 19300Sstevel@tonic-gate { 19310Sstevel@tonic-gate ulwp_t *self = curthread; 19320Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 19330Sstevel@tonic-gate 19340Sstevel@tonic-gate /* 19350Sstevel@tonic-gate * We know that USYNC_PROCESS is set in mtype and that 19360Sstevel@tonic-gate * zero, one, or both of the flags LOCK_RECURSIVE and 19370Sstevel@tonic-gate * LOCK_ERRORCHECK are set, and that no other flags are set. 
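	 * The fast paths in mutex_lock_impl() and __mutex_trylock() have
	 * already checked this before calling here; the ASSERT below
	 * re-verifies it.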
19380Sstevel@tonic-gate	 */
19394574Sraf	ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0);
19400Sstevel@tonic-gate	enter_critical(self);
1941*6057Sraf	if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) {
19420Sstevel@tonic-gate		mp->mutex_owner = (uintptr_t)self;
1943*6057Sraf		/* mp->mutex_ownerpid was set by set_lock_byte64() */
19440Sstevel@tonic-gate		exit_critical(self);
19450Sstevel@tonic-gate		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
19460Sstevel@tonic-gate		return (0);
19470Sstevel@tonic-gate	}
19480Sstevel@tonic-gate	exit_critical(self);
19490Sstevel@tonic-gate
19504574Sraf	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp))
19514574Sraf		return (mutex_recursion(mp, mtype, try));
19524574Sraf
19534613Sraf	if (try == MUTEX_LOCK) {
19544613Sraf		if (mutex_trylock_process(mp, 1) == 0)
19554613Sraf			return (0);
19560Sstevel@tonic-gate		return (mutex_lock_kernel(mp, tsp, NULL));
19574613Sraf	}
19580Sstevel@tonic-gate
19590Sstevel@tonic-gate	if (__td_event_report(self, TD_LOCK_TRY, udp)) {
19600Sstevel@tonic-gate		self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
19610Sstevel@tonic-gate		tdb_event(TD_LOCK_TRY, udp);
19620Sstevel@tonic-gate	}
19630Sstevel@tonic-gate	return (EBUSY);
19640Sstevel@tonic-gate }
19650Sstevel@tonic-gate
19660Sstevel@tonic-gate static int
19670Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp)
19680Sstevel@tonic-gate {
19690Sstevel@tonic-gate	ulwp_t *self = curthread;
19700Sstevel@tonic-gate	uberdata_t *udp = self->ul_uberdata;
19710Sstevel@tonic-gate	uberflags_t *gflags;
19720Sstevel@tonic-gate	int mtype;
19730Sstevel@tonic-gate
19740Sstevel@tonic-gate	/*
19750Sstevel@tonic-gate	 * Optimize the case of USYNC_THREAD, including
19760Sstevel@tonic-gate	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
19770Sstevel@tonic-gate	 * no error detection, no lock statistics,
19780Sstevel@tonic-gate	 * and the process has only a single thread.
19790Sstevel@tonic-gate	 * (Most likely a traditional single-threaded application.)
19800Sstevel@tonic-gate	 */
19810Sstevel@tonic-gate	if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
19820Sstevel@tonic-gate	    udp->uberflags.uf_all) == 0) {
19830Sstevel@tonic-gate		/*
19840Sstevel@tonic-gate		 * Only one thread exists so we don't need an atomic operation.
19850Sstevel@tonic-gate		 */
19860Sstevel@tonic-gate		if (mp->mutex_lockw == 0) {
19870Sstevel@tonic-gate			mp->mutex_lockw = LOCKSET;
19880Sstevel@tonic-gate			mp->mutex_owner = (uintptr_t)self;
19890Sstevel@tonic-gate			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
19900Sstevel@tonic-gate			return (0);
19910Sstevel@tonic-gate		}
19924574Sraf		if (mtype && MUTEX_OWNER(mp) == self)
19934574Sraf			return (mutex_recursion(mp, mtype, MUTEX_LOCK));
19940Sstevel@tonic-gate		/*
19950Sstevel@tonic-gate		 * We have reached a deadlock, probably because the
19960Sstevel@tonic-gate		 * process is executing non-async-signal-safe code in
19970Sstevel@tonic-gate		 * a signal handler and is attempting to acquire a lock
19980Sstevel@tonic-gate		 * that it already owns. This is not surprising, given
19990Sstevel@tonic-gate		 * bad programming practices over the years that have
20000Sstevel@tonic-gate		 * resulted in applications calling printf() and such
20010Sstevel@tonic-gate		 * in their signal handlers. Unless the user has told
20020Sstevel@tonic-gate		 * us that the signal handlers are safe by setting:
20030Sstevel@tonic-gate		 *	export _THREAD_ASYNC_SAFE=1
20040Sstevel@tonic-gate		 * we return EDEADLK rather than actually deadlocking.
20050Sstevel@tonic-gate */ 20060Sstevel@tonic-gate if (tsp == NULL && 20070Sstevel@tonic-gate MUTEX_OWNER(mp) == self && !self->ul_async_safe) { 20080Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 20090Sstevel@tonic-gate return (EDEADLK); 20100Sstevel@tonic-gate } 20110Sstevel@tonic-gate } 20120Sstevel@tonic-gate 20130Sstevel@tonic-gate /* 20140Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 20150Sstevel@tonic-gate * no error detection, and no lock statistics. 20160Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 20170Sstevel@tonic-gate */ 20180Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 20190Sstevel@tonic-gate (gflags->uf_trs_ted | 20200Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 20210Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 20220Sstevel@tonic-gate return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK)); 20230Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 20240Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 20250Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 20260Sstevel@tonic-gate return (0); 20270Sstevel@tonic-gate } 20284574Sraf if (mtype && MUTEX_OWNER(mp) == self) 20294574Sraf return (mutex_recursion(mp, mtype, MUTEX_LOCK)); 20304613Sraf if (mutex_trylock_adaptive(mp, 1) != 0) 20314574Sraf return (mutex_lock_queue(self, NULL, mp, tsp)); 20324574Sraf return (0); 20330Sstevel@tonic-gate } 20340Sstevel@tonic-gate 20350Sstevel@tonic-gate /* else do it the long way */ 20360Sstevel@tonic-gate return (mutex_lock_internal(mp, tsp, MUTEX_LOCK)); 20370Sstevel@tonic-gate } 20380Sstevel@tonic-gate 20395891Sraf /* 20405891Sraf * Of the following function names (all the same function, of course), 20415891Sraf * only _private_mutex_lock() is not exported from libc. This means 20425891Sraf * that calling _private_mutex_lock() within libc will not invoke the 20435891Sraf * dynamic linker. This is critical for any code called in the child 20445891Sraf * of vfork() (via posix_spawn()) because invoking the dynamic linker 20455891Sraf * in such a case would corrupt the parent's address space. There are 20465891Sraf * other places in libc where avoiding the dynamic linker is necessary. 20475891Sraf * Of course, _private_mutex_lock() can be called in cases not requiring 20485891Sraf * the avoidance of the dynamic linker too, and often is. 
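 * The #pragma weak directives below make all of the public names
 * aliases for the single local implementation, __mutex_lock().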
20495891Sraf */ 20500Sstevel@tonic-gate #pragma weak _private_mutex_lock = __mutex_lock 20510Sstevel@tonic-gate #pragma weak mutex_lock = __mutex_lock 20520Sstevel@tonic-gate #pragma weak _mutex_lock = __mutex_lock 20530Sstevel@tonic-gate #pragma weak pthread_mutex_lock = __mutex_lock 20540Sstevel@tonic-gate #pragma weak _pthread_mutex_lock = __mutex_lock 20550Sstevel@tonic-gate int 20560Sstevel@tonic-gate __mutex_lock(mutex_t *mp) 20570Sstevel@tonic-gate { 20580Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 20590Sstevel@tonic-gate return (mutex_lock_impl(mp, NULL)); 20600Sstevel@tonic-gate } 20610Sstevel@tonic-gate 20620Sstevel@tonic-gate #pragma weak pthread_mutex_timedlock = _pthread_mutex_timedlock 20630Sstevel@tonic-gate int 20640Sstevel@tonic-gate _pthread_mutex_timedlock(mutex_t *mp, const timespec_t *abstime) 20650Sstevel@tonic-gate { 20660Sstevel@tonic-gate timespec_t tslocal; 20670Sstevel@tonic-gate int error; 20680Sstevel@tonic-gate 20690Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 20700Sstevel@tonic-gate abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal); 20710Sstevel@tonic-gate error = mutex_lock_impl(mp, &tslocal); 20720Sstevel@tonic-gate if (error == ETIME) 20730Sstevel@tonic-gate error = ETIMEDOUT; 20740Sstevel@tonic-gate return (error); 20750Sstevel@tonic-gate } 20760Sstevel@tonic-gate 20770Sstevel@tonic-gate #pragma weak pthread_mutex_reltimedlock_np = _pthread_mutex_reltimedlock_np 20780Sstevel@tonic-gate int 20790Sstevel@tonic-gate _pthread_mutex_reltimedlock_np(mutex_t *mp, const timespec_t *reltime) 20800Sstevel@tonic-gate { 20810Sstevel@tonic-gate timespec_t tslocal; 20820Sstevel@tonic-gate int error; 20830Sstevel@tonic-gate 20840Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 20850Sstevel@tonic-gate tslocal = *reltime; 20860Sstevel@tonic-gate error = mutex_lock_impl(mp, &tslocal); 20870Sstevel@tonic-gate if (error == ETIME) 20880Sstevel@tonic-gate error = ETIMEDOUT; 20890Sstevel@tonic-gate return (error); 20900Sstevel@tonic-gate } 20910Sstevel@tonic-gate 20920Sstevel@tonic-gate #pragma weak _private_mutex_trylock = __mutex_trylock 20930Sstevel@tonic-gate #pragma weak mutex_trylock = __mutex_trylock 20940Sstevel@tonic-gate #pragma weak _mutex_trylock = __mutex_trylock 20950Sstevel@tonic-gate #pragma weak pthread_mutex_trylock = __mutex_trylock 20960Sstevel@tonic-gate #pragma weak _pthread_mutex_trylock = __mutex_trylock 20970Sstevel@tonic-gate int 20980Sstevel@tonic-gate __mutex_trylock(mutex_t *mp) 20990Sstevel@tonic-gate { 21000Sstevel@tonic-gate ulwp_t *self = curthread; 21010Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 21020Sstevel@tonic-gate uberflags_t *gflags; 21030Sstevel@tonic-gate int mtype; 21040Sstevel@tonic-gate 21050Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 21060Sstevel@tonic-gate /* 21070Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 21080Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 21090Sstevel@tonic-gate * no error detection, no lock statistics, 21100Sstevel@tonic-gate * and the process has only a single thread. 21110Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 
21120Sstevel@tonic-gate */ 21130Sstevel@tonic-gate if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 21140Sstevel@tonic-gate udp->uberflags.uf_all) == 0) { 21150Sstevel@tonic-gate /* 21160Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 21170Sstevel@tonic-gate */ 21180Sstevel@tonic-gate if (mp->mutex_lockw == 0) { 21190Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 21200Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 21210Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 21220Sstevel@tonic-gate return (0); 21230Sstevel@tonic-gate } 21244574Sraf if (mtype && MUTEX_OWNER(mp) == self) 21254574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 21260Sstevel@tonic-gate return (EBUSY); 21270Sstevel@tonic-gate } 21280Sstevel@tonic-gate 21290Sstevel@tonic-gate /* 21300Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 21310Sstevel@tonic-gate * no error detection, and no lock statistics. 21320Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 21330Sstevel@tonic-gate */ 21340Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 21350Sstevel@tonic-gate (gflags->uf_trs_ted | 21360Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 21370Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 21380Sstevel@tonic-gate return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY)); 21390Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 21400Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 21410Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 21420Sstevel@tonic-gate return (0); 21430Sstevel@tonic-gate } 21444574Sraf if (mtype && MUTEX_OWNER(mp) == self) 21454574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 21464613Sraf if (__td_event_report(self, TD_LOCK_TRY, udp)) { 21474613Sraf self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 21484613Sraf tdb_event(TD_LOCK_TRY, udp); 21490Sstevel@tonic-gate } 21504613Sraf return (EBUSY); 21510Sstevel@tonic-gate } 21520Sstevel@tonic-gate 21530Sstevel@tonic-gate /* else do it the long way */ 21540Sstevel@tonic-gate return (mutex_lock_internal(mp, NULL, MUTEX_TRY)); 21550Sstevel@tonic-gate } 21560Sstevel@tonic-gate 21570Sstevel@tonic-gate int 21584574Sraf mutex_unlock_internal(mutex_t *mp, int retain_robust_flags) 21590Sstevel@tonic-gate { 21600Sstevel@tonic-gate ulwp_t *self = curthread; 21610Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 21620Sstevel@tonic-gate int mtype = mp->mutex_type; 21630Sstevel@tonic-gate tdb_mutex_stats_t *msp; 21644574Sraf int error = 0; 21654574Sraf int release_all; 21660Sstevel@tonic-gate lwpid_t lwpid; 21670Sstevel@tonic-gate 21680Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !mutex_is_held(mp)) 21690Sstevel@tonic-gate return (EPERM); 21700Sstevel@tonic-gate 21710Sstevel@tonic-gate if (self->ul_error_detection && !mutex_is_held(mp)) 21720Sstevel@tonic-gate lock_error(mp, "mutex_unlock", NULL, NULL); 21730Sstevel@tonic-gate 21740Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 21750Sstevel@tonic-gate mp->mutex_rcount--; 21760Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 21770Sstevel@tonic-gate return (0); 21780Sstevel@tonic-gate } 21790Sstevel@tonic-gate 21800Sstevel@tonic-gate if ((msp = MUTEX_STATS(mp, udp)) != NULL) 21810Sstevel@tonic-gate (void) record_hold_time(msp); 21820Sstevel@tonic-gate 21834574Sraf if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) && 21844574Sraf 
(mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 21854574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 21864574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 21874574Sraf mp->mutex_flag |= LOCK_NOTRECOVERABLE; 21884574Sraf } 21894574Sraf release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); 21904574Sraf 21914574Sraf if (mtype & LOCK_PRIO_INHERIT) { 21920Sstevel@tonic-gate no_preempt(self); 21930Sstevel@tonic-gate mp->mutex_owner = 0; 2194*6057Sraf /* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */ 21950Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 21964574Sraf mp->mutex_lockw = LOCKCLEAR; 21974574Sraf error = ___lwp_mutex_unlock(mp); 21980Sstevel@tonic-gate preempt(self); 21990Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 22005629Sraf mutex_unlock_process(mp, release_all); 22010Sstevel@tonic-gate } else { /* USYNC_THREAD */ 22024574Sraf if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) { 22030Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 22040Sstevel@tonic-gate preempt(self); 22050Sstevel@tonic-gate } 22060Sstevel@tonic-gate } 22070Sstevel@tonic-gate 22084574Sraf if (mtype & LOCK_ROBUST) 22094574Sraf forget_lock(mp); 22104574Sraf 22114574Sraf if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) 22124574Sraf _ceil_prio_waive(); 22134574Sraf 22140Sstevel@tonic-gate return (error); 22150Sstevel@tonic-gate } 22160Sstevel@tonic-gate 22170Sstevel@tonic-gate #pragma weak _private_mutex_unlock = __mutex_unlock 22180Sstevel@tonic-gate #pragma weak mutex_unlock = __mutex_unlock 22190Sstevel@tonic-gate #pragma weak _mutex_unlock = __mutex_unlock 22200Sstevel@tonic-gate #pragma weak pthread_mutex_unlock = __mutex_unlock 22210Sstevel@tonic-gate #pragma weak _pthread_mutex_unlock = __mutex_unlock 22220Sstevel@tonic-gate int 22230Sstevel@tonic-gate __mutex_unlock(mutex_t *mp) 22240Sstevel@tonic-gate { 22250Sstevel@tonic-gate ulwp_t *self = curthread; 22260Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 22270Sstevel@tonic-gate uberflags_t *gflags; 22280Sstevel@tonic-gate lwpid_t lwpid; 22290Sstevel@tonic-gate int mtype; 22300Sstevel@tonic-gate short el; 22310Sstevel@tonic-gate 22320Sstevel@tonic-gate /* 22330Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 22340Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 22350Sstevel@tonic-gate * no error detection, no lock statistics, 22360Sstevel@tonic-gate * and the process has only a single thread. 22370Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 22380Sstevel@tonic-gate */ 22390Sstevel@tonic-gate if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 22400Sstevel@tonic-gate udp->uberflags.uf_all) == 0) { 22410Sstevel@tonic-gate if (mtype) { 22420Sstevel@tonic-gate /* 22430Sstevel@tonic-gate * At this point we know that one or both of the 22440Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 22450Sstevel@tonic-gate */ 22460Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 22470Sstevel@tonic-gate return (EPERM); 22480Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 22490Sstevel@tonic-gate mp->mutex_rcount--; 22500Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 22510Sstevel@tonic-gate return (0); 22520Sstevel@tonic-gate } 22530Sstevel@tonic-gate } 22540Sstevel@tonic-gate /* 22550Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 
22560Sstevel@tonic-gate * Also, there can be no waiters. 22570Sstevel@tonic-gate */ 22580Sstevel@tonic-gate mp->mutex_owner = 0; 22590Sstevel@tonic-gate mp->mutex_lockword = 0; 22600Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 22610Sstevel@tonic-gate return (0); 22620Sstevel@tonic-gate } 22630Sstevel@tonic-gate 22640Sstevel@tonic-gate /* 22650Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 22660Sstevel@tonic-gate * no error detection, and no lock statistics. 22670Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 22680Sstevel@tonic-gate */ 22690Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL) { 22700Sstevel@tonic-gate if (((el = gflags->uf_trs_ted) | mtype) == 0) { 22710Sstevel@tonic-gate fast_unlock: 22725629Sraf if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 22730Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 22740Sstevel@tonic-gate preempt(self); 22750Sstevel@tonic-gate } 22760Sstevel@tonic-gate return (0); 22770Sstevel@tonic-gate } 22780Sstevel@tonic-gate if (el) /* error detection or lock statistics */ 22790Sstevel@tonic-gate goto slow_unlock; 22800Sstevel@tonic-gate if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 22810Sstevel@tonic-gate /* 22820Sstevel@tonic-gate * At this point we know that one or both of the 22830Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 22840Sstevel@tonic-gate */ 22850Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 22860Sstevel@tonic-gate return (EPERM); 22870Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 22880Sstevel@tonic-gate mp->mutex_rcount--; 22890Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 22900Sstevel@tonic-gate return (0); 22910Sstevel@tonic-gate } 22920Sstevel@tonic-gate goto fast_unlock; 22930Sstevel@tonic-gate } 22940Sstevel@tonic-gate if ((mtype & 22950Sstevel@tonic-gate ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 22960Sstevel@tonic-gate /* 22970Sstevel@tonic-gate * At this point we know that zero, one, or both of the 22980Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and 22990Sstevel@tonic-gate * that the USYNC_PROCESS flag is set. 23000Sstevel@tonic-gate */ 23010Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp)) 23020Sstevel@tonic-gate return (EPERM); 23030Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 23040Sstevel@tonic-gate mp->mutex_rcount--; 23050Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 23060Sstevel@tonic-gate return (0); 23070Sstevel@tonic-gate } 23085629Sraf mutex_unlock_process(mp, 0); 23090Sstevel@tonic-gate return (0); 23100Sstevel@tonic-gate } 23110Sstevel@tonic-gate } 23120Sstevel@tonic-gate 23130Sstevel@tonic-gate /* else do it the long way */ 23140Sstevel@tonic-gate slow_unlock: 23154574Sraf return (mutex_unlock_internal(mp, 0)); 23160Sstevel@tonic-gate } 23170Sstevel@tonic-gate 23180Sstevel@tonic-gate /* 23190Sstevel@tonic-gate * Internally to the library, almost all mutex lock/unlock actions 23200Sstevel@tonic-gate * go through these lmutex_ functions, to protect critical regions. 23210Sstevel@tonic-gate * We replicate a bit of code from __mutex_lock() and __mutex_unlock() 23220Sstevel@tonic-gate * to make these functions faster since we know that the mutex type 23230Sstevel@tonic-gate * of all internal locks is USYNC_THREAD. 
We also know that internal 23240Sstevel@tonic-gate * locking can never fail, so we panic if it does. 23250Sstevel@tonic-gate */ 23260Sstevel@tonic-gate void 23270Sstevel@tonic-gate lmutex_lock(mutex_t *mp) 23280Sstevel@tonic-gate { 23290Sstevel@tonic-gate ulwp_t *self = curthread; 23300Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 23310Sstevel@tonic-gate 23320Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 23330Sstevel@tonic-gate 23340Sstevel@tonic-gate enter_critical(self); 23350Sstevel@tonic-gate /* 23360Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 23370Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 23380Sstevel@tonic-gate */ 23390Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 23400Sstevel@tonic-gate /* 23410Sstevel@tonic-gate * Only one thread exists; the mutex must be free. 23420Sstevel@tonic-gate */ 23430Sstevel@tonic-gate ASSERT(mp->mutex_lockw == 0); 23440Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 23450Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 23460Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 23470Sstevel@tonic-gate } else { 23480Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 23490Sstevel@tonic-gate 23500Sstevel@tonic-gate if (!self->ul_schedctl_called) 23510Sstevel@tonic-gate (void) setup_schedctl(); 23520Sstevel@tonic-gate 23530Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 23540Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 23550Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 23564613Sraf } else if (mutex_trylock_adaptive(mp, 1) != 0) { 23570Sstevel@tonic-gate (void) mutex_lock_queue(self, msp, mp, NULL); 23580Sstevel@tonic-gate } 23590Sstevel@tonic-gate 23600Sstevel@tonic-gate if (msp) 23610Sstevel@tonic-gate record_begin_hold(msp); 23620Sstevel@tonic-gate } 23630Sstevel@tonic-gate } 23640Sstevel@tonic-gate 23650Sstevel@tonic-gate void 23660Sstevel@tonic-gate lmutex_unlock(mutex_t *mp) 23670Sstevel@tonic-gate { 23680Sstevel@tonic-gate ulwp_t *self = curthread; 23690Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 23700Sstevel@tonic-gate 23710Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 23720Sstevel@tonic-gate 23730Sstevel@tonic-gate /* 23740Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 23750Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 23760Sstevel@tonic-gate */ 23770Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 23780Sstevel@tonic-gate /* 23790Sstevel@tonic-gate * Only one thread exists so there can be no waiters. 
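		 * A single store of zero to mutex_lockword therefore
		 * suffices: it clears the lock byte together with the
		 * waiter and spinner fields that share the word.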
23800Sstevel@tonic-gate		 */
23810Sstevel@tonic-gate		mp->mutex_owner = 0;
23820Sstevel@tonic-gate		mp->mutex_lockword = 0;
23830Sstevel@tonic-gate		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
23840Sstevel@tonic-gate	} else {
23850Sstevel@tonic-gate		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
23860Sstevel@tonic-gate		lwpid_t lwpid;
23870Sstevel@tonic-gate
23880Sstevel@tonic-gate		if (msp)
23890Sstevel@tonic-gate			(void) record_hold_time(msp);
23904574Sraf		if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
23910Sstevel@tonic-gate			(void) __lwp_unpark(lwpid);
23920Sstevel@tonic-gate			preempt(self);
23930Sstevel@tonic-gate		}
23940Sstevel@tonic-gate	}
23950Sstevel@tonic-gate	exit_critical(self);
23960Sstevel@tonic-gate }
23970Sstevel@tonic-gate
23982248Sraf /*
23992248Sraf  * For specialized code in libc, like the asynchronous i/o code,
24002248Sraf  * the following sig_*() locking primitives are used in order
24012248Sraf  * to make the code asynchronous-signal-safe. Signals are
24022248Sraf  * deferred while locks acquired by these functions are held.
24032248Sraf  */
24042248Sraf void
24052248Sraf sig_mutex_lock(mutex_t *mp)
24062248Sraf {
24072248Sraf	sigoff(curthread);
24082248Sraf	(void) _private_mutex_lock(mp);
24092248Sraf }
24102248Sraf
24112248Sraf void
24122248Sraf sig_mutex_unlock(mutex_t *mp)
24132248Sraf {
24142248Sraf	(void) _private_mutex_unlock(mp);
24152248Sraf	sigon(curthread);
24162248Sraf }
24172248Sraf
24182248Sraf int
24192248Sraf sig_mutex_trylock(mutex_t *mp)
24202248Sraf {
24212248Sraf	int error;
24222248Sraf
24232248Sraf	sigoff(curthread);
24242248Sraf	if ((error = _private_mutex_trylock(mp)) != 0)
24252248Sraf		sigon(curthread);
24262248Sraf	return (error);
24272248Sraf }
24282248Sraf
24292248Sraf /*
24302248Sraf  * sig_cond_wait() is a cancellation point.
24312248Sraf  */
24322248Sraf int
24332248Sraf sig_cond_wait(cond_t *cv, mutex_t *mp)
24342248Sraf {
24352248Sraf	int error;
24362248Sraf
24372248Sraf	ASSERT(curthread->ul_sigdefer != 0);
24382248Sraf	_private_testcancel();
24395891Sraf	error = __cond_wait(cv, mp);
24402248Sraf	if (error == EINTR && curthread->ul_cursig) {
24412248Sraf		sig_mutex_unlock(mp);
24422248Sraf		/* take the deferred signal here */
24432248Sraf		sig_mutex_lock(mp);
24442248Sraf	}
24452248Sraf	_private_testcancel();
24462248Sraf	return (error);
24472248Sraf }
24482248Sraf
24492248Sraf /*
24502248Sraf  * sig_cond_reltimedwait() is a cancellation point.
24512248Sraf  */
24522248Sraf int
24532248Sraf sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
24542248Sraf {
24552248Sraf	int error;
24562248Sraf
24572248Sraf	ASSERT(curthread->ul_sigdefer != 0);
24582248Sraf	_private_testcancel();
24595891Sraf	error = __cond_reltimedwait(cv, mp, ts);
24602248Sraf	if (error == EINTR && curthread->ul_cursig) {
24612248Sraf		sig_mutex_unlock(mp);
24622248Sraf		/* take the deferred signal here */
24632248Sraf		sig_mutex_lock(mp);
24642248Sraf	}
24652248Sraf	_private_testcancel();
24662248Sraf	return (error);
24672248Sraf }
24682248Sraf
24695891Sraf /*
24705891Sraf  * For specialized code in libc, like the stdio code,
24715891Sraf  * the following cancel_safe_*() locking primitives are used in
24725891Sraf  * order to make the code cancellation-safe. Cancellation is
24735891Sraf  * deferred while locks acquired by these functions are held.
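 *
 * A minimal usage sketch (the lock name here is hypothetical, not a
 * real libc lock):
 *
 *	cancel_safe_mutex_lock(&file_lock);
 *	...stdio-style critical section; cancellation is deferred...
 *	cancel_safe_mutex_unlock(&file_lock);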
24745891Sraf */ 24755891Sraf void 24765891Sraf cancel_safe_mutex_lock(mutex_t *mp) 24775891Sraf { 24785891Sraf (void) _private_mutex_lock(mp); 24795891Sraf curthread->ul_libc_locks++; 24805891Sraf } 24815891Sraf 24825891Sraf int 24835891Sraf cancel_safe_mutex_trylock(mutex_t *mp) 24845891Sraf { 24855891Sraf int error; 24865891Sraf 24875891Sraf if ((error = _private_mutex_trylock(mp)) == 0) 24885891Sraf curthread->ul_libc_locks++; 24895891Sraf return (error); 24905891Sraf } 24915891Sraf 24925891Sraf void 24935891Sraf cancel_safe_mutex_unlock(mutex_t *mp) 24945891Sraf { 24955891Sraf ulwp_t *self = curthread; 24965891Sraf 24975891Sraf ASSERT(self->ul_libc_locks != 0); 24985891Sraf 24995891Sraf (void) _private_mutex_unlock(mp); 25005891Sraf 25015891Sraf /* 25025891Sraf * Decrement the count of locks held by cancel_safe_mutex_lock(). 25035891Sraf * If we are then in a position to terminate cleanly and 25045891Sraf * if there is a pending cancellation and cancellation 25055891Sraf * is not disabled and we received EINTR from a recent 25065891Sraf * system call then perform the cancellation action now. 25075891Sraf */ 25085891Sraf if (--self->ul_libc_locks == 0 && 25095891Sraf !(self->ul_vfork | self->ul_nocancel | 25105891Sraf self->ul_critical | self->ul_sigdefer) && 25115891Sraf cancel_active()) 25125891Sraf _pthread_exit(PTHREAD_CANCELED); 25135891Sraf } 25145891Sraf 25150Sstevel@tonic-gate static int 25160Sstevel@tonic-gate shared_mutex_held(mutex_t *mparg) 25170Sstevel@tonic-gate { 25180Sstevel@tonic-gate /* 25194574Sraf * The 'volatile' is necessary to make sure the compiler doesn't 25204574Sraf * reorder the tests of the various components of the mutex. 25214574Sraf * They must be tested in this order: 25224574Sraf * mutex_lockw 25234574Sraf * mutex_owner 25244574Sraf * mutex_ownerpid 25254574Sraf * This relies on the fact that everywhere mutex_lockw is cleared, 25264574Sraf * mutex_owner and mutex_ownerpid are cleared before mutex_lockw 25274574Sraf * is cleared, and that everywhere mutex_lockw is set, mutex_owner 25284574Sraf * and mutex_ownerpid are set after mutex_lockw is set, and that 25294574Sraf * mutex_lockw is set or cleared with a memory barrier. 25300Sstevel@tonic-gate */ 25310Sstevel@tonic-gate volatile mutex_t *mp = (volatile mutex_t *)mparg; 25320Sstevel@tonic-gate ulwp_t *self = curthread; 25330Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 25340Sstevel@tonic-gate 25354574Sraf return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid); 25360Sstevel@tonic-gate } 25370Sstevel@tonic-gate 25380Sstevel@tonic-gate /* 25390Sstevel@tonic-gate * Some crufty old programs define their own version of _mutex_held() 25400Sstevel@tonic-gate * to be simply return(1). This breaks internal libc logic, so we 25410Sstevel@tonic-gate * define a private version for exclusive use by libc, mutex_is_held(), 25420Sstevel@tonic-gate * and also a new public function, __mutex_held(), to be used in new 25430Sstevel@tonic-gate * code to circumvent these crufty old programs. 
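 * For example, a program that supplies its own
 *	int _mutex_held(mutex_t *mp) { return (1); }
 * would otherwise defeat every ownership check that libc makes.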
25440Sstevel@tonic-gate */ 25450Sstevel@tonic-gate #pragma weak mutex_held = mutex_is_held 25460Sstevel@tonic-gate #pragma weak _mutex_held = mutex_is_held 25470Sstevel@tonic-gate #pragma weak __mutex_held = mutex_is_held 25480Sstevel@tonic-gate int 25494574Sraf mutex_is_held(mutex_t *mparg) 25500Sstevel@tonic-gate { 25514574Sraf volatile mutex_t *mp = (volatile mutex_t *)mparg; 25524574Sraf 25534574Sraf if (mparg->mutex_type & USYNC_PROCESS) 25544574Sraf return (shared_mutex_held(mparg)); 25550Sstevel@tonic-gate return (MUTEX_OWNED(mp, curthread)); 25560Sstevel@tonic-gate } 25570Sstevel@tonic-gate 25580Sstevel@tonic-gate #pragma weak _private_mutex_destroy = __mutex_destroy 25590Sstevel@tonic-gate #pragma weak mutex_destroy = __mutex_destroy 25600Sstevel@tonic-gate #pragma weak _mutex_destroy = __mutex_destroy 25610Sstevel@tonic-gate #pragma weak pthread_mutex_destroy = __mutex_destroy 25620Sstevel@tonic-gate #pragma weak _pthread_mutex_destroy = __mutex_destroy 25630Sstevel@tonic-gate int 25640Sstevel@tonic-gate __mutex_destroy(mutex_t *mp) 25650Sstevel@tonic-gate { 25664574Sraf if (mp->mutex_type & USYNC_PROCESS) 25674574Sraf forget_lock(mp); 25684574Sraf (void) _memset(mp, 0, sizeof (*mp)); 25690Sstevel@tonic-gate tdb_sync_obj_deregister(mp); 25700Sstevel@tonic-gate return (0); 25710Sstevel@tonic-gate } 25720Sstevel@tonic-gate 25734574Sraf #pragma weak mutex_consistent = __mutex_consistent 25744574Sraf #pragma weak _mutex_consistent = __mutex_consistent 25754574Sraf #pragma weak pthread_mutex_consistent_np = __mutex_consistent 25764574Sraf #pragma weak _pthread_mutex_consistent_np = __mutex_consistent 25774574Sraf int 25784574Sraf __mutex_consistent(mutex_t *mp) 25794574Sraf { 25804574Sraf /* 25814574Sraf * Do this only for an inconsistent, initialized robust lock 25824574Sraf * that we hold. For all other cases, return EINVAL. 25834574Sraf */ 25844574Sraf if (mutex_is_held(mp) && 25854574Sraf (mp->mutex_type & LOCK_ROBUST) && 25864574Sraf (mp->mutex_flag & LOCK_INITED) && 25874574Sraf (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 25884574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 25894574Sraf mp->mutex_rcount = 0; 25904574Sraf return (0); 25914574Sraf } 25924574Sraf return (EINVAL); 25934574Sraf } 25944574Sraf 25950Sstevel@tonic-gate /* 25960Sstevel@tonic-gate * Spin locks are separate from ordinary mutexes, 25970Sstevel@tonic-gate * but we use the same data structure for them. 
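 *
 * A minimal usage sketch (hypothetical caller, not part of libc):
 *
 *	pthread_spinlock_t lk;
 *	(void) pthread_spin_init(&lk, PTHREAD_PROCESS_PRIVATE);
 *	(void) pthread_spin_lock(&lk);
 *	...short critical section...
 *	(void) pthread_spin_unlock(&lk);
 *	(void) pthread_spin_destroy(&lk);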
25980Sstevel@tonic-gate */ 25990Sstevel@tonic-gate 26000Sstevel@tonic-gate #pragma weak pthread_spin_init = _pthread_spin_init 26010Sstevel@tonic-gate int 26020Sstevel@tonic-gate _pthread_spin_init(pthread_spinlock_t *lock, int pshared) 26030Sstevel@tonic-gate { 26040Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 26050Sstevel@tonic-gate 26060Sstevel@tonic-gate (void) _memset(mp, 0, sizeof (*mp)); 26070Sstevel@tonic-gate if (pshared == PTHREAD_PROCESS_SHARED) 26080Sstevel@tonic-gate mp->mutex_type = USYNC_PROCESS; 26090Sstevel@tonic-gate else 26100Sstevel@tonic-gate mp->mutex_type = USYNC_THREAD; 26110Sstevel@tonic-gate mp->mutex_flag = LOCK_INITED; 26120Sstevel@tonic-gate mp->mutex_magic = MUTEX_MAGIC; 26130Sstevel@tonic-gate return (0); 26140Sstevel@tonic-gate } 26150Sstevel@tonic-gate 26160Sstevel@tonic-gate #pragma weak pthread_spin_destroy = _pthread_spin_destroy 26170Sstevel@tonic-gate int 26180Sstevel@tonic-gate _pthread_spin_destroy(pthread_spinlock_t *lock) 26190Sstevel@tonic-gate { 26200Sstevel@tonic-gate (void) _memset(lock, 0, sizeof (*lock)); 26210Sstevel@tonic-gate return (0); 26220Sstevel@tonic-gate } 26230Sstevel@tonic-gate 26240Sstevel@tonic-gate #pragma weak pthread_spin_trylock = _pthread_spin_trylock 26250Sstevel@tonic-gate int 26260Sstevel@tonic-gate _pthread_spin_trylock(pthread_spinlock_t *lock) 26270Sstevel@tonic-gate { 26280Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 26290Sstevel@tonic-gate ulwp_t *self = curthread; 26300Sstevel@tonic-gate int error = 0; 26310Sstevel@tonic-gate 26320Sstevel@tonic-gate no_preempt(self); 26330Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) != 0) 26340Sstevel@tonic-gate error = EBUSY; 26350Sstevel@tonic-gate else { 26360Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 26370Sstevel@tonic-gate if (mp->mutex_type == USYNC_PROCESS) 26380Sstevel@tonic-gate mp->mutex_ownerpid = self->ul_uberdata->pid; 26390Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 26400Sstevel@tonic-gate } 26410Sstevel@tonic-gate preempt(self); 26420Sstevel@tonic-gate return (error); 26430Sstevel@tonic-gate } 26440Sstevel@tonic-gate 26450Sstevel@tonic-gate #pragma weak pthread_spin_lock = _pthread_spin_lock 26460Sstevel@tonic-gate int 26470Sstevel@tonic-gate _pthread_spin_lock(pthread_spinlock_t *lock) 26480Sstevel@tonic-gate { 26494574Sraf mutex_t *mp = (mutex_t *)lock; 26504574Sraf ulwp_t *self = curthread; 26514574Sraf volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw; 26524574Sraf int count = 0; 26534574Sraf 26544574Sraf ASSERT(!self->ul_critical || self->ul_bindflags); 26554574Sraf 26564574Sraf DTRACE_PROBE1(plockstat, mutex__spin, mp); 26574574Sraf 26580Sstevel@tonic-gate /* 26590Sstevel@tonic-gate * We don't care whether the owner is running on a processor. 26600Sstevel@tonic-gate * We just spin because that's what this interface requires. 
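	 * Callers are expected to hold a spin lock only briefly; if the
	 * owner is preempted while holding it, every contender simply
	 * burns cycles until the owner runs again.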
26610Sstevel@tonic-gate */ 26620Sstevel@tonic-gate for (;;) { 26630Sstevel@tonic-gate if (*lockp == 0) { /* lock byte appears to be clear */ 26644574Sraf no_preempt(self); 26654574Sraf if (set_lock_byte(lockp) == 0) 26664574Sraf break; 26674574Sraf preempt(self); 26680Sstevel@tonic-gate } 26695629Sraf if (count < INT_MAX) 26705629Sraf count++; 26710Sstevel@tonic-gate SMT_PAUSE(); 26720Sstevel@tonic-gate } 26734574Sraf mp->mutex_owner = (uintptr_t)self; 26744574Sraf if (mp->mutex_type == USYNC_PROCESS) 26754574Sraf mp->mutex_ownerpid = self->ul_uberdata->pid; 26764574Sraf preempt(self); 26775629Sraf if (count) { 26785629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 26795629Sraf } 26804574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 26814574Sraf return (0); 26820Sstevel@tonic-gate } 26830Sstevel@tonic-gate 26840Sstevel@tonic-gate #pragma weak pthread_spin_unlock = _pthread_spin_unlock 26850Sstevel@tonic-gate int 26860Sstevel@tonic-gate _pthread_spin_unlock(pthread_spinlock_t *lock) 26870Sstevel@tonic-gate { 26880Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 26890Sstevel@tonic-gate ulwp_t *self = curthread; 26900Sstevel@tonic-gate 26910Sstevel@tonic-gate no_preempt(self); 26920Sstevel@tonic-gate mp->mutex_owner = 0; 26930Sstevel@tonic-gate mp->mutex_ownerpid = 0; 26940Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 26954570Sraf (void) atomic_swap_32(&mp->mutex_lockword, 0); 26960Sstevel@tonic-gate preempt(self); 26970Sstevel@tonic-gate return (0); 26980Sstevel@tonic-gate } 26990Sstevel@tonic-gate 27005629Sraf #define INITIAL_LOCKS 8 /* initial size of ul_heldlocks.array */ 27014574Sraf 27024574Sraf /* 27034574Sraf * Find/allocate an entry for 'lock' in our array of held locks. 27044574Sraf */ 27054574Sraf static mutex_t ** 27064574Sraf find_lock_entry(mutex_t *lock) 27074574Sraf { 27084574Sraf ulwp_t *self = curthread; 27094574Sraf mutex_t **remembered = NULL; 27104574Sraf mutex_t **lockptr; 27114574Sraf uint_t nlocks; 27124574Sraf 27134574Sraf if ((nlocks = self->ul_heldlockcnt) != 0) 27144574Sraf lockptr = self->ul_heldlocks.array; 27154574Sraf else { 27164574Sraf nlocks = 1; 27174574Sraf lockptr = &self->ul_heldlocks.single; 27184574Sraf } 27194574Sraf 27204574Sraf for (; nlocks; nlocks--, lockptr++) { 27214574Sraf if (*lockptr == lock) 27224574Sraf return (lockptr); 27234574Sraf if (*lockptr == NULL && remembered == NULL) 27244574Sraf remembered = lockptr; 27254574Sraf } 27264574Sraf if (remembered != NULL) { 27274574Sraf *remembered = lock; 27284574Sraf return (remembered); 27294574Sraf } 27304574Sraf 27314574Sraf /* 27324574Sraf * No entry available. Allocate more space, converting 27334574Sraf * the single entry into an array of entries if necessary. 27344574Sraf */ 27354574Sraf if ((nlocks = self->ul_heldlockcnt) == 0) { 27364574Sraf /* 27374574Sraf * Initial allocation of the array. 27384574Sraf * Convert the single entry into an array. 27394574Sraf */ 27404574Sraf self->ul_heldlockcnt = nlocks = INITIAL_LOCKS; 27414574Sraf lockptr = lmalloc(nlocks * sizeof (mutex_t *)); 27424574Sraf /* 27434574Sraf * The single entry becomes the first entry in the array. 27444574Sraf */ 27454574Sraf *lockptr = self->ul_heldlocks.single; 27464574Sraf self->ul_heldlocks.array = lockptr; 27474574Sraf /* 27484574Sraf * Return the next available entry in the array. 27494574Sraf */ 27504574Sraf *++lockptr = lock; 27514574Sraf return (lockptr); 27524574Sraf } 27534574Sraf /* 27544574Sraf * Reallocate the array, double the size each time. 
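	 *
	 * Doubling gives amortized O(1) insertion: growing through
	 * sizes 8 -> 16 -> ... -> n copies 8 + 16 + ... + n/2 < n
	 * entries in all, so on average each remembered lock pays
	 * for at most one copy.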
27554574Sraf */ 27564574Sraf lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *)); 27574574Sraf (void) _memcpy(lockptr, self->ul_heldlocks.array, 27584574Sraf nlocks * sizeof (mutex_t *)); 27594574Sraf lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *)); 27604574Sraf self->ul_heldlocks.array = lockptr; 27614574Sraf self->ul_heldlockcnt *= 2; 27624574Sraf /* 27634574Sraf * Return the next available entry in the newly allocated array. 27644574Sraf */ 27654574Sraf *(lockptr += nlocks) = lock; 27664574Sraf return (lockptr); 27674574Sraf } 27684574Sraf 27694574Sraf /* 27704574Sraf * Insert 'lock' into our list of held locks. 27714574Sraf * Currently only used for LOCK_ROBUST mutexes. 27724574Sraf */ 27734574Sraf void 27744574Sraf remember_lock(mutex_t *lock) 27754574Sraf { 27764574Sraf (void) find_lock_entry(lock); 27774574Sraf } 27784574Sraf 27794574Sraf /* 27804574Sraf * Remove 'lock' from our list of held locks. 27814574Sraf * Currently only used for LOCK_ROBUST mutexes. 27824574Sraf */ 27834574Sraf void 27844574Sraf forget_lock(mutex_t *lock) 27854574Sraf { 27864574Sraf *find_lock_entry(lock) = NULL; 27874574Sraf } 27884574Sraf 27894574Sraf /* 27904574Sraf * Free the array of held locks. 27914574Sraf */ 27924574Sraf void 27934574Sraf heldlock_free(ulwp_t *ulwp) 27944574Sraf { 27954574Sraf uint_t nlocks; 27964574Sraf 27974574Sraf if ((nlocks = ulwp->ul_heldlockcnt) != 0) 27984574Sraf lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *)); 27994574Sraf ulwp->ul_heldlockcnt = 0; 28004574Sraf ulwp->ul_heldlocks.array = NULL; 28014574Sraf } 28024574Sraf 28034574Sraf /* 28044574Sraf * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD. 28054574Sraf * Called from _thrp_exit() to deal with abandoned locks. 28064574Sraf */ 28074574Sraf void 28084574Sraf heldlock_exit(void) 28094574Sraf { 28104574Sraf ulwp_t *self = curthread; 28114574Sraf mutex_t **lockptr; 28124574Sraf uint_t nlocks; 28134574Sraf mutex_t *mp; 28144574Sraf 28154574Sraf if ((nlocks = self->ul_heldlockcnt) != 0) 28164574Sraf lockptr = self->ul_heldlocks.array; 28174574Sraf else { 28184574Sraf nlocks = 1; 28194574Sraf lockptr = &self->ul_heldlocks.single; 28204574Sraf } 28214574Sraf 28224574Sraf for (; nlocks; nlocks--, lockptr++) { 28234574Sraf /* 28244574Sraf * The kernel takes care of transitioning held 28254574Sraf * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD. 28264574Sraf * We avoid that case here. 
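		 *
		 * The mask test below selects exactly the robust,
		 * non-priority-inheritance locks:
		 * (mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT))
		 * compares equal to LOCK_ROBUST only when LOCK_ROBUST
		 * is set and LOCK_PRIO_INHERIT is clear.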
28274574Sraf 		 */
28284574Sraf 		if ((mp = *lockptr) != NULL &&
28294574Sraf 		    mutex_is_held(mp) &&
28304574Sraf 		    (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
28314574Sraf 		    LOCK_ROBUST) {
28324574Sraf 			mp->mutex_rcount = 0;
28334574Sraf 			if (!(mp->mutex_flag & LOCK_UNMAPPED))
28344574Sraf 				mp->mutex_flag |= LOCK_OWNERDEAD;
28354574Sraf 			(void) mutex_unlock_internal(mp, 1);
28364574Sraf 		}
28374574Sraf 	}
28384574Sraf 
28394574Sraf 	heldlock_free(self);
28404574Sraf }
28414574Sraf 
28420Sstevel@tonic-gate #pragma weak cond_init = _cond_init
28430Sstevel@tonic-gate /* ARGSUSED2 */
28440Sstevel@tonic-gate int
28450Sstevel@tonic-gate _cond_init(cond_t *cvp, int type, void *arg)
28460Sstevel@tonic-gate {
28470Sstevel@tonic-gate 	if (type != USYNC_THREAD && type != USYNC_PROCESS)
28480Sstevel@tonic-gate 		return (EINVAL);
28490Sstevel@tonic-gate 	(void) _memset(cvp, 0, sizeof (*cvp));
28500Sstevel@tonic-gate 	cvp->cond_type = (uint16_t)type;
28510Sstevel@tonic-gate 	cvp->cond_magic = COND_MAGIC;
28520Sstevel@tonic-gate 	return (0);
28530Sstevel@tonic-gate }
28540Sstevel@tonic-gate 
28550Sstevel@tonic-gate /*
28560Sstevel@tonic-gate  * cond_sleep_queue(): utility function for cond_wait_queue().
28570Sstevel@tonic-gate  *
28580Sstevel@tonic-gate  * Go to sleep on a condvar sleep queue, expecting to be waked up
28590Sstevel@tonic-gate  * by someone calling cond_signal() or cond_broadcast() or due
28600Sstevel@tonic-gate  * to receiving a UNIX signal or being cancelled, or just simply
28610Sstevel@tonic-gate  * due to a spurious wakeup (like someone calling forkall()).
28620Sstevel@tonic-gate  *
28630Sstevel@tonic-gate  * The associated mutex is *not* reacquired before returning.
28640Sstevel@tonic-gate  * That must be done by the caller of cond_sleep_queue().
28650Sstevel@tonic-gate  */
28664574Sraf static int
28670Sstevel@tonic-gate cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
28680Sstevel@tonic-gate {
28690Sstevel@tonic-gate 	ulwp_t *self = curthread;
28700Sstevel@tonic-gate 	queue_head_t *qp;
28710Sstevel@tonic-gate 	queue_head_t *mqp;
28720Sstevel@tonic-gate 	lwpid_t lwpid;
28730Sstevel@tonic-gate 	int signalled;
28740Sstevel@tonic-gate 	int error;
28754574Sraf 	int release_all;
28760Sstevel@tonic-gate 
28770Sstevel@tonic-gate 	/*
28780Sstevel@tonic-gate 	 * Put ourself on the CV sleep queue, unlock the mutex, then
28790Sstevel@tonic-gate 	 * park ourself and unpark a candidate lwp to grab the mutex.
28800Sstevel@tonic-gate 	 * We must go onto the CV sleep queue before dropping the
28810Sstevel@tonic-gate 	 * mutex in order to guarantee atomicity of the operation.
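	 *
	 * Were we to drop the mutex first, a cond_signal() from
	 * another thread could slip in before we reached the sleep
	 * queue, find no waiters, and be lost; we would then park
	 * waiting for a wakeup that has already come and gone.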
28820Sstevel@tonic-gate */ 28830Sstevel@tonic-gate self->ul_sp = stkptr(); 28840Sstevel@tonic-gate qp = queue_lock(cvp, CV); 28850Sstevel@tonic-gate enqueue(qp, self, cvp, CV); 28860Sstevel@tonic-gate cvp->cond_waiters_user = 1; 28870Sstevel@tonic-gate self->ul_cvmutex = mp; 28880Sstevel@tonic-gate self->ul_cv_wake = (tsp != NULL); 28890Sstevel@tonic-gate self->ul_signalled = 0; 28904574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 28914574Sraf mp->mutex_flag &= ~LOCK_OWNERDEAD; 28924574Sraf mp->mutex_flag |= LOCK_NOTRECOVERABLE; 28934574Sraf } 28944574Sraf release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); 28954574Sraf lwpid = mutex_unlock_queue(mp, release_all); 28960Sstevel@tonic-gate for (;;) { 28970Sstevel@tonic-gate set_parking_flag(self, 1); 28980Sstevel@tonic-gate queue_unlock(qp); 28990Sstevel@tonic-gate if (lwpid != 0) { 29000Sstevel@tonic-gate lwpid = preempt_unpark(self, lwpid); 29010Sstevel@tonic-gate preempt(self); 29020Sstevel@tonic-gate } 29030Sstevel@tonic-gate /* 29040Sstevel@tonic-gate * We may have a deferred signal present, 29050Sstevel@tonic-gate * in which case we should return EINTR. 29060Sstevel@tonic-gate * Also, we may have received a SIGCANCEL; if so 29070Sstevel@tonic-gate * and we are cancelable we should return EINTR. 29080Sstevel@tonic-gate * We force an immediate EINTR return from 29090Sstevel@tonic-gate * __lwp_park() by turning our parking flag off. 29100Sstevel@tonic-gate */ 29110Sstevel@tonic-gate if (self->ul_cursig != 0 || 29120Sstevel@tonic-gate (self->ul_cancelable && self->ul_cancel_pending)) 29130Sstevel@tonic-gate set_parking_flag(self, 0); 29140Sstevel@tonic-gate /* 29150Sstevel@tonic-gate * __lwp_park() will return the residual time in tsp 29160Sstevel@tonic-gate * if we are unparked before the timeout expires. 29170Sstevel@tonic-gate */ 29180Sstevel@tonic-gate error = __lwp_park(tsp, lwpid); 29190Sstevel@tonic-gate set_parking_flag(self, 0); 29200Sstevel@tonic-gate lwpid = 0; /* unpark the other lwp only once */ 29210Sstevel@tonic-gate /* 29220Sstevel@tonic-gate * We were waked up by cond_signal(), cond_broadcast(), 29230Sstevel@tonic-gate * by an interrupt or timeout (EINTR or ETIME), 29240Sstevel@tonic-gate * or we may just have gotten a spurious wakeup. 29250Sstevel@tonic-gate */ 29260Sstevel@tonic-gate qp = queue_lock(cvp, CV); 29270Sstevel@tonic-gate mqp = queue_lock(mp, MX); 29280Sstevel@tonic-gate if (self->ul_sleepq == NULL) 29290Sstevel@tonic-gate break; 29300Sstevel@tonic-gate /* 29310Sstevel@tonic-gate * We are on either the condvar sleep queue or the 29321893Sraf * mutex sleep queue. Break out of the sleep if we 29331893Sraf * were interrupted or we timed out (EINTR or ETIME). 29340Sstevel@tonic-gate * Else this is a spurious wakeup; continue the loop. 29350Sstevel@tonic-gate */ 29361893Sraf if (self->ul_sleepq == mqp) { /* mutex queue */ 29371893Sraf if (error) { 29381893Sraf mp->mutex_waiters = dequeue_self(mqp, mp); 29391893Sraf break; 29401893Sraf } 29411893Sraf tsp = NULL; /* no more timeout */ 29421893Sraf } else if (self->ul_sleepq == qp) { /* condvar queue */ 29430Sstevel@tonic-gate if (error) { 29440Sstevel@tonic-gate cvp->cond_waiters_user = dequeue_self(qp, cvp); 29450Sstevel@tonic-gate break; 29460Sstevel@tonic-gate } 29470Sstevel@tonic-gate /* 29480Sstevel@tonic-gate * Else a spurious wakeup on the condvar queue. 29490Sstevel@tonic-gate * __lwp_park() has already adjusted the timeout. 
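			 * We simply loop around and park again for
			 * the residual time.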
29500Sstevel@tonic-gate */ 29510Sstevel@tonic-gate } else { 29520Sstevel@tonic-gate thr_panic("cond_sleep_queue(): thread not on queue"); 29530Sstevel@tonic-gate } 29540Sstevel@tonic-gate queue_unlock(mqp); 29550Sstevel@tonic-gate } 29560Sstevel@tonic-gate 29570Sstevel@tonic-gate self->ul_sp = 0; 29580Sstevel@tonic-gate ASSERT(self->ul_cvmutex == NULL && self->ul_cv_wake == 0); 29590Sstevel@tonic-gate ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 29600Sstevel@tonic-gate self->ul_wchan == NULL); 29610Sstevel@tonic-gate 29620Sstevel@tonic-gate signalled = self->ul_signalled; 29630Sstevel@tonic-gate self->ul_signalled = 0; 29640Sstevel@tonic-gate queue_unlock(qp); 29650Sstevel@tonic-gate queue_unlock(mqp); 29660Sstevel@tonic-gate 29670Sstevel@tonic-gate /* 29680Sstevel@tonic-gate * If we were concurrently cond_signal()d and any of: 29690Sstevel@tonic-gate * received a UNIX signal, were cancelled, or got a timeout, 29700Sstevel@tonic-gate * then perform another cond_signal() to avoid consuming it. 29710Sstevel@tonic-gate */ 29720Sstevel@tonic-gate if (error && signalled) 29730Sstevel@tonic-gate (void) cond_signal_internal(cvp); 29740Sstevel@tonic-gate 29750Sstevel@tonic-gate return (error); 29760Sstevel@tonic-gate } 29770Sstevel@tonic-gate 29780Sstevel@tonic-gate int 29795629Sraf cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 29800Sstevel@tonic-gate { 29810Sstevel@tonic-gate ulwp_t *self = curthread; 29820Sstevel@tonic-gate int error; 29834574Sraf int merror; 29840Sstevel@tonic-gate 29850Sstevel@tonic-gate /* 29860Sstevel@tonic-gate * The old thread library was programmed to defer signals 29870Sstevel@tonic-gate * while in cond_wait() so that the associated mutex would 29880Sstevel@tonic-gate * be guaranteed to be held when the application signal 29890Sstevel@tonic-gate * handler was invoked. 29900Sstevel@tonic-gate * 29910Sstevel@tonic-gate * We do not behave this way by default; the state of the 29920Sstevel@tonic-gate * associated mutex in the signal handler is undefined. 29930Sstevel@tonic-gate * 29940Sstevel@tonic-gate * To accommodate applications that depend on the old 29950Sstevel@tonic-gate * behavior, the _THREAD_COND_WAIT_DEFER environment 29960Sstevel@tonic-gate * variable can be set to 1 and we will behave in the 29970Sstevel@tonic-gate * old way with respect to cond_wait(). 29980Sstevel@tonic-gate */ 29990Sstevel@tonic-gate if (self->ul_cond_wait_defer) 30000Sstevel@tonic-gate sigoff(self); 30010Sstevel@tonic-gate 30020Sstevel@tonic-gate error = cond_sleep_queue(cvp, mp, tsp); 30030Sstevel@tonic-gate 30040Sstevel@tonic-gate /* 30050Sstevel@tonic-gate * Reacquire the mutex. 30060Sstevel@tonic-gate */ 30075629Sraf if ((merror = mutex_lock_impl(mp, NULL)) != 0) 30084574Sraf error = merror; 30090Sstevel@tonic-gate 30100Sstevel@tonic-gate /* 30110Sstevel@tonic-gate * Take any deferred signal now, after we have reacquired the mutex. 30120Sstevel@tonic-gate */ 30130Sstevel@tonic-gate if (self->ul_cond_wait_defer) 30140Sstevel@tonic-gate sigon(self); 30150Sstevel@tonic-gate 30160Sstevel@tonic-gate return (error); 30170Sstevel@tonic-gate } 30180Sstevel@tonic-gate 30190Sstevel@tonic-gate /* 30200Sstevel@tonic-gate * cond_sleep_kernel(): utility function for cond_wait_kernel(). 30210Sstevel@tonic-gate * See the comment ahead of cond_sleep_queue(), above. 
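 *
 * Unlike cond_sleep_queue(), which parks the thread on a user-level
 * sleep queue, this path hands the entire wait to the kernel via
 * ___lwp_cond_wait().  As the dispatch in cond_wait_common() shows,
 * it is taken for process-shared condvars or mutexes and for mutexes
 * carrying the LOCK_PRIO_INHERIT or LOCK_PRIO_PROTECT attributes.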
30220Sstevel@tonic-gate */ 30234574Sraf static int 30240Sstevel@tonic-gate cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 30250Sstevel@tonic-gate { 30260Sstevel@tonic-gate int mtype = mp->mutex_type; 30270Sstevel@tonic-gate ulwp_t *self = curthread; 30280Sstevel@tonic-gate int error; 30290Sstevel@tonic-gate 30304574Sraf if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) 30314574Sraf _ceil_prio_waive(); 30320Sstevel@tonic-gate 30330Sstevel@tonic-gate self->ul_sp = stkptr(); 30340Sstevel@tonic-gate self->ul_wchan = cvp; 30350Sstevel@tonic-gate mp->mutex_owner = 0; 3036*6057Sraf /* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */ 30374574Sraf if (mtype & LOCK_PRIO_INHERIT) 30380Sstevel@tonic-gate mp->mutex_lockw = LOCKCLEAR; 30390Sstevel@tonic-gate /* 30400Sstevel@tonic-gate * ___lwp_cond_wait() returns immediately with EINTR if 30410Sstevel@tonic-gate * set_parking_flag(self,0) is called on this lwp before it 30420Sstevel@tonic-gate * goes to sleep in the kernel. sigacthandler() calls this 30430Sstevel@tonic-gate * when a deferred signal is noted. This assures that we don't 30440Sstevel@tonic-gate * get stuck in ___lwp_cond_wait() with all signals blocked 30450Sstevel@tonic-gate * due to taking a deferred signal before going to sleep. 30460Sstevel@tonic-gate */ 30470Sstevel@tonic-gate set_parking_flag(self, 1); 30480Sstevel@tonic-gate if (self->ul_cursig != 0 || 30490Sstevel@tonic-gate (self->ul_cancelable && self->ul_cancel_pending)) 30500Sstevel@tonic-gate set_parking_flag(self, 0); 30510Sstevel@tonic-gate error = ___lwp_cond_wait(cvp, mp, tsp, 1); 30520Sstevel@tonic-gate set_parking_flag(self, 0); 30530Sstevel@tonic-gate self->ul_sp = 0; 30540Sstevel@tonic-gate self->ul_wchan = NULL; 30550Sstevel@tonic-gate return (error); 30560Sstevel@tonic-gate } 30570Sstevel@tonic-gate 30580Sstevel@tonic-gate int 30590Sstevel@tonic-gate cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 30600Sstevel@tonic-gate { 30610Sstevel@tonic-gate ulwp_t *self = curthread; 30620Sstevel@tonic-gate int error; 30630Sstevel@tonic-gate int merror; 30640Sstevel@tonic-gate 30650Sstevel@tonic-gate /* 30660Sstevel@tonic-gate * See the large comment in cond_wait_queue(), above. 30670Sstevel@tonic-gate */ 30680Sstevel@tonic-gate if (self->ul_cond_wait_defer) 30690Sstevel@tonic-gate sigoff(self); 30700Sstevel@tonic-gate 30710Sstevel@tonic-gate error = cond_sleep_kernel(cvp, mp, tsp); 30720Sstevel@tonic-gate 30730Sstevel@tonic-gate /* 30740Sstevel@tonic-gate * Override the return code from ___lwp_cond_wait() 30750Sstevel@tonic-gate * with any non-zero return code from mutex_lock(). 30760Sstevel@tonic-gate * This addresses robust lock failures in particular; 30770Sstevel@tonic-gate * the caller must see the EOWNERDEAD or ENOTRECOVERABLE 30780Sstevel@tonic-gate * errors in order to take corrective action. 30790Sstevel@tonic-gate */ 30805629Sraf if ((merror = mutex_lock_impl(mp, NULL)) != 0) 30810Sstevel@tonic-gate error = merror; 30820Sstevel@tonic-gate 30830Sstevel@tonic-gate /* 30840Sstevel@tonic-gate * Take any deferred signal now, after we have reacquired the mutex. 
30850Sstevel@tonic-gate  */
30860Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
30870Sstevel@tonic-gate 		sigon(self);
30880Sstevel@tonic-gate 
30890Sstevel@tonic-gate 	return (error);
30900Sstevel@tonic-gate }
30910Sstevel@tonic-gate 
30920Sstevel@tonic-gate /*
30930Sstevel@tonic-gate  * Common code for _cond_wait() and _cond_timedwait()
30940Sstevel@tonic-gate  */
30950Sstevel@tonic-gate int
30960Sstevel@tonic-gate cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
30970Sstevel@tonic-gate {
30980Sstevel@tonic-gate 	int mtype = mp->mutex_type;
30990Sstevel@tonic-gate 	hrtime_t begin_sleep = 0;
31000Sstevel@tonic-gate 	ulwp_t *self = curthread;
31010Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
31020Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
31030Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
31040Sstevel@tonic-gate 	uint8_t rcount;
31050Sstevel@tonic-gate 	int error = 0;
31060Sstevel@tonic-gate 
31070Sstevel@tonic-gate 	/*
31080Sstevel@tonic-gate 	 * The SUSv3 POSIX spec for pthread_cond_timedwait() states:
31090Sstevel@tonic-gate 	 *	Except in the case of [ETIMEDOUT], all these error checks
31100Sstevel@tonic-gate 	 *	shall act as if they were performed immediately at the
31110Sstevel@tonic-gate 	 *	beginning of processing for the function and shall cause
31120Sstevel@tonic-gate 	 *	an error return, in effect, prior to modifying the state
31130Sstevel@tonic-gate 	 *	of the mutex specified by mutex or the condition variable
31140Sstevel@tonic-gate 	 *	specified by cond.
31150Sstevel@tonic-gate 	 * Therefore, we must return EINVAL now if the timeout is invalid.
31160Sstevel@tonic-gate 	 */
31170Sstevel@tonic-gate 	if (tsp != NULL &&
31180Sstevel@tonic-gate 	    (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
31190Sstevel@tonic-gate 		return (EINVAL);
31200Sstevel@tonic-gate 
31210Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
31220Sstevel@tonic-gate 		self->ul_sp = stkptr();
31230Sstevel@tonic-gate 		self->ul_wchan = cvp;
31240Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
31250Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = cvp;
31260Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
31270Sstevel@tonic-gate 		self->ul_sp = 0;
31280Sstevel@tonic-gate 	}
31290Sstevel@tonic-gate 	if (csp) {
31300Sstevel@tonic-gate 		if (tsp)
31310Sstevel@tonic-gate 			tdb_incr(csp->cond_timedwait);
31320Sstevel@tonic-gate 		else
31330Sstevel@tonic-gate 			tdb_incr(csp->cond_wait);
31340Sstevel@tonic-gate 	}
31350Sstevel@tonic-gate 	if (msp)
31360Sstevel@tonic-gate 		begin_sleep = record_hold_time(msp);
31370Sstevel@tonic-gate 	else if (csp)
31380Sstevel@tonic-gate 		begin_sleep = gethrtime();
31390Sstevel@tonic-gate 
31400Sstevel@tonic-gate 	if (self->ul_error_detection) {
31410Sstevel@tonic-gate 		if (!mutex_is_held(mp))
31420Sstevel@tonic-gate 			lock_error(mp, "cond_wait", cvp, NULL);
31430Sstevel@tonic-gate 		if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
31440Sstevel@tonic-gate 			lock_error(mp, "recursive mutex in cond_wait",
31455629Sraf 			    cvp, NULL);
31460Sstevel@tonic-gate 		if (cvp->cond_type & USYNC_PROCESS) {
31474574Sraf 			if (!(mtype & USYNC_PROCESS))
31480Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
31495629Sraf 				    "condvar process-shared, "
31505629Sraf 				    "mutex process-private");
31510Sstevel@tonic-gate 		} else {
31524574Sraf 			if (mtype & USYNC_PROCESS)
31530Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
31545629Sraf 				    "condvar process-private, "
31555629Sraf 				    "mutex process-shared");
31560Sstevel@tonic-gate 		}
31570Sstevel@tonic-gate 	}
31580Sstevel@tonic-gate 
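	/*
	 * The sanity checks above fire only when error detection is
	 * enabled (the ul_error_detection flag, normally taken from
	 * the _THREAD_ERROR_DETECTION environment variable).
	 */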
31590Sstevel@tonic-gate /* 31600Sstevel@tonic-gate * We deal with recursive mutexes by completely 31610Sstevel@tonic-gate * dropping the lock and restoring the recursion 31620Sstevel@tonic-gate * count after waking up. This is arguably wrong, 31630Sstevel@tonic-gate * but it obeys the principle of least astonishment. 31640Sstevel@tonic-gate */ 31650Sstevel@tonic-gate rcount = mp->mutex_rcount; 31660Sstevel@tonic-gate mp->mutex_rcount = 0; 31674574Sraf if ((mtype & 31684574Sraf (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) | 31690Sstevel@tonic-gate (cvp->cond_type & USYNC_PROCESS)) 31700Sstevel@tonic-gate error = cond_wait_kernel(cvp, mp, tsp); 31710Sstevel@tonic-gate else 31725629Sraf error = cond_wait_queue(cvp, mp, tsp); 31730Sstevel@tonic-gate mp->mutex_rcount = rcount; 31740Sstevel@tonic-gate 31750Sstevel@tonic-gate if (csp) { 31760Sstevel@tonic-gate hrtime_t lapse = gethrtime() - begin_sleep; 31770Sstevel@tonic-gate if (tsp == NULL) 31780Sstevel@tonic-gate csp->cond_wait_sleep_time += lapse; 31790Sstevel@tonic-gate else { 31800Sstevel@tonic-gate csp->cond_timedwait_sleep_time += lapse; 31810Sstevel@tonic-gate if (error == ETIME) 31820Sstevel@tonic-gate tdb_incr(csp->cond_timedwait_timeout); 31830Sstevel@tonic-gate } 31840Sstevel@tonic-gate } 31850Sstevel@tonic-gate return (error); 31860Sstevel@tonic-gate } 31870Sstevel@tonic-gate 31880Sstevel@tonic-gate /* 31895891Sraf * cond_wait() and _cond_wait() are cancellation points but __cond_wait() 31905891Sraf * is not. Internally, libc calls the non-cancellation version. 31915891Sraf * Other libraries need to use pthread_setcancelstate(), as appropriate, 31925891Sraf * since __cond_wait() is not exported from libc. 31930Sstevel@tonic-gate */ 31940Sstevel@tonic-gate int 31955891Sraf __cond_wait(cond_t *cvp, mutex_t *mp) 31960Sstevel@tonic-gate { 31970Sstevel@tonic-gate ulwp_t *self = curthread; 31980Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 31990Sstevel@tonic-gate uberflags_t *gflags; 32000Sstevel@tonic-gate 32010Sstevel@tonic-gate /* 32020Sstevel@tonic-gate * Optimize the common case of USYNC_THREAD plus 32030Sstevel@tonic-gate * no error detection, no lock statistics, and no event tracing. 32040Sstevel@tonic-gate */ 32050Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 32060Sstevel@tonic-gate (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted | 32070Sstevel@tonic-gate self->ul_td_events_enable | 32080Sstevel@tonic-gate udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0) 32095629Sraf return (cond_wait_queue(cvp, mp, NULL)); 32100Sstevel@tonic-gate 32110Sstevel@tonic-gate /* 32120Sstevel@tonic-gate * Else do it the long way. 32130Sstevel@tonic-gate */ 32140Sstevel@tonic-gate return (cond_wait_common(cvp, mp, NULL)); 32150Sstevel@tonic-gate } 32160Sstevel@tonic-gate 32175891Sraf #pragma weak cond_wait = _cond_wait 32180Sstevel@tonic-gate int 32195891Sraf _cond_wait(cond_t *cvp, mutex_t *mp) 32200Sstevel@tonic-gate { 32210Sstevel@tonic-gate int error; 32220Sstevel@tonic-gate 32230Sstevel@tonic-gate _cancelon(); 32245891Sraf error = __cond_wait(cvp, mp); 32250Sstevel@tonic-gate if (error == EINTR) 32260Sstevel@tonic-gate _canceloff(); 32270Sstevel@tonic-gate else 32280Sstevel@tonic-gate _canceloff_nocancel(); 32290Sstevel@tonic-gate return (error); 32300Sstevel@tonic-gate } 32310Sstevel@tonic-gate 32325891Sraf /* 32335891Sraf * pthread_cond_wait() is a cancellation point. 
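 *
 * As with any condition variable, callers are expected to re-test
 * their predicate in a loop, since spurious wakeups are permitted.
 * A minimal sketch (`m', `cv' and `ready' are hypothetical names):
 *
 *	(void) pthread_mutex_lock(&m);
 *	while (!ready)
 *		(void) pthread_cond_wait(&cv, &m);
 *	... consume the protected state ...
 *	(void) pthread_mutex_unlock(&m);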
32345891Sraf */ 32350Sstevel@tonic-gate #pragma weak pthread_cond_wait = _pthread_cond_wait 32360Sstevel@tonic-gate int 32370Sstevel@tonic-gate _pthread_cond_wait(cond_t *cvp, mutex_t *mp) 32380Sstevel@tonic-gate { 32390Sstevel@tonic-gate int error; 32400Sstevel@tonic-gate 32415891Sraf error = _cond_wait(cvp, mp); 32420Sstevel@tonic-gate return ((error == EINTR)? 0 : error); 32430Sstevel@tonic-gate } 32440Sstevel@tonic-gate 32450Sstevel@tonic-gate /* 32465891Sraf * cond_timedwait() and _cond_timedwait() are cancellation points 32475891Sraf * but __cond_timedwait() is not. 32480Sstevel@tonic-gate */ 32490Sstevel@tonic-gate int 32505891Sraf __cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 32510Sstevel@tonic-gate { 32520Sstevel@tonic-gate clockid_t clock_id = cvp->cond_clockid; 32530Sstevel@tonic-gate timespec_t reltime; 32540Sstevel@tonic-gate int error; 32550Sstevel@tonic-gate 32560Sstevel@tonic-gate if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES) 32570Sstevel@tonic-gate clock_id = CLOCK_REALTIME; 32580Sstevel@tonic-gate abstime_to_reltime(clock_id, abstime, &reltime); 32590Sstevel@tonic-gate error = cond_wait_common(cvp, mp, &reltime); 32600Sstevel@tonic-gate if (error == ETIME && clock_id == CLOCK_HIGHRES) { 32610Sstevel@tonic-gate /* 32620Sstevel@tonic-gate * Don't return ETIME if we didn't really get a timeout. 32630Sstevel@tonic-gate * This can happen if we return because someone resets 32640Sstevel@tonic-gate * the system clock. Just return zero in this case, 32650Sstevel@tonic-gate * giving a spurious wakeup but not a timeout. 32660Sstevel@tonic-gate */ 32670Sstevel@tonic-gate if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC + 32680Sstevel@tonic-gate abstime->tv_nsec > gethrtime()) 32690Sstevel@tonic-gate error = 0; 32700Sstevel@tonic-gate } 32710Sstevel@tonic-gate return (error); 32720Sstevel@tonic-gate } 32730Sstevel@tonic-gate 32745891Sraf #pragma weak cond_timedwait = _cond_timedwait 32750Sstevel@tonic-gate int 32765891Sraf _cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 32770Sstevel@tonic-gate { 32780Sstevel@tonic-gate int error; 32790Sstevel@tonic-gate 32800Sstevel@tonic-gate _cancelon(); 32815891Sraf error = __cond_timedwait(cvp, mp, abstime); 32820Sstevel@tonic-gate if (error == EINTR) 32830Sstevel@tonic-gate _canceloff(); 32840Sstevel@tonic-gate else 32850Sstevel@tonic-gate _canceloff_nocancel(); 32860Sstevel@tonic-gate return (error); 32870Sstevel@tonic-gate } 32880Sstevel@tonic-gate 32895891Sraf /* 32905891Sraf * pthread_cond_timedwait() is a cancellation point. 32915891Sraf */ 32920Sstevel@tonic-gate #pragma weak pthread_cond_timedwait = _pthread_cond_timedwait 32930Sstevel@tonic-gate int 32940Sstevel@tonic-gate _pthread_cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 32950Sstevel@tonic-gate { 32960Sstevel@tonic-gate int error; 32970Sstevel@tonic-gate 32985891Sraf error = _cond_timedwait(cvp, mp, abstime); 32990Sstevel@tonic-gate if (error == ETIME) 33000Sstevel@tonic-gate error = ETIMEDOUT; 33010Sstevel@tonic-gate else if (error == EINTR) 33020Sstevel@tonic-gate error = 0; 33030Sstevel@tonic-gate return (error); 33040Sstevel@tonic-gate } 33050Sstevel@tonic-gate 33060Sstevel@tonic-gate /* 33075891Sraf * cond_reltimedwait() and _cond_reltimedwait() are cancellation points 33085891Sraf * but __cond_reltimedwait() is not. 
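 * These take a timeout expressed relative to the time of the call,
 * rather than an absolute time of day; the relative time is passed
 * through to cond_wait_common() unchanged.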
33090Sstevel@tonic-gate */ 33100Sstevel@tonic-gate int 33115891Sraf __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) 33120Sstevel@tonic-gate { 33130Sstevel@tonic-gate timespec_t tslocal = *reltime; 33140Sstevel@tonic-gate 33150Sstevel@tonic-gate return (cond_wait_common(cvp, mp, &tslocal)); 33160Sstevel@tonic-gate } 33170Sstevel@tonic-gate 33185891Sraf #pragma weak cond_reltimedwait = _cond_reltimedwait 33190Sstevel@tonic-gate int 33205891Sraf _cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) 33210Sstevel@tonic-gate { 33220Sstevel@tonic-gate int error; 33230Sstevel@tonic-gate 33240Sstevel@tonic-gate _cancelon(); 33255891Sraf error = __cond_reltimedwait(cvp, mp, reltime); 33260Sstevel@tonic-gate if (error == EINTR) 33270Sstevel@tonic-gate _canceloff(); 33280Sstevel@tonic-gate else 33290Sstevel@tonic-gate _canceloff_nocancel(); 33300Sstevel@tonic-gate return (error); 33310Sstevel@tonic-gate } 33320Sstevel@tonic-gate 33330Sstevel@tonic-gate #pragma weak pthread_cond_reltimedwait_np = _pthread_cond_reltimedwait_np 33340Sstevel@tonic-gate int 33350Sstevel@tonic-gate _pthread_cond_reltimedwait_np(cond_t *cvp, mutex_t *mp, 33360Sstevel@tonic-gate const timespec_t *reltime) 33370Sstevel@tonic-gate { 33380Sstevel@tonic-gate int error; 33390Sstevel@tonic-gate 33405891Sraf error = _cond_reltimedwait(cvp, mp, reltime); 33410Sstevel@tonic-gate if (error == ETIME) 33420Sstevel@tonic-gate error = ETIMEDOUT; 33430Sstevel@tonic-gate else if (error == EINTR) 33440Sstevel@tonic-gate error = 0; 33450Sstevel@tonic-gate return (error); 33460Sstevel@tonic-gate } 33470Sstevel@tonic-gate 33480Sstevel@tonic-gate #pragma weak pthread_cond_signal = cond_signal_internal 33490Sstevel@tonic-gate #pragma weak _pthread_cond_signal = cond_signal_internal 33500Sstevel@tonic-gate #pragma weak cond_signal = cond_signal_internal 33510Sstevel@tonic-gate #pragma weak _cond_signal = cond_signal_internal 33520Sstevel@tonic-gate int 33530Sstevel@tonic-gate cond_signal_internal(cond_t *cvp) 33540Sstevel@tonic-gate { 33550Sstevel@tonic-gate ulwp_t *self = curthread; 33560Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 33570Sstevel@tonic-gate tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 33580Sstevel@tonic-gate int error = 0; 33590Sstevel@tonic-gate queue_head_t *qp; 33600Sstevel@tonic-gate mutex_t *mp; 33610Sstevel@tonic-gate queue_head_t *mqp; 33620Sstevel@tonic-gate ulwp_t **ulwpp; 33630Sstevel@tonic-gate ulwp_t *ulwp; 33640Sstevel@tonic-gate ulwp_t *prev = NULL; 33650Sstevel@tonic-gate ulwp_t *next; 33660Sstevel@tonic-gate ulwp_t **suspp = NULL; 33670Sstevel@tonic-gate ulwp_t *susprev; 33680Sstevel@tonic-gate 33690Sstevel@tonic-gate if (csp) 33700Sstevel@tonic-gate tdb_incr(csp->cond_signal); 33710Sstevel@tonic-gate 33720Sstevel@tonic-gate if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */ 33730Sstevel@tonic-gate error = __lwp_cond_signal(cvp); 33740Sstevel@tonic-gate 33750Sstevel@tonic-gate if (!cvp->cond_waiters_user) /* no one sleeping at user-level */ 33760Sstevel@tonic-gate return (error); 33770Sstevel@tonic-gate 33780Sstevel@tonic-gate /* 33790Sstevel@tonic-gate * Move someone from the condvar sleep queue to the mutex sleep 33800Sstevel@tonic-gate * queue for the mutex that he will acquire on being waked up. 33810Sstevel@tonic-gate * We can do this only if we own the mutex he will acquire. 33820Sstevel@tonic-gate * If we do not own the mutex, or if his ul_cv_wake flag 33830Sstevel@tonic-gate * is set, just dequeue and unpark him. 
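	 *
	 * Requeuing the waiter directly onto the mutex sleep queue
	 * (rather than waking him only to have him block immediately
	 * on the mutex we still hold) saves a futile wakeup/block
	 * round trip; mutex_unlock() will wake him once the mutex
	 * is actually available.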
33840Sstevel@tonic-gate */ 33850Sstevel@tonic-gate qp = queue_lock(cvp, CV); 33860Sstevel@tonic-gate for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL; 33870Sstevel@tonic-gate prev = ulwp, ulwpp = &ulwp->ul_link) { 33880Sstevel@tonic-gate if (ulwp->ul_wchan == cvp) { 33890Sstevel@tonic-gate if (!ulwp->ul_stop) 33900Sstevel@tonic-gate break; 33910Sstevel@tonic-gate /* 33920Sstevel@tonic-gate * Try not to dequeue a suspended thread. 33930Sstevel@tonic-gate * This mimics the old libthread's behavior. 33940Sstevel@tonic-gate */ 33950Sstevel@tonic-gate if (suspp == NULL) { 33960Sstevel@tonic-gate suspp = ulwpp; 33970Sstevel@tonic-gate susprev = prev; 33980Sstevel@tonic-gate } 33990Sstevel@tonic-gate } 34000Sstevel@tonic-gate } 34010Sstevel@tonic-gate if (ulwp == NULL && suspp != NULL) { 34020Sstevel@tonic-gate ulwp = *(ulwpp = suspp); 34030Sstevel@tonic-gate prev = susprev; 34040Sstevel@tonic-gate suspp = NULL; 34050Sstevel@tonic-gate } 34060Sstevel@tonic-gate if (ulwp == NULL) { /* no one on the sleep queue */ 34070Sstevel@tonic-gate cvp->cond_waiters_user = 0; 34080Sstevel@tonic-gate queue_unlock(qp); 34090Sstevel@tonic-gate return (error); 34100Sstevel@tonic-gate } 34110Sstevel@tonic-gate /* 34120Sstevel@tonic-gate * Scan the remainder of the CV queue for another waiter. 34130Sstevel@tonic-gate */ 34140Sstevel@tonic-gate if (suspp != NULL) { 34150Sstevel@tonic-gate next = *suspp; 34160Sstevel@tonic-gate } else { 34170Sstevel@tonic-gate for (next = ulwp->ul_link; next != NULL; next = next->ul_link) 34180Sstevel@tonic-gate if (next->ul_wchan == cvp) 34190Sstevel@tonic-gate break; 34200Sstevel@tonic-gate } 34210Sstevel@tonic-gate if (next == NULL) 34220Sstevel@tonic-gate cvp->cond_waiters_user = 0; 34230Sstevel@tonic-gate 34240Sstevel@tonic-gate /* 34250Sstevel@tonic-gate * Inform the thread that he was the recipient of a cond_signal(). 34260Sstevel@tonic-gate * This lets him deal with cond_signal() and, concurrently, 34270Sstevel@tonic-gate * one or more of a cancellation, a UNIX signal, or a timeout. 34280Sstevel@tonic-gate * These latter conditions must not consume a cond_signal(). 34290Sstevel@tonic-gate */ 34300Sstevel@tonic-gate ulwp->ul_signalled = 1; 34310Sstevel@tonic-gate 34320Sstevel@tonic-gate /* 34330Sstevel@tonic-gate * Dequeue the waiter but leave his ul_sleepq non-NULL 34340Sstevel@tonic-gate * while we move him to the mutex queue so that he can 34350Sstevel@tonic-gate * deal properly with spurious wakeups. 
34360Sstevel@tonic-gate */ 34370Sstevel@tonic-gate *ulwpp = ulwp->ul_link; 34384574Sraf ulwp->ul_link = NULL; 34390Sstevel@tonic-gate if (qp->qh_tail == ulwp) 34400Sstevel@tonic-gate qp->qh_tail = prev; 34410Sstevel@tonic-gate qp->qh_qlen--; 34420Sstevel@tonic-gate 34430Sstevel@tonic-gate mp = ulwp->ul_cvmutex; /* the mutex he will acquire */ 34440Sstevel@tonic-gate ulwp->ul_cvmutex = NULL; 34450Sstevel@tonic-gate ASSERT(mp != NULL); 34460Sstevel@tonic-gate 34470Sstevel@tonic-gate if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { 34480Sstevel@tonic-gate lwpid_t lwpid = ulwp->ul_lwpid; 34490Sstevel@tonic-gate 34500Sstevel@tonic-gate no_preempt(self); 34510Sstevel@tonic-gate ulwp->ul_sleepq = NULL; 34520Sstevel@tonic-gate ulwp->ul_wchan = NULL; 34530Sstevel@tonic-gate ulwp->ul_cv_wake = 0; 34540Sstevel@tonic-gate queue_unlock(qp); 34550Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 34560Sstevel@tonic-gate preempt(self); 34570Sstevel@tonic-gate } else { 34580Sstevel@tonic-gate mqp = queue_lock(mp, MX); 34590Sstevel@tonic-gate enqueue(mqp, ulwp, mp, MX); 34600Sstevel@tonic-gate mp->mutex_waiters = 1; 34610Sstevel@tonic-gate queue_unlock(mqp); 34620Sstevel@tonic-gate queue_unlock(qp); 34630Sstevel@tonic-gate } 34640Sstevel@tonic-gate 34650Sstevel@tonic-gate return (error); 34660Sstevel@tonic-gate } 34670Sstevel@tonic-gate 34684570Sraf /* 34694574Sraf * Utility function called by mutex_wakeup_all(), cond_broadcast(), 34704574Sraf * and rw_queue_release() to (re)allocate a big buffer to hold the 34714574Sraf * lwpids of all the threads to be set running after they are removed 34724574Sraf * from their sleep queues. Since we are holding a queue lock, we 34734574Sraf * cannot call any function that might acquire a lock. mmap(), munmap(), 34744574Sraf * lwp_unpark_all() are simple system calls and are safe in this regard. 34754570Sraf */ 34764570Sraf lwpid_t * 34774570Sraf alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr) 34784570Sraf { 34794570Sraf /* 34804570Sraf * Allocate NEWLWPS ids on the first overflow. 34814570Sraf * Double the allocation each time after that. 34824570Sraf */ 34834570Sraf int nlwpid = *nlwpid_ptr; 34844570Sraf int maxlwps = *maxlwps_ptr; 34854570Sraf int first_allocation; 34864570Sraf int newlwps; 34874570Sraf void *vaddr; 34884570Sraf 34894570Sraf ASSERT(nlwpid == maxlwps); 34904570Sraf 34914570Sraf first_allocation = (maxlwps == MAXLWPS); 34924570Sraf newlwps = first_allocation? NEWLWPS : 2 * maxlwps; 34934570Sraf vaddr = _private_mmap(NULL, newlwps * sizeof (lwpid_t), 34944570Sraf PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0); 34954570Sraf 34964570Sraf if (vaddr == MAP_FAILED) { 34974570Sraf /* 34984570Sraf * Let's hope this never happens. 34994570Sraf * If it does, then we have a terrible 35004570Sraf * thundering herd on our hands. 
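		 *
		 * The fallback below unparks every lwp collected so
		 * far and resets the count, abandoning the attempt
		 * to batch the wakeups; those threads all contend
		 * for the mutex at once, hence the herd.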
35014570Sraf */ 35024570Sraf (void) __lwp_unpark_all(lwpid, nlwpid); 35034570Sraf *nlwpid_ptr = 0; 35044570Sraf } else { 35054570Sraf (void) _memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t)); 35064570Sraf if (!first_allocation) 35074570Sraf (void) _private_munmap(lwpid, 35084570Sraf maxlwps * sizeof (lwpid_t)); 35094570Sraf lwpid = vaddr; 35104570Sraf *maxlwps_ptr = newlwps; 35114570Sraf } 35124570Sraf 35134570Sraf return (lwpid); 35144570Sraf } 35150Sstevel@tonic-gate 35160Sstevel@tonic-gate #pragma weak pthread_cond_broadcast = cond_broadcast_internal 35170Sstevel@tonic-gate #pragma weak _pthread_cond_broadcast = cond_broadcast_internal 35180Sstevel@tonic-gate #pragma weak cond_broadcast = cond_broadcast_internal 35190Sstevel@tonic-gate #pragma weak _cond_broadcast = cond_broadcast_internal 35200Sstevel@tonic-gate int 35210Sstevel@tonic-gate cond_broadcast_internal(cond_t *cvp) 35220Sstevel@tonic-gate { 35230Sstevel@tonic-gate ulwp_t *self = curthread; 35240Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 35250Sstevel@tonic-gate tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 35260Sstevel@tonic-gate int error = 0; 35270Sstevel@tonic-gate queue_head_t *qp; 35280Sstevel@tonic-gate mutex_t *mp; 35290Sstevel@tonic-gate mutex_t *mp_cache = NULL; 35304570Sraf queue_head_t *mqp = NULL; 35310Sstevel@tonic-gate ulwp_t **ulwpp; 35320Sstevel@tonic-gate ulwp_t *ulwp; 35330Sstevel@tonic-gate ulwp_t *prev = NULL; 35344570Sraf int nlwpid = 0; 35354570Sraf int maxlwps = MAXLWPS; 35360Sstevel@tonic-gate lwpid_t buffer[MAXLWPS]; 35370Sstevel@tonic-gate lwpid_t *lwpid = buffer; 35380Sstevel@tonic-gate 35390Sstevel@tonic-gate if (csp) 35400Sstevel@tonic-gate tdb_incr(csp->cond_broadcast); 35410Sstevel@tonic-gate 35420Sstevel@tonic-gate if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */ 35430Sstevel@tonic-gate error = __lwp_cond_broadcast(cvp); 35440Sstevel@tonic-gate 35450Sstevel@tonic-gate if (!cvp->cond_waiters_user) /* no one sleeping at user-level */ 35460Sstevel@tonic-gate return (error); 35470Sstevel@tonic-gate 35480Sstevel@tonic-gate /* 35490Sstevel@tonic-gate * Move everyone from the condvar sleep queue to the mutex sleep 35500Sstevel@tonic-gate * queue for the mutex that they will acquire on being waked up. 35510Sstevel@tonic-gate * We can do this only if we own the mutex they will acquire. 35520Sstevel@tonic-gate * If we do not own the mutex, or if their ul_cv_wake flag 35530Sstevel@tonic-gate * is set, just dequeue and unpark them. 35540Sstevel@tonic-gate * 35550Sstevel@tonic-gate * We keep track of lwpids that are to be unparked in lwpid[]. 35560Sstevel@tonic-gate * __lwp_unpark_all() is called to unpark all of them after 35570Sstevel@tonic-gate * they have been removed from the sleep queue and the sleep 35580Sstevel@tonic-gate * queue lock has been dropped. If we run out of space in our 35590Sstevel@tonic-gate * on-stack buffer, we need to allocate more but we can't call 35600Sstevel@tonic-gate * lmalloc() because we are holding a queue lock when the overflow 35610Sstevel@tonic-gate * occurs and lmalloc() acquires a lock. We can't use alloca() 35624570Sraf * either because the application may have allocated a small 35634570Sraf * stack and we don't want to overrun the stack. So we call 35644570Sraf * alloc_lwpids() to allocate a bigger buffer using the mmap() 35650Sstevel@tonic-gate * system call directly since that path acquires no locks. 
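	 * Because the buffer grows geometrically, even a broadcast to
	 * a very large number of waiters performs only O(log n)
	 * reallocations, and any mmap()ed buffer is munmap()ed at the
	 * bottom of this function.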
35660Sstevel@tonic-gate */ 35670Sstevel@tonic-gate qp = queue_lock(cvp, CV); 35680Sstevel@tonic-gate cvp->cond_waiters_user = 0; 35690Sstevel@tonic-gate ulwpp = &qp->qh_head; 35700Sstevel@tonic-gate while ((ulwp = *ulwpp) != NULL) { 35710Sstevel@tonic-gate if (ulwp->ul_wchan != cvp) { 35720Sstevel@tonic-gate prev = ulwp; 35730Sstevel@tonic-gate ulwpp = &ulwp->ul_link; 35740Sstevel@tonic-gate continue; 35750Sstevel@tonic-gate } 35760Sstevel@tonic-gate *ulwpp = ulwp->ul_link; 35774574Sraf ulwp->ul_link = NULL; 35780Sstevel@tonic-gate if (qp->qh_tail == ulwp) 35790Sstevel@tonic-gate qp->qh_tail = prev; 35800Sstevel@tonic-gate qp->qh_qlen--; 35810Sstevel@tonic-gate mp = ulwp->ul_cvmutex; /* his mutex */ 35820Sstevel@tonic-gate ulwp->ul_cvmutex = NULL; 35830Sstevel@tonic-gate ASSERT(mp != NULL); 35840Sstevel@tonic-gate if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { 35850Sstevel@tonic-gate ulwp->ul_sleepq = NULL; 35860Sstevel@tonic-gate ulwp->ul_wchan = NULL; 35870Sstevel@tonic-gate ulwp->ul_cv_wake = 0; 35884570Sraf if (nlwpid == maxlwps) 35894570Sraf lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 35900Sstevel@tonic-gate lwpid[nlwpid++] = ulwp->ul_lwpid; 35910Sstevel@tonic-gate } else { 35920Sstevel@tonic-gate if (mp != mp_cache) { 35930Sstevel@tonic-gate mp_cache = mp; 35944570Sraf if (mqp != NULL) 35954570Sraf queue_unlock(mqp); 35964570Sraf mqp = queue_lock(mp, MX); 35970Sstevel@tonic-gate } 35980Sstevel@tonic-gate enqueue(mqp, ulwp, mp, MX); 35990Sstevel@tonic-gate mp->mutex_waiters = 1; 36000Sstevel@tonic-gate } 36010Sstevel@tonic-gate } 36024570Sraf if (mqp != NULL) 36034570Sraf queue_unlock(mqp); 36044570Sraf if (nlwpid == 0) { 36054570Sraf queue_unlock(qp); 36064570Sraf } else { 36074570Sraf no_preempt(self); 36084570Sraf queue_unlock(qp); 36090Sstevel@tonic-gate if (nlwpid == 1) 36100Sstevel@tonic-gate (void) __lwp_unpark(lwpid[0]); 36110Sstevel@tonic-gate else 36120Sstevel@tonic-gate (void) __lwp_unpark_all(lwpid, nlwpid); 36134570Sraf preempt(self); 36140Sstevel@tonic-gate } 36150Sstevel@tonic-gate if (lwpid != buffer) 36160Sstevel@tonic-gate (void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t)); 36170Sstevel@tonic-gate return (error); 36180Sstevel@tonic-gate } 36190Sstevel@tonic-gate 36200Sstevel@tonic-gate #pragma weak pthread_cond_destroy = _cond_destroy 36210Sstevel@tonic-gate #pragma weak _pthread_cond_destroy = _cond_destroy 36220Sstevel@tonic-gate #pragma weak cond_destroy = _cond_destroy 36230Sstevel@tonic-gate int 36240Sstevel@tonic-gate _cond_destroy(cond_t *cvp) 36250Sstevel@tonic-gate { 36260Sstevel@tonic-gate cvp->cond_magic = 0; 36270Sstevel@tonic-gate tdb_sync_obj_deregister(cvp); 36280Sstevel@tonic-gate return (0); 36290Sstevel@tonic-gate } 36300Sstevel@tonic-gate 36310Sstevel@tonic-gate #if defined(THREAD_DEBUG) 36320Sstevel@tonic-gate void 36330Sstevel@tonic-gate assert_no_libc_locks_held(void) 36340Sstevel@tonic-gate { 36350Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 36360Sstevel@tonic-gate } 36370Sstevel@tonic-gate #endif 36380Sstevel@tonic-gate 36390Sstevel@tonic-gate /* protected by link_lock */ 36400Sstevel@tonic-gate uint64_t spin_lock_spin; 36410Sstevel@tonic-gate uint64_t spin_lock_spin2; 36420Sstevel@tonic-gate uint64_t spin_lock_sleep; 36430Sstevel@tonic-gate uint64_t spin_lock_wakeup; 36440Sstevel@tonic-gate 36450Sstevel@tonic-gate /* 36460Sstevel@tonic-gate * Record spin lock statistics. 36470Sstevel@tonic-gate * Called by a thread exiting itself in thrp_exit(). 
36480Sstevel@tonic-gate  * Also called via atexit() from the thread calling
36490Sstevel@tonic-gate  * exit() to do all the other threads as well.
36500Sstevel@tonic-gate  */
36510Sstevel@tonic-gate void
36520Sstevel@tonic-gate record_spin_locks(ulwp_t *ulwp)
36530Sstevel@tonic-gate {
36540Sstevel@tonic-gate 	spin_lock_spin += ulwp->ul_spin_lock_spin;
36550Sstevel@tonic-gate 	spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
36560Sstevel@tonic-gate 	spin_lock_sleep += ulwp->ul_spin_lock_sleep;
36570Sstevel@tonic-gate 	spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
36580Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin = 0;
36590Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin2 = 0;
36600Sstevel@tonic-gate 	ulwp->ul_spin_lock_sleep = 0;
36610Sstevel@tonic-gate 	ulwp->ul_spin_lock_wakeup = 0;
36620Sstevel@tonic-gate }
36630Sstevel@tonic-gate 
36640Sstevel@tonic-gate /*
36650Sstevel@tonic-gate  * atexit function: dump the queue statistics to stderr.
36660Sstevel@tonic-gate  */
36671219Sraf #if !defined(__lint)
36681219Sraf #define	fprintf	_fprintf
36691219Sraf #endif
36700Sstevel@tonic-gate #include <stdio.h>
36710Sstevel@tonic-gate void
36720Sstevel@tonic-gate dump_queue_statistics(void)
36730Sstevel@tonic-gate {
36740Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
36750Sstevel@tonic-gate 	queue_head_t *qp;
36760Sstevel@tonic-gate 	int qn;
36770Sstevel@tonic-gate 	uint64_t spin_lock_total = 0;
36780Sstevel@tonic-gate 
36790Sstevel@tonic-gate 	if (udp->queue_head == NULL || thread_queue_dump == 0)
36800Sstevel@tonic-gate 		return;
36810Sstevel@tonic-gate 
36820Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
36830Sstevel@tonic-gate 	    fprintf(stderr, "queue# lockcount max qlen\n") < 0)
36840Sstevel@tonic-gate 		return;
36850Sstevel@tonic-gate 	for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
36860Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
36870Sstevel@tonic-gate 			continue;
36880Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
36890Sstevel@tonic-gate 		if (fprintf(stderr, "%5d %12llu%12u\n", qn,
36905629Sraf 		    (u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0)
36915629Sraf 			return;
36920Sstevel@tonic-gate 	}
36930Sstevel@tonic-gate 
36940Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
36950Sstevel@tonic-gate 	    fprintf(stderr, "queue# lockcount max qlen\n") < 0)
36960Sstevel@tonic-gate 		return;
36970Sstevel@tonic-gate 	for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
36980Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
36990Sstevel@tonic-gate 			continue;
37000Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
37010Sstevel@tonic-gate 		if (fprintf(stderr, "%5d %12llu%12u\n", qn,
37025629Sraf 		    (u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0)
37035629Sraf 			return;
37040Sstevel@tonic-gate 	}
37050Sstevel@tonic-gate 
37060Sstevel@tonic-gate 	(void) fprintf(stderr, "\n spin_lock_total = %10llu\n",
37075629Sraf 	    (u_longlong_t)spin_lock_total);
37080Sstevel@tonic-gate 	(void) fprintf(stderr, " spin_lock_spin = %10llu\n",
37095629Sraf 	    (u_longlong_t)spin_lock_spin);
37100Sstevel@tonic-gate 	(void) fprintf(stderr, " spin_lock_spin2 = %10llu\n",
37115629Sraf 	    (u_longlong_t)spin_lock_spin2);
37120Sstevel@tonic-gate 	(void) fprintf(stderr, " spin_lock_sleep = %10llu\n",
37135629Sraf 	    (u_longlong_t)spin_lock_sleep);
37140Sstevel@tonic-gate 	(void) fprintf(stderr, " spin_lock_wakeup = %10llu\n",
37155629Sraf 	    (u_longlong_t)spin_lock_wakeup);
37160Sstevel@tonic-gate }
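
/*
 * The dump above is produced only when thread_queue_dump is nonzero;
 * in this library that flag is normally taken from the environment
 * (an illustrative invocation, not new functionality):
 *
 *	$ _THREAD_QUEUE_DUMP=1 ./my_threaded_program
 */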