/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/rtpriocntl.h>
#include <sys/sdt.h>
#include <atomic.h>

#if defined(THREAD_DEBUG)
#define	INCR32(x)	(((x) != UINT32_MAX)? (x)++ : 0)
#define	INCR(x)		((x)++)
#define	DECR(x)		((x)--)
#define	MAXINCR(m, x)	((m < ++x)? (m = x) : 0)
#else
#define	INCR32(x)
#define	INCR(x)
#define	DECR(x)
#define	MAXINCR(m, x)
#endif

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);

/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}
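/*
 * Illustrative sketch (editor's note, not part of libc): these two
 * helpers are meant to bracket a lock's hold interval, assuming msp
 * points at the lock's tdb_mutex_stats_t (obtained elsewhere, e.g.
 * via the MUTEX_STATS() macro in thr_uberdata.h):
 *
 *	record_begin_hold(msp);		(just after acquiring the lock)
 *	... critical section ...
 *	(void) record_hold_time(msp);	(just before releasing the lock)
 */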
/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin count of 1000 is experimentally determined.
 * On sun4u machines with any number of processors it could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variable:
 *	_THREAD_ADAPTIVE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_queue_verify = 0;
static	int	ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * We try harder to acquire queue locks by spinning.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_queue_spin = 10000;
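/*
 * Illustrative usage of the tunables above (editor's sketch, shell
 * syntax, assuming the application is linked with this libc):
 *
 *	$ _THREAD_ADAPTIVE_SPIN=0 ./app		(disable adaptive spinning)
 *	$ _THREAD_QUEUE_SPIN=1000000 ./app	(spin hard for queue locks)
 *
 * The documented range for both counts is [0 .. 1,000,000].
 */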
#define	ALL_ATTRIBUTES				\
	(LOCK_RECURSIVE | LOCK_ERRORCHECK |	\
	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT |	\
	LOCK_ROBUST)

/*
 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
 *	LOCK_RECURSIVE
 *	LOCK_ERRORCHECK
 *	LOCK_PRIO_INHERIT
 *	LOCK_PRIO_PROTECT
 *	LOCK_ROBUST
 */
#pragma weak _mutex_init = mutex_init
/* ARGSUSED2 */
int
mutex_init(mutex_t *mp, int type, void *arg)
{
	int basetype = (type & ~ALL_ATTRIBUTES);
	const pcclass_t *pccp;
	int error = 0;
	int ceil;

	if (basetype == USYNC_PROCESS_ROBUST) {
		/*
		 * USYNC_PROCESS_ROBUST is a deprecated historical type.
		 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
		 * retain the USYNC_PROCESS_ROBUST flag so we can return
		 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
		 * mutexes will ever draw ELOCKUNMAPPED).
		 */
		type |= (USYNC_PROCESS | LOCK_ROBUST);
		basetype = USYNC_PROCESS;
	}

	if (type & LOCK_PRIO_PROTECT)
		pccp = get_info_by_policy(SCHED_FIFO);
	if ((basetype != USYNC_THREAD && basetype != USYNC_PROCESS) ||
	    (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
	    == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
	    ((type & LOCK_PRIO_PROTECT) &&
	    ((ceil = *(int *)arg) < pccp->pcc_primin ||
	    ceil > pccp->pcc_primax))) {
		error = EINVAL;
	} else if (type & LOCK_ROBUST) {
		/*
		 * Callers of mutex_init() with the LOCK_ROBUST attribute
		 * are required to pass an initially all-zero mutex.
		 * Multiple calls to mutex_init() are allowed; all but
		 * the first return EBUSY.  A call to mutex_init() is
		 * allowed to make an inconsistent robust lock consistent
		 * (for historical usage, even though the proper interface
		 * for this is mutex_consistent()).  Note that we use
		 * atomic_or_16() to set the LOCK_INITED flag so as
		 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
		 */
		if (!(mp->mutex_flag & LOCK_INITED)) {
			mp->mutex_type = (uint8_t)type;
			atomic_or_16(&mp->mutex_flag, LOCK_INITED);
			mp->mutex_magic = MUTEX_MAGIC;
		} else if (type != mp->mutex_type ||
		    ((type & LOCK_PRIO_PROTECT) && mp->mutex_ceiling != ceil)) {
			error = EINVAL;
		} else if (mutex_consistent(mp) != 0) {
			error = EBUSY;
		}
		/* register a process robust mutex with the kernel */
		if (basetype == USYNC_PROCESS)
			register_lock(mp);
	} else {
		(void) memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		mp->mutex_magic = MUTEX_MAGIC;
	}

	if (error == 0 && (type & LOCK_PRIO_PROTECT)) {
		mp->mutex_ceiling = ceil;
	}

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their mutexes
	 * (and don't check the return code from mutex_init),
	 * we put it here, after initializing the mutex regardless.
	 */
	if (error == 0 &&
	    ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		error = EINVAL;

	return (error);
}
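/*
 * Illustrative callers (editor's sketch, not part of libc).  A robust
 * process-shared mutex must start out all-zero; a ceiling mutex passes
 * its priority ceiling through 'arg':
 *
 *	mutex_t m1, m2;
 *	int ceil = 30;			(assumed to be a valid RT priority)
 *	(void) memset(&m1, 0, sizeof (m1));
 *	error = mutex_init(&m1, USYNC_PROCESS | LOCK_ROBUST, NULL);
 *	error = mutex_init(&m2, USYNC_THREAD | LOCK_PRIO_PROTECT, &ceil);
 */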
/*
 * Delete mp from list of ceiling mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	for (mcpp = &self->ul_mxchain;
	    (mcp = *mcpp) != NULL;
	    mcpp = &mcp->mxchain_next) {
		if (mcp->mxchain_mx == mp) {
			*mcpp = mcp->mxchain_next;
			lfree(mcp, sizeof (*mcp));
			return (mcpp == &self->ul_mxchain);
		}
	}
	return (0);
}

/*
 * Add mp to the list of ceiling mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}

/*
 * Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
 */
static void
set_rt_priority(ulwp_t *self, int prio)
{
	pcparms_t pcparm;

	pcparm.pc_cid = self->ul_rtclassid;
	((rtparms_t *)pcparm.pc_clparms)->rt_tqnsecs = RT_NOCHANGE;
	((rtparms_t *)pcparm.pc_clparms)->rt_pri = prio;
	(void) priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm);
}

/*
 * Inherit priority from ceiling.
 * This changes the effective priority, not the assigned priority.
 */
void
_ceil_prio_inherit(int prio)
{
	ulwp_t *self = curthread;

	self->ul_epri = prio;
	set_rt_priority(self, prio);
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp = self->ul_mxchain;
	int prio;

	if (mcp == NULL) {
		prio = self->ul_pri;
		self->ul_epri = 0;
	} else {
		prio = mcp->mxchain_mx->mutex_ceiling;
		self->ul_epri = prio;
	}
	set_rt_priority(self, prio);
}
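/*
 * Worked illustration (editor's note, assumed values): if a thread
 * holds two ceiling mutexes and ul_mxchain is [ceil 60] -> [ceil 50],
 * its effective priority is 60.  After _ceil_mylist_del() removes the
 * head and _ceil_prio_waive() runs, the new head has ceiling 50, so
 * the thread drops to effective priority 50; waiving with an empty
 * chain reverts to the assigned priority (ul_pri) and clears ul_epri.
 */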
/*
 * Clear the lock byte.  Retain the waiters byte and the spinners byte.
 * Return the old value of the lock word.
 */
static uint32_t
clear_lockbyte(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		new = old & ~LOCKMASK;
	} while (atomic_cas_32(lockword, old, new) != old);

	return (old);
}

/*
 * Same as clear_lockbyte(), but operates on mutex_lockword64.
 * The mutex_ownerpid field is cleared along with the lock byte.
 */
static uint64_t
clear_lockbyte64(volatile uint64_t *lockword64)
{
	uint64_t old;
	uint64_t new;

	do {
		old = *lockword64;
		new = old & ~LOCKMASK64;
	} while (atomic_cas_64(lockword64, old, new) != old);

	return (old);
}

/*
 * Similar to set_lock_byte(), which only tries to set the lock byte.
 * Here, we attempt to set the lock byte AND the mutex_ownerpid, keeping
 * the remaining bytes constant.  This atomic operation is required for
 * the correctness of process-shared robust locks, otherwise there would
 * be a window of vulnerability in which the lock byte had been set but
 * the mutex_ownerpid had not yet been set.  If the process were to die
 * in this window of vulnerability (due to some other thread calling
 * exit() or the process receiving a fatal signal), the mutex would be
 * left locked but without a process-ID to determine which process was
 * holding the lock.  The kernel would then be unable to mark the robust
 * mutex as LOCK_OWNERDEAD when the process died.  For all other cases
 * of process-shared locks, this operation is just a convenience, for
 * the sake of common code.
 *
 * This operation requires process-shared robust locks to be properly
 * aligned on an 8-byte boundary, at least on sparc machines, lest the
 * operation incur an alignment fault.  This is automatic when locks
 * are declared properly using the mutex_t or pthread_mutex_t data types
 * and the application does not allocate dynamic memory on less than an
 * 8-byte boundary.  See the 'horrible hack' comments below for cases
 * dealing with such broken applications.
 */
static int
set_lock_byte64(volatile uint64_t *lockword64, pid_t ownerpid)
{
	uint64_t old;
	uint64_t new;

	old = *lockword64 & ~LOCKMASK64;
	new = old | ((uint64_t)(uint_t)ownerpid << PIDSHIFT) | LOCKBYTE64;
	if (atomic_cas_64(lockword64, old, new) == old)
		return (LOCKCLEAR);

	return (LOCKSET);
}
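/*
 * Worked example of the 64-bit lock word (editor's sketch; assumes the
 * usual layout with the owner pid in the upper half, i.e. PIDSHIFT of
 * 32): for pid 1234, a successful set_lock_byte64() installs
 *
 *	new = old | ((uint64_t)1234 << 32) | LOCKBYTE64;
 *
 * so a single atomic_cas_64() publishes the lock byte and the owner
 * pid together, leaving the waiters and spinners bits untouched.
 */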
/*
 * Increment the spinners count in the mutex lock word.
 * Return 0 on success.  Return -1 if the count would overflow.
 */
static int
spinners_incr(volatile uint32_t *lockword, uint8_t max_spinners)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		if (((old & SPINNERMASK) >> SPINNERSHIFT) >= max_spinners)
			return (-1);
		new = old + (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (0);
}

/*
 * Decrement the spinners count in the mutex lock word.
 * Return the new value of the lock word.
 */
static uint32_t
spinners_decr(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		new = old = *lockword;
		if (new & SPINNERMASK)
			new -= (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (new);
}

/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 * No DTrace probes are provided for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
	ulwp_t *self = curthread;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Spin for a while, attempting to acquire the lock.
	 */
	INCR32(self->ul_spin_lock_spin);
	if (mutex_queuelock_adaptive(mp) == 0 ||
	    set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Try harder if we were previously at a no-preemption level.
	 */
	if (self->ul_preempt > 1) {
		INCR32(self->ul_spin_lock_spin2);
		if (mutex_queuelock_adaptive(mp) == 0 ||
		    set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			return;
		}
	}
	/*
	 * Give up and block in the kernel for the mutex.
	 */
	INCR32(self->ul_spin_lock_sleep);
	(void) ___lwp_mutex_timedlock(mp, NULL);
	mp->mutex_owner = (uintptr_t)self;
}

void
spin_lock_clear(mutex_t *mp)
{
	ulwp_t *self = curthread;

	mp->mutex_owner = 0;
	if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
		(void) ___lwp_mutex_wakeup(mp, 0);
		INCR32(self->ul_spin_lock_wakeup);
	}
	preempt(self);
}
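/*
 * Illustrative pairing (editor's sketch): spin_lock_set() enters a
 * no-preempt region and spin_lock_clear() leaves it, so callers simply
 * bracket short critical sections, as queue_lock()/queue_unlock() do
 * below:
 *
 *	spin_lock_set(&qp->qh_lock);	(implies no_preempt())
 *	... brief queue manipulation ...
 *	spin_lock_clear(&qp->qh_lock);	(implies preempt())
 */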
/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	void *data;
	int i;

	/*
	 * No locks are needed; we call here only when single-threaded.
	 */
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	if ((data = mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread queue_head table");
	udp->queue_head = qp = (queue_head_t *)data;
	for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
		qp->qh_type = (i < QHASHSIZE)? MX : CV;
		qp->qh_lock.mutex_flag = LOCK_INITED;
		qp->qh_lock.mutex_magic = MUTEX_MAGIC;
		qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
		qp->qh_hlen = 1;
		qp->qh_hmax = 1;
#endif
	}
}

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_root_t *qrp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	uint_t index;
	uint32_t cnt;
	char qtype;
	void *wchan;

	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		cnt++;
		ASSERT((qrp->qr_head != NULL && qrp->qr_tail != NULL) ||
		    (qrp->qr_head == NULL && qrp->qr_tail == NULL));
	}
	ASSERT(qp->qh_hlen == cnt && qp->qh_hmax >= cnt);
	qtype = ((qp - udp->queue_head) < QHASHSIZE)?
	    MX : CV;
	ASSERT(qp->qh_type == qtype);
	if (!thread_queue_verify)
		return;
	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		for (prev = NULL, ulwp = qrp->qr_head; ulwp != NULL;
		    prev = ulwp, ulwp = ulwp->ul_link) {
			cnt++;
			if (ulwp->ul_writer)
				ASSERT(prev == NULL || prev->ul_writer);
			ASSERT(ulwp->ul_qtype == qtype);
			ASSERT(ulwp->ul_wchan != NULL);
			ASSERT(ulwp->ul_sleepq == qp);
			wchan = ulwp->ul_wchan;
			ASSERT(qrp->qr_wchan == wchan);
			index = QUEUE_HASH(wchan, qtype);
			ASSERT(&udp->queue_head[index] == qp);
		}
		ASSERT(qrp->qr_tail == prev);
	}
	ASSERT(qp->qh_qlen == cnt);
}

#else	/* THREAD_DEBUG */

#define	QVERIFY(qp)

#endif	/* THREAD_DEBUG */

/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;
	queue_root_t *qrp;

	ASSERT(qtype == MX || qtype == CV);

	/*
	 * It is possible that we could be called while still single-threaded.
	 * If so, we call queue_alloc() to allocate the queue_head[] array.
	 */
	if ((qp = udp->queue_head) == NULL) {
		queue_alloc();
		qp = udp->queue_head;
	}
	qp += QUEUE_HASH(wchan, qtype);
	spin_lock_set(&qp->qh_lock);
	for (qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next)
		if (qrp->qr_wchan == wchan)
			break;
	if (qrp == NULL && qp->qh_def_root.qr_head == NULL) {
		/* the default queue root is available; use it */
		qrp = &qp->qh_def_root;
		qrp->qr_wchan = wchan;
		ASSERT(qrp->qr_next == NULL);
		ASSERT(qrp->qr_tail == NULL &&
		    qrp->qr_rtcount == 0 && qrp->qr_qlen == 0);
	}
	qp->qh_wchan = wchan;	/* valid until queue_unlock() is called */
	qp->qh_root = qrp;	/* valid until queue_unlock() is called */
	INCR32(qp->qh_lockcount);
	QVERIFY(qp);
	return (qp);
}

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
	QVERIFY(qp);
	spin_lock_clear(&qp->qh_lock);
}
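/*
 * Typical usage of the queue head interfaces (editor's sketch drawn
 * from the callers later in this file):
 *
 *	int more;
 *	queue_head_t *qp = queue_lock(mp, MX);	(hash to the MX half)
 *	ulwp_t *ulwp = dequeue(qp, &more);	(may be NULL)
 *	queue_unlock(qp);
 *
 * The table holds 2 * QHASHSIZE heads: wchans hash into the first
 * QHASHSIZE entries for mutexes (MX) and into the second half for
 * condition variables (CV).
 */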
/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.  We do this by making writers appear to have a half
 * point higher priority for purposes of priority comparisons below.
 */
#define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)

void
enqueue(queue_head_t *qp, ulwp_t *ulwp, int force_fifo)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *next;
	int pri = CMP_PRIO(ulwp);

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(ulwp->ul_sleepq != qp);

	if ((qrp = qp->qh_root) == NULL) {
		/* use the thread's queue root for the linkage */
		qrp = &ulwp->ul_queue_root;
		qrp->qr_next = qp->qh_hlist;
		qrp->qr_prev = NULL;
		qrp->qr_head = NULL;
		qrp->qr_tail = NULL;
		qrp->qr_wchan = qp->qh_wchan;
		qrp->qr_rtcount = 0;
		qrp->qr_qlen = 0;
		qrp->qr_qmax = 0;
		qp->qh_hlist->qr_prev = qrp;
		qp->qh_hlist = qrp;
		qp->qh_root = qrp;
		MAXINCR(qp->qh_hmax, qp->qh_hlen);
	}

	/*
	 * LIFO queue ordering is unfair and can lead to starvation,
	 * but it gives better performance for heavily contended locks.
	 * We use thread_queue_fifo (range is 0..8) to determine
	 * the frequency of FIFO vs LIFO queuing:
	 *	0 : every 256th time	(almost always LIFO)
	 *	1 : every 128th time
	 *	2 : every 64th time
	 *	3 : every 32nd time
	 *	4 : every 16th time	(the default value, mostly LIFO)
	 *	5 : every 8th time
	 *	6 : every 4th time
	 *	7 : every 2nd time
	 *	8 : every time		(never LIFO, always FIFO)
	 * Note that there is always some degree of FIFO ordering.
	 * This breaks livelock conditions that occur in applications
	 * that are written assuming (incorrectly) that threads acquire
	 * locks fairly, that is, in roughly round-robin order.
	 * In any event, the queue is maintained in kernel priority order.
	 *
	 * If force_fifo is non-zero, FIFO queueing is forced.
	 * SUSV3 requires this for semaphores.
	 */
	if (qrp->qr_head == NULL) {
		/*
		 * The queue is empty.  LIFO/FIFO doesn't matter.
		 */
		ASSERT(qrp->qr_tail == NULL);
		ulwpp = &qrp->qr_head;
	} else if (force_fifo |
	    (((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0)) {
		/*
		 * Enqueue after the last thread whose priority is greater
		 * than or equal to the priority of the thread being queued.
		 * Attempt first to go directly onto the tail of the queue.
		 */
		if (pri <= CMP_PRIO(qrp->qr_tail))
			ulwpp = &qrp->qr_tail->ul_link;
		else {
			for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
			    ulwpp = &next->ul_link)
				if (pri > CMP_PRIO(next))
					break;
		}
	} else {
		/*
		 * Enqueue before the first thread whose priority is less
		 * than or equal to the priority of the thread being queued.
		 * Hopefully we can go directly onto the head of the queue.
		 */
		for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
		    ulwpp = &next->ul_link)
			if (pri >= CMP_PRIO(next))
				break;
	}
	if ((ulwp->ul_link = *ulwpp) == NULL)
		qrp->qr_tail = ulwp;
	*ulwpp = ulwp;

	ulwp->ul_sleepq = qp;
	ulwp->ul_wchan = qp->qh_wchan;
	ulwp->ul_qtype = qp->qh_type;
	if ((ulwp->ul_schedctl != NULL &&
	    ulwp->ul_schedctl->sc_cid == ulwp->ul_rtclassid) |
	    ulwp->ul_pilocks) {
		ulwp->ul_rtqueued = 1;
		qrp->qr_rtcount++;
	}
	MAXINCR(qrp->qr_qmax, qrp->qr_qlen);
	MAXINCR(qp->qh_qmax, qp->qh_qlen);
}
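/*
 * Worked example of CMP_PRIO() (editor's note): a writer at real
 * priority 10 compares as (10 << 1) + 1 == 21, so it queues ahead of
 * a reader at the same priority, which compares as (10 << 1) + 0 == 20,
 * yet still behind any thread at real priority 11 (22 or 23).
 */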
/*
 * Helper function for queue_slot() and queue_slot_rt().
 * Try to find a non-suspended thread on the queue.
 */
static ulwp_t **
queue_slot_runnable(ulwp_t **ulwpp, ulwp_t **prevp, int rt)
{
	ulwp_t *ulwp;
	ulwp_t **foundpp = NULL;
	int priority = -1;
	ulwp_t *prev;
	int tpri;

	for (prev = NULL;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_stop)	/* skip suspended threads */
			continue;
		tpri = rt? CMP_PRIO(ulwp) : 0;
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
			if (!rt)
				break;
		}
	}
	return (foundpp);
}

/*
 * For real-time, we search the entire queue because the dispatch
 * (kernel) priorities may have changed since enqueueing.
 */
static ulwp_t **
queue_slot_rt(ulwp_t **ulwpp_org, ulwp_t **prevp)
{
	ulwp_t **ulwpp = ulwpp_org;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t **foundpp = ulwpp;
	int priority = CMP_PRIO(ulwp);
	ulwp_t *prev;
	int tpri;

	for (prev = ulwp, ulwpp = &ulwp->ul_link;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		tpri = CMP_PRIO(ulwp);
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
		}
	}
	ulwp = *foundpp;

	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(ulwpp_org, prevp, 1)) != NULL) {
		foundpp = ulwpp;
		ulwp = *foundpp;
	}
	ulwp->ul_rt = 1;
	return (foundpp);
}

ulwp_t **
queue_slot(queue_head_t *qp, ulwp_t **prevp, int *more)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int rt;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

	if ((qrp = qp->qh_root) == NULL || (ulwp = qrp->qr_head) == NULL) {
		*more = 0;
		return (NULL);		/* no lwps on the queue */
	}
	rt = (qrp->qr_rtcount != 0);
	*prevp = NULL;
	if (ulwp->ul_link == NULL) {	/* only one lwp on the queue */
		*more = 0;
		ulwp->ul_rt = rt;
		return (&qrp->qr_head);
	}
	*more = 1;

	if (rt)		/* real-time queue */
		return (queue_slot_rt(&qrp->qr_head, prevp));
	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(&qrp->qr_head, prevp, 0)) != NULL) {
		ulwp = *ulwpp;
		ulwp->ul_rt = 0;
		return (ulwpp);
	}
	/*
	 * The common case; just pick the first thread on the queue.
	 */
	ulwp->ul_rt = 0;
	return (&qrp->qr_head);
}

/*
 * Common code for unlinking an lwp from a user-level sleep queue.
 */
void
queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
{
	queue_root_t *qrp = qp->qh_root;
	queue_root_t *nqrp;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t *next;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(qp->qh_wchan != NULL && ulwp->ul_wchan == qp->qh_wchan);

	DECR(qp->qh_qlen);
	DECR(qrp->qr_qlen);
	if (ulwp->ul_rtqueued) {
		ulwp->ul_rtqueued = 0;
		qrp->qr_rtcount--;
	}
	next = ulwp->ul_link;
	*ulwpp = next;
	ulwp->ul_link = NULL;
	if (qrp->qr_tail == ulwp)
		qrp->qr_tail = prev;
	if (qrp == &ulwp->ul_queue_root) {
		/*
		 * We can't continue to use the unlinked thread's
		 * queue root for the linkage.
		 */
		queue_root_t *qr_next = qrp->qr_next;
		queue_root_t *qr_prev = qrp->qr_prev;

		if (qrp->qr_tail) {
			/* switch to using the last thread's queue root */
			ASSERT(qrp->qr_qlen != 0);
			nqrp = &qrp->qr_tail->ul_queue_root;
			*nqrp = *qrp;
			if (qr_next)
				qr_next->qr_prev = nqrp;
			if (qr_prev)
				qr_prev->qr_next = nqrp;
			else
				qp->qh_hlist = nqrp;
			qp->qh_root = nqrp;
		} else {
			/* empty queue root; just delete from the hash list */
			ASSERT(qrp->qr_qlen == 0);
			if (qr_next)
				qr_next->qr_prev = qr_prev;
			if (qr_prev)
				qr_prev->qr_next = qr_next;
			else
				qp->qh_hlist = qr_next;
			qp->qh_root = NULL;
			DECR(qp->qh_hlen);
		}
	}
}

ulwp_t *
dequeue(queue_head_t *qp, int *more)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;

	if ((ulwpp = queue_slot(qp, &prev, more)) == NULL)
		return (NULL);
	ulwp = *ulwpp;
	queue_unlink(qp, ulwpp, prev);
	ulwp->ul_sleepq = NULL;
	ulwp->ul_wchan = NULL;
	return (ulwp);
}

/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp)
{
	ulwp_t **ulwpp;
	ulwp_t *prev;
	int more;

	if ((ulwpp = queue_slot(qp, &prev, &more)) == NULL)
		return (NULL);
	return (*ulwpp);
}

int
dequeue_self(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int found = 0;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

	/* find self on the sleep queue */
	if ((qrp = qp->qh_root) != NULL) {
		for (prev = NULL, ulwpp = &qrp->qr_head;
		    (ulwp = *ulwpp) != NULL;
		    prev = ulwp, ulwpp = &ulwp->ul_link) {
			if (ulwp == self) {
				queue_unlink(qp, ulwpp, prev);
				self->ul_cvmutex = NULL;
				self->ul_sleepq = NULL;
				self->ul_wchan = NULL;
				found = 1;
				break;
			}
		}
	}

	if (!found)
		thr_panic("dequeue_self(): curthread not found on queue");

	return ((qrp = qp->qh_root) != NULL && qrp->qr_head != NULL);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourself off of our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;

	/*
	 * Calling enter_critical()/exit_critical() here would lead
	 * to recursion.  Just manipulate self->ul_critical directly.
	 */
	self->ul_critical++;
	while (self->ul_sleepq != NULL) {
		qp = queue_lock(self->ul_wchan, self->ul_qtype);
		/*
		 * We may have been moved from a CV queue to a
		 * mutex queue while we were attempting queue_lock().
		 * If so, just loop around and try again.
		 * dequeue_self() clears self->ul_sleepq.
		 */
		if (qp == self->ul_sleepq)
			(void) dequeue_self(qp);
		queue_unlock(qp);
	}
	self->ul_writer = 0;
	self->ul_critical--;
}

/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
static int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep;
	int acquired;
	int error;

	self->ul_sp = stkptr();
	self->ul_wchan = mp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}
	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;
	self->ul_wchan = NULL;
	self->ul_sp = 0;

	if (acquired) {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}
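/*
 * Illustrative caller-side handling (editor's sketch, not part of
 * libc): EOWNERDEAD and ELOCKUNMAPPED are "success with a warning"
 * for robust locks; the lock IS held when they are returned:
 *
 *	error = mutex_lock_kernel(mp, tsp, msp);
 *	if (error == 0 || error == EOWNERDEAD || error == ELOCKUNMAPPED)
 *		... we own the lock; for EOWNERDEAD the caller is
 *		    expected to repair state and mark it consistent ...
 *	else
 *		... ETIME, EFAULT, etc.: the lock was not acquired ...
 */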
/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	int error;
	int acquired;

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_trylock(mp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}

	if (acquired) {
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else if (error != EBUSY) {
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	sc_shared_t *tmp;

	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
	    !self->ul_vfork &&			/* not a child of vfork() */
	    !self->ul_schedctl_called) {	/* haven't been called before */
		enter_critical(self);
		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
			self->ul_schedctl = scp = tmp;
		exit_critical(self);
	}
	/*
	 * Unless the call to setup_schedctl() is surrounded
	 * by enter_critical()/exit_critical(), the address
	 * we are returning could be invalid due to a forkall()
	 * having occurred in another thread.
	 */
	return (scp);
}

/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = schedctl_init
schedctl_t *
schedctl_init(void)
{
	volatile sc_shared_t *scp = setup_schedctl();
	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}

void
schedctl_exit(void)
{
}
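/*
 * Illustrative libsched-style usage (editor's sketch; assumes the
 * schedctl_start() and schedctl_stop() macros from <schedctl.h>
 * operating on the returned pointer):
 *
 *	schedctl_t *scp = schedctl_init();
 *	if (scp != NULL) {
 *		schedctl_start(scp);	(request no preemption)
 *		... short critical region ...
 *		schedctl_stop(scp);	(allow preemption again)
 *	}
 */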
/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *volatile *ptr;

	if (self->ul_vfork)
		return (NULL);
	if (*(ptr = &self->ul_schedctl) == NULL)
		(void) setup_schedctl();
	return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_preempt++ == 0) {
		enter_critical(self);
		if ((scp = self->ul_schedctl) != NULL ||
		    (scp = setup_schedctl()) != NULL) {
			/*
			 * Save the pre-existing preempt value.
			 */
			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
			scp->sc_preemptctl.sc_nopreempt = 1;
		}
	}
}

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	ASSERT(self->ul_preempt > 0);
	if (--self->ul_preempt == 0) {
		if ((scp = self->ul_schedctl) != NULL) {
			/*
			 * Restore the pre-existing preempt value.
			 */
			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
			if (scp->sc_preemptctl.sc_yield &&
			    scp->sc_preemptctl.sc_nopreempt == 0) {
				yield();
				if (scp->sc_preemptctl.sc_yield) {
					/*
					 * Shouldn't happen.  This is either
					 * a race condition or the thread
					 * just entered the real-time class.
					 */
					yield();
					scp->sc_preemptctl.sc_yield = 0;
				}
			}
		}
		exit_critical(self);
	}
}
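/*
 * Illustrative nesting (editor's sketch): no_preempt()/preempt() pairs
 * may nest; only the outermost pair touches the schedctl state:
 *
 *	no_preempt(self);	(ul_preempt 0 -> 1, sets sc_nopreempt)
 *	no_preempt(self);	(ul_preempt 1 -> 2, no side effects)
 *	preempt(self);		(ul_preempt 2 -> 1, no side effects)
 *	preempt(self);		(ul_preempt 1 -> 0, may yield())
 */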
/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
	volatile sc_shared_t *scp = self->ul_schedctl;

	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
	    (self->ul_curplease && self->ul_critical == 1)) {
		(void) __lwp_unpark(lwpid);
		lwpid = 0;
	}
	return (lwpid);
}

/*
 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
static int
mutex_trylock_adaptive(mutex_t *mp, int tryhard)
{
	ulwp_t *self = curthread;
	int error = EBUSY;
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
	volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
	uint32_t new_lockword;
	int count = 0;
	int max_count;
	uint8_t max_spinners;

	ASSERT(!(mp->mutex_type & USYNC_PROCESS));

	if (MUTEX_OWNER(mp) == self)
		return (EBUSY);

	/* short-cut, not definitive (see below) */
	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		error = ENOTRECOVERABLE;
		goto done;
	}

	/*
	 * Make one attempt to acquire the lock before
	 * incurring the overhead of the spin loop.
	 */
	if (set_lock_byte(lockp) == 0) {
		*ownerp = (uintptr_t)self;
		error = 0;
		goto done;
	}
	if (!tryhard)
		goto done;
	if (ncpus == 0)
		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	if ((max_spinners = self->ul_max_spinners) >= ncpus)
		max_spinners = ncpus - 1;
	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
	if (max_count == 0)
		goto done;

	/*
	 * This spin loop is unfair to lwps that have already dropped into
	 * the kernel to sleep.  They will starve on a highly-contended mutex.
	 * This is just too bad.  The adaptive spin algorithm is intended
	 * to allow programs with highly-contended locks (that is, broken
	 * programs) to execute with reasonable speed despite their contention.
	 * Being fair would reduce the speed of such programs and well-written
	 * programs will not suffer in any case.
	 */
	enter_critical(self);
	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
		exit_critical(self);
		goto done;
	}
	DTRACE_PROBE1(plockstat, mutex__spin, mp);
	for (count = 1; ; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
			break;
		}
		if (count == max_count)
			break;
		SMT_PAUSE();
		/*
		 * Stop spinning if the mutex owner is not running on
		 * a processor; it will not drop the lock any time soon
		 * and we would just be wasting time to keep spinning.
		 *
		 * Note that we are looking at another thread (ulwp_t)
		 * without ensuring that the other thread does not exit.
		 * The scheme relies on ulwp_t structures never being
		 * deallocated by the library (the library employs a free
		 * list of ulwp_t structs that are reused when new threads
		 * are created) and on schedctl shared memory never being
		 * deallocated once created via __schedctl().
		 *
		 * Thus, the worst that can happen when the spinning thread
		 * looks at the owner's schedctl data is that it is looking
		 * at some other thread's schedctl data.  This almost never
		 * happens and is benign when it does.
		 */
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}
	new_lockword = spinners_decr(&mp->mutex_lockword);
	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
		/*
		 * We haven't yet acquired the lock, the lock
		 * is free, and there are no other spinners.
		 * Make one final attempt to acquire the lock.
		 *
		 * This isn't strictly necessary since mutex_lock_queue()
		 * (the next action this thread will take if it doesn't
		 * acquire the lock here) makes one attempt to acquire
		 * the lock before putting the thread to sleep.
		 *
		 * If the next action for this thread (on failure here)
		 * were not to call mutex_lock_queue(), this would be
		 * necessary for correctness, to avoid ending up with an
		 * unheld mutex with waiters but no one to wake them up.
		 */
		if (set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
		}
		count++;
	}
	exit_critical(self);

done:
	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		/*
		 * We shouldn't own the mutex.
		 * Just clear the lock; everyone has already been woken up.
13694574Sraf */ 13704574Sraf mp->mutex_owner = 0; 13716057Sraf (void) clear_lockbyte(&mp->mutex_lockword); 13724574Sraf error = ENOTRECOVERABLE; 13734574Sraf } 13744574Sraf 13754574Sraf if (error) { 13765629Sraf if (count) { 13775629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 0, count); 13785629Sraf } 13794574Sraf if (error != EBUSY) { 13804574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 13814574Sraf } 13824574Sraf } else { 13835629Sraf if (count) { 13845629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 13855629Sraf } 13864574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 13874574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 13884574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 13894574Sraf error = EOWNERDEAD; 13904574Sraf } 13914574Sraf } 13924574Sraf 13934574Sraf return (error); 13940Sstevel@tonic-gate } 13950Sstevel@tonic-gate 13960Sstevel@tonic-gate /* 13970Sstevel@tonic-gate * Same as mutex_trylock_adaptive(), except specifically for queue locks. 13980Sstevel@tonic-gate * The owner field is not set here; the caller (spin_lock_set()) sets it. 13990Sstevel@tonic-gate */ 14004574Sraf static int 14010Sstevel@tonic-gate mutex_queuelock_adaptive(mutex_t *mp) 14020Sstevel@tonic-gate { 14030Sstevel@tonic-gate ulwp_t *ulwp; 14040Sstevel@tonic-gate volatile sc_shared_t *scp; 14050Sstevel@tonic-gate volatile uint8_t *lockp; 14060Sstevel@tonic-gate volatile uint64_t *ownerp; 14070Sstevel@tonic-gate int count = curthread->ul_queue_spin; 14080Sstevel@tonic-gate 14090Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 14100Sstevel@tonic-gate 14110Sstevel@tonic-gate if (count == 0) 14120Sstevel@tonic-gate return (EBUSY); 14130Sstevel@tonic-gate 14140Sstevel@tonic-gate lockp = (volatile uint8_t *)&mp->mutex_lockw; 14150Sstevel@tonic-gate ownerp = (volatile uint64_t *)&mp->mutex_owner; 14160Sstevel@tonic-gate while (--count >= 0) { 14170Sstevel@tonic-gate if (*lockp == 0 && set_lock_byte(lockp) == 0) 14180Sstevel@tonic-gate return (0); 14190Sstevel@tonic-gate SMT_PAUSE(); 14200Sstevel@tonic-gate if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL && 14210Sstevel@tonic-gate ((scp = ulwp->ul_schedctl) == NULL || 14220Sstevel@tonic-gate scp->sc_state != SC_ONPROC)) 14230Sstevel@tonic-gate break; 14240Sstevel@tonic-gate } 14250Sstevel@tonic-gate 14260Sstevel@tonic-gate return (EBUSY); 14270Sstevel@tonic-gate } 14280Sstevel@tonic-gate 14290Sstevel@tonic-gate /* 14300Sstevel@tonic-gate * Like mutex_trylock_adaptive(), but for process-shared mutexes. 14314613Sraf * Spin for a while (if 'tryhard' is true), trying to grab the lock. 14320Sstevel@tonic-gate * If this fails, return EBUSY and let the caller deal with it. 14330Sstevel@tonic-gate * If this succeeds, return 0 with mutex_owner set to curthread 14340Sstevel@tonic-gate * and mutex_ownerpid set to the current pid. 
14350Sstevel@tonic-gate */ 14364574Sraf static int 14374613Sraf mutex_trylock_process(mutex_t *mp, int tryhard) 14380Sstevel@tonic-gate { 14390Sstevel@tonic-gate ulwp_t *self = curthread; 14405629Sraf uberdata_t *udp = self->ul_uberdata; 14414574Sraf int error = EBUSY; 14426057Sraf volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64; 14435629Sraf uint32_t new_lockword; 14445629Sraf int count = 0; 14455629Sraf int max_count; 14465629Sraf uint8_t max_spinners; 14474574Sraf 14487255Sraf #if defined(__sparc) && !defined(_LP64) 14497255Sraf /* horrible hack, necessary only on 32-bit sparc */ 14507255Sraf int fix_alignment_problem = 14517255Sraf (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 14527255Sraf self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)); 14537255Sraf #endif 14547255Sraf 14554574Sraf ASSERT(mp->mutex_type & USYNC_PROCESS); 14564574Sraf 14574574Sraf if (shared_mutex_held(mp)) 14580Sstevel@tonic-gate return (EBUSY); 14590Sstevel@tonic-gate 14604574Sraf /* short-cut, not definitive (see below) */ 14614574Sraf if (mp->mutex_flag & LOCK_NOTRECOVERABLE) { 14624574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 14635629Sraf error = ENOTRECOVERABLE; 14645629Sraf goto done; 14654574Sraf } 14664574Sraf 14675629Sraf /* 14685629Sraf * Make one attempt to acquire the lock before 14695629Sraf * incurring the overhead of the spin loop. 14705629Sraf */ 14715629Sraf enter_critical(self); 14727255Sraf #if defined(__sparc) && !defined(_LP64) 14737255Sraf /* horrible hack, necessary only on 32-bit sparc */ 14747255Sraf if (fix_alignment_problem) { 14757255Sraf if (set_lock_byte(&mp->mutex_lockw) == 0) { 14767255Sraf mp->mutex_ownerpid = udp->pid; 14777255Sraf mp->mutex_owner = (uintptr_t)self; 14787255Sraf exit_critical(self); 14797255Sraf error = 0; 14807255Sraf goto done; 14817255Sraf } 14827255Sraf } else 14837255Sraf #endif 14846057Sraf if (set_lock_byte64(lockp, udp->pid) == 0) { 14855629Sraf mp->mutex_owner = (uintptr_t)self; 14866057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 14875629Sraf exit_critical(self); 14885629Sraf error = 0; 14895629Sraf goto done; 14905629Sraf } 14915629Sraf exit_critical(self); 14925629Sraf if (!tryhard) 14935629Sraf goto done; 14944574Sraf if (ncpus == 0) 14954574Sraf ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN); 14965629Sraf if ((max_spinners = self->ul_max_spinners) >= ncpus) 14975629Sraf max_spinners = ncpus - 1; 14985629Sraf max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0; 14995629Sraf if (max_count == 0) 15005629Sraf goto done; 15015629Sraf 15020Sstevel@tonic-gate /* 15030Sstevel@tonic-gate * This is a process-shared mutex. 15040Sstevel@tonic-gate * We cannot know if the owner is running on a processor. 15050Sstevel@tonic-gate * We just spin and hope that it is on a processor. 
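 *
 * Contrast with mutex_trylock_adaptive() (illustrative): there the
 * spinner can watch the owner's schedctl state and quit early,
 *	if (scp->sc_state != SC_ONPROC)
 *		break;		/* owner is off-processor */
 * but no such cross-process visibility exists here, so the loop
 * below spins for the full max_count unless it acquires the lock.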
15060Sstevel@tonic-gate */ 15074574Sraf enter_critical(self); 15085629Sraf if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) { 15095629Sraf exit_critical(self); 15105629Sraf goto done; 15115629Sraf } 15125629Sraf DTRACE_PROBE1(plockstat, mutex__spin, mp); 15135629Sraf for (count = 1; ; count++) { 15147255Sraf #if defined(__sparc) && !defined(_LP64) 15157255Sraf /* horrible hack, necessary only on 32-bit sparc */ 15167255Sraf if (fix_alignment_problem) { 15177255Sraf if ((*lockp & LOCKMASK64) == 0 && 15187255Sraf set_lock_byte(&mp->mutex_lockw) == 0) { 15197255Sraf mp->mutex_ownerpid = udp->pid; 15207255Sraf mp->mutex_owner = (uintptr_t)self; 15217255Sraf error = 0; 15227255Sraf break; 15237255Sraf } 15247255Sraf } else 15257255Sraf #endif 15266057Sraf if ((*lockp & LOCKMASK64) == 0 && 15276057Sraf set_lock_byte64(lockp, udp->pid) == 0) { 15284574Sraf mp->mutex_owner = (uintptr_t)self; 15296057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 15304574Sraf error = 0; 15314574Sraf break; 15324574Sraf } 15335629Sraf if (count == max_count) 15345629Sraf break; 15354574Sraf SMT_PAUSE(); 15364574Sraf } 15375629Sraf new_lockword = spinners_decr(&mp->mutex_lockword); 15385629Sraf if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) { 15395629Sraf /* 15405629Sraf * We haven't yet acquired the lock, the lock 15415629Sraf * is free, and there are no other spinners. 15425629Sraf * Make one final attempt to acquire the lock. 15435629Sraf * 15445629Sraf * This isn't strictly necessary since mutex_lock_kernel() 15455629Sraf * (the next action this thread will take if it doesn't 15465629Sraf * acquire the lock here) makes one attempt to acquire 15475629Sraf * the lock before putting the thread to sleep. 15485629Sraf * 15495629Sraf * If the next action for this thread (on failure here) 15505629Sraf * were not to call mutex_lock_kernel(), this would be 15515629Sraf * necessary for correctness, to avoid ending up with an 15525629Sraf * unheld mutex with waiters but no one to wake them up. 15535629Sraf */ 15547255Sraf #if defined(__sparc) && !defined(_LP64) 15557255Sraf /* horrible hack, necessary only on 32-bit sparc */ 15567255Sraf if (fix_alignment_problem) { 15577255Sraf if (set_lock_byte(&mp->mutex_lockw) == 0) { 15587255Sraf mp->mutex_ownerpid = udp->pid; 15597255Sraf mp->mutex_owner = (uintptr_t)self; 15607255Sraf error = 0; 15617255Sraf } 15627255Sraf } else 15637255Sraf #endif 15646057Sraf if (set_lock_byte64(lockp, udp->pid) == 0) { 15655629Sraf mp->mutex_owner = (uintptr_t)self; 15666057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 15675629Sraf error = 0; 15685629Sraf } 15695629Sraf count++; 15705629Sraf } 15714574Sraf exit_critical(self); 15724574Sraf 15735629Sraf done: 15744574Sraf if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { 15754574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 15764574Sraf /* 15776057Sraf * We shouldn't own the mutex. 15786057Sraf * Just clear the lock; everyone has already been waked up. 
15794574Sraf */ 15804574Sraf mp->mutex_owner = 0; 15816057Sraf /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */ 15826057Sraf (void) clear_lockbyte64(&mp->mutex_lockword64); 15834574Sraf error = ENOTRECOVERABLE; 15840Sstevel@tonic-gate } 15850Sstevel@tonic-gate 15864574Sraf if (error) { 15875629Sraf if (count) { 15885629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 0, count); 15895629Sraf } 15904574Sraf if (error != EBUSY) { 15914574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 15924574Sraf } 15934574Sraf } else { 15945629Sraf if (count) { 15955629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 15965629Sraf } 15974574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 15984574Sraf if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) { 15994574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 16004574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) 16014574Sraf error = EOWNERDEAD; 16024574Sraf else if (mp->mutex_type & USYNC_PROCESS_ROBUST) 16034574Sraf error = ELOCKUNMAPPED; 16044574Sraf else 16054574Sraf error = EOWNERDEAD; 16064574Sraf } 16074574Sraf } 16084574Sraf 16094574Sraf return (error); 16100Sstevel@tonic-gate } 16110Sstevel@tonic-gate 16120Sstevel@tonic-gate /* 16130Sstevel@tonic-gate * Mutex wakeup code for releasing a USYNC_THREAD mutex. 16140Sstevel@tonic-gate * Returns the lwpid of the thread that was dequeued, if any. 16150Sstevel@tonic-gate * The caller of mutex_wakeup() must call __lwp_unpark(lwpid) 16160Sstevel@tonic-gate * to wake up the specified lwp. 16170Sstevel@tonic-gate */ 16184574Sraf static lwpid_t 16190Sstevel@tonic-gate mutex_wakeup(mutex_t *mp) 16200Sstevel@tonic-gate { 16210Sstevel@tonic-gate lwpid_t lwpid = 0; 16226247Sraf int more; 16230Sstevel@tonic-gate queue_head_t *qp; 16240Sstevel@tonic-gate ulwp_t *ulwp; 16250Sstevel@tonic-gate 16260Sstevel@tonic-gate /* 16270Sstevel@tonic-gate * Dequeue a waiter from the sleep queue. Don't touch the mutex 16280Sstevel@tonic-gate * waiters bit if no one was found on the queue because the mutex 16290Sstevel@tonic-gate * might have been deallocated or reallocated for another purpose. 16300Sstevel@tonic-gate */ 16310Sstevel@tonic-gate qp = queue_lock(mp, MX); 16326247Sraf if ((ulwp = dequeue(qp, &more)) != NULL) { 16330Sstevel@tonic-gate lwpid = ulwp->ul_lwpid; 16346247Sraf mp->mutex_waiters = more; 16350Sstevel@tonic-gate } 16360Sstevel@tonic-gate queue_unlock(qp); 16370Sstevel@tonic-gate return (lwpid); 16380Sstevel@tonic-gate } 16390Sstevel@tonic-gate 16400Sstevel@tonic-gate /* 16414574Sraf * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex. 16424574Sraf */ 16434574Sraf static void 16444574Sraf mutex_wakeup_all(mutex_t *mp) 16454574Sraf { 16464574Sraf queue_head_t *qp; 16476247Sraf queue_root_t *qrp; 16484574Sraf int nlwpid = 0; 16494574Sraf int maxlwps = MAXLWPS; 16504574Sraf ulwp_t *ulwp; 16514574Sraf lwpid_t buffer[MAXLWPS]; 16524574Sraf lwpid_t *lwpid = buffer; 16534574Sraf 16544574Sraf /* 16554574Sraf * Walk the list of waiters and prepare to wake up all of them. 16564574Sraf * The waiters flag has already been cleared from the mutex. 16574574Sraf * 16584574Sraf * We keep track of lwpids that are to be unparked in lwpid[]. 16594574Sraf * __lwp_unpark_all() is called to unpark all of them after 16604574Sraf * they have been removed from the sleep queue and the sleep 16614574Sraf * queue lock has been dropped. 
If we run out of space in our 16624574Sraf * on-stack buffer, we need to allocate more but we can't call 16634574Sraf * lmalloc() because we are holding a queue lock when the overflow 16644574Sraf * occurs and lmalloc() acquires a lock. We can't use alloca() 16654574Sraf * either because the application may have allocated a small 16664574Sraf * stack and we don't want to overrun the stack. So we call 16674574Sraf * alloc_lwpids() to allocate a bigger buffer using the mmap() 16684574Sraf * system call directly since that path acquires no locks. 16694574Sraf */ 16704574Sraf qp = queue_lock(mp, MX); 16716247Sraf for (;;) { 16726247Sraf if ((qrp = qp->qh_root) == NULL || 16736247Sraf (ulwp = qrp->qr_head) == NULL) 16746247Sraf break; 16756247Sraf ASSERT(ulwp->ul_wchan == mp); 16766247Sraf queue_unlink(qp, &qrp->qr_head, NULL); 16776247Sraf ulwp->ul_sleepq = NULL; 16786247Sraf ulwp->ul_wchan = NULL; 16796247Sraf if (nlwpid == maxlwps) 16806247Sraf lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 16816247Sraf lwpid[nlwpid++] = ulwp->ul_lwpid; 16824574Sraf } 16834574Sraf 16844574Sraf if (nlwpid == 0) { 16854574Sraf queue_unlock(qp); 16864574Sraf } else { 16875629Sraf mp->mutex_waiters = 0; 16884574Sraf no_preempt(curthread); 16894574Sraf queue_unlock(qp); 16904574Sraf if (nlwpid == 1) 16914574Sraf (void) __lwp_unpark(lwpid[0]); 16924574Sraf else 16934574Sraf (void) __lwp_unpark_all(lwpid, nlwpid); 16944574Sraf preempt(curthread); 16954574Sraf } 16964574Sraf 16974574Sraf if (lwpid != buffer) 16986515Sraf (void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t)); 16994574Sraf } 17004574Sraf 17014574Sraf /* 17025629Sraf * Release a process-private mutex. 17035629Sraf * As an optimization, if there are waiters but there are also spinners 17045629Sraf * attempting to acquire the mutex, then don't bother waking up a waiter; 17055629Sraf * one of the spinners will acquire the mutex soon and it would be a waste 17065629Sraf * of resources to wake up some thread just to have it spin for a while 17075629Sraf * and then possibly go back to sleep. See mutex_trylock_adaptive(). 17080Sstevel@tonic-gate */ 17094574Sraf static lwpid_t 17104574Sraf mutex_unlock_queue(mutex_t *mp, int release_all) 17110Sstevel@tonic-gate { 17125629Sraf lwpid_t lwpid = 0; 17135629Sraf uint32_t old_lockword; 17145629Sraf 17156057Sraf DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 17165629Sraf mp->mutex_owner = 0; 17175629Sraf old_lockword = clear_lockbyte(&mp->mutex_lockword); 17185629Sraf if ((old_lockword & WAITERMASK) && 17195629Sraf (release_all || (old_lockword & SPINNERMASK) == 0)) { 17205629Sraf ulwp_t *self = curthread; 17210Sstevel@tonic-gate no_preempt(self); /* ensure a prompt wakeup */ 17225629Sraf if (release_all) 17235629Sraf mutex_wakeup_all(mp); 17245629Sraf else 17255629Sraf lwpid = mutex_wakeup(mp); 17265629Sraf if (lwpid == 0) 17275629Sraf preempt(self); 17284574Sraf } 17290Sstevel@tonic-gate return (lwpid); 17300Sstevel@tonic-gate } 17310Sstevel@tonic-gate 17320Sstevel@tonic-gate /* 17330Sstevel@tonic-gate * Like mutex_unlock_queue(), but for process-shared mutexes. 
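 *
 * The essential difference (a condensation of the code below): the
 * waiters are sleeping in the kernel, not on libc's sleep queues,
 * so waking them requires a system call:
 *	(void) ___lwp_mutex_wakeup(mp, release_all);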
17340Sstevel@tonic-gate */ 17354574Sraf static void 17364574Sraf mutex_unlock_process(mutex_t *mp, int release_all) 17370Sstevel@tonic-gate { 17387255Sraf ulwp_t *self = curthread; 17396057Sraf uint64_t old_lockword64; 17406057Sraf 17416057Sraf DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 17420Sstevel@tonic-gate mp->mutex_owner = 0; 17437255Sraf #if defined(__sparc) && !defined(_LP64) 17447255Sraf /* horrible hack, necessary only on 32-bit sparc */ 17457255Sraf if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 17467255Sraf self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)) { 17477255Sraf uint32_t old_lockword; 17487255Sraf mp->mutex_ownerpid = 0; 17497255Sraf old_lockword = clear_lockbyte(&mp->mutex_lockword); 17507255Sraf if ((old_lockword & WAITERMASK) && 17517255Sraf (release_all || (old_lockword & SPINNERMASK) == 0)) { 17527255Sraf no_preempt(self); /* ensure a prompt wakeup */ 17537255Sraf (void) ___lwp_mutex_wakeup(mp, release_all); 17547255Sraf preempt(self); 17557255Sraf } 17567255Sraf return; 17577255Sraf } 17587255Sraf #endif 17596057Sraf /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */ 17606057Sraf old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64); 17616057Sraf if ((old_lockword64 & WAITERMASK64) && 17626057Sraf (release_all || (old_lockword64 & SPINNERMASK64) == 0)) { 17635629Sraf no_preempt(self); /* ensure a prompt wakeup */ 17645629Sraf (void) ___lwp_mutex_wakeup(mp, release_all); 17655629Sraf preempt(self); 17660Sstevel@tonic-gate } 17670Sstevel@tonic-gate } 17680Sstevel@tonic-gate 17690Sstevel@tonic-gate void 17700Sstevel@tonic-gate stall(void) 17710Sstevel@tonic-gate { 17720Sstevel@tonic-gate for (;;) 17730Sstevel@tonic-gate (void) mutex_lock_kernel(&stall_mutex, NULL, NULL); 17740Sstevel@tonic-gate } 17750Sstevel@tonic-gate 17760Sstevel@tonic-gate /* 17770Sstevel@tonic-gate * Acquire a USYNC_THREAD mutex via user-level sleep queues. 17780Sstevel@tonic-gate * We failed set_lock_byte(&mp->mutex_lockw) before coming here. 17794574Sraf * If successful, returns with mutex_owner set correctly. 17800Sstevel@tonic-gate */ 17810Sstevel@tonic-gate int 17820Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp, 17830Sstevel@tonic-gate timespec_t *tsp) 17840Sstevel@tonic-gate { 17850Sstevel@tonic-gate uberdata_t *udp = curthread->ul_uberdata; 17860Sstevel@tonic-gate queue_head_t *qp; 17870Sstevel@tonic-gate hrtime_t begin_sleep; 17880Sstevel@tonic-gate int error = 0; 17890Sstevel@tonic-gate 17900Sstevel@tonic-gate self->ul_sp = stkptr(); 17910Sstevel@tonic-gate if (__td_event_report(self, TD_SLEEP, udp)) { 17920Sstevel@tonic-gate self->ul_wchan = mp; 17930Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_SLEEP; 17940Sstevel@tonic-gate self->ul_td_evbuf.eventdata = mp; 17950Sstevel@tonic-gate tdb_event(TD_SLEEP, udp); 17960Sstevel@tonic-gate } 17970Sstevel@tonic-gate if (msp) { 17980Sstevel@tonic-gate tdb_incr(msp->mutex_sleep); 17990Sstevel@tonic-gate begin_sleep = gethrtime(); 18000Sstevel@tonic-gate } 18010Sstevel@tonic-gate 18020Sstevel@tonic-gate DTRACE_PROBE1(plockstat, mutex__block, mp); 18030Sstevel@tonic-gate 18040Sstevel@tonic-gate /* 18050Sstevel@tonic-gate * Put ourself on the sleep queue, and while we are 18060Sstevel@tonic-gate * unable to grab the lock, go park in the kernel. 18070Sstevel@tonic-gate * Take ourself off the sleep queue after we acquire the lock. 18080Sstevel@tonic-gate * The waiter bit can be set/cleared only while holding the queue lock. 
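 *
 * In outline (an illustrative condensation of the loop below):
 *	qp = queue_lock(mp, MX);
 *	enqueue(qp, self, 0);
 *	mp->mutex_waiters = 1;
 *	while (set_lock_byte(&mp->mutex_lockw) != 0) {
 *		queue_unlock(qp);
 *		(void) __lwp_park(tsp, 0);	/* sleep in the kernel */
 *		qp = queue_lock(mp, MX);
 *		/* re-enqueue if a wakeup or signal removed us */
 *	}
 *	mp->mutex_waiters = dequeue_self(qp);
 *	queue_unlock(qp);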
18090Sstevel@tonic-gate */ 18100Sstevel@tonic-gate qp = queue_lock(mp, MX); 18116247Sraf enqueue(qp, self, 0); 18120Sstevel@tonic-gate mp->mutex_waiters = 1; 18130Sstevel@tonic-gate for (;;) { 18140Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 18150Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 18166247Sraf mp->mutex_waiters = dequeue_self(qp); 18170Sstevel@tonic-gate break; 18180Sstevel@tonic-gate } 18190Sstevel@tonic-gate set_parking_flag(self, 1); 18200Sstevel@tonic-gate queue_unlock(qp); 18210Sstevel@tonic-gate /* 18220Sstevel@tonic-gate * __lwp_park() will return the residual time in tsp 18230Sstevel@tonic-gate * if we are unparked before the timeout expires. 18240Sstevel@tonic-gate */ 18255629Sraf error = __lwp_park(tsp, 0); 18260Sstevel@tonic-gate set_parking_flag(self, 0); 18270Sstevel@tonic-gate /* 18280Sstevel@tonic-gate * We could have taken a signal or suspended ourself. 18290Sstevel@tonic-gate * If we did, then we removed ourself from the queue. 18300Sstevel@tonic-gate * Someone else may have removed us from the queue 18310Sstevel@tonic-gate * as a consequence of mutex_unlock(). We may have 18320Sstevel@tonic-gate * gotten a timeout from __lwp_park(). Or we may still 18330Sstevel@tonic-gate * be on the queue and this is just a spurious wakeup. 18340Sstevel@tonic-gate */ 18350Sstevel@tonic-gate qp = queue_lock(mp, MX); 18360Sstevel@tonic-gate if (self->ul_sleepq == NULL) { 18375629Sraf if (error) { 18386247Sraf mp->mutex_waiters = queue_waiter(qp)? 1 : 0; 18395629Sraf if (error != EINTR) 18405629Sraf break; 18415629Sraf error = 0; 18425629Sraf } 18430Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 18440Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 18450Sstevel@tonic-gate break; 18460Sstevel@tonic-gate } 18476247Sraf enqueue(qp, self, 0); 18480Sstevel@tonic-gate mp->mutex_waiters = 1; 18490Sstevel@tonic-gate } 18500Sstevel@tonic-gate ASSERT(self->ul_sleepq == qp && 18510Sstevel@tonic-gate self->ul_qtype == MX && 18520Sstevel@tonic-gate self->ul_wchan == mp); 18530Sstevel@tonic-gate if (error) { 18545629Sraf if (error != EINTR) { 18556247Sraf mp->mutex_waiters = dequeue_self(qp); 18565629Sraf break; 18575629Sraf } 18585629Sraf error = 0; 18590Sstevel@tonic-gate } 18600Sstevel@tonic-gate } 18610Sstevel@tonic-gate ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 18620Sstevel@tonic-gate self->ul_wchan == NULL); 18630Sstevel@tonic-gate self->ul_sp = 0; 18640Sstevel@tonic-gate queue_unlock(qp); 18654574Sraf 18660Sstevel@tonic-gate if (msp) 18670Sstevel@tonic-gate msp->mutex_sleep_time += gethrtime() - begin_sleep; 18680Sstevel@tonic-gate 18690Sstevel@tonic-gate ASSERT(error == 0 || error == EINVAL || error == ETIME); 18704574Sraf 18714574Sraf if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { 18724574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 18734574Sraf /* 18746057Sraf * We shouldn't own the mutex. 18756057Sraf * Just clear the lock; everyone has already been waked up. 
18764574Sraf */ 18774574Sraf mp->mutex_owner = 0; 18786057Sraf (void) clear_lockbyte(&mp->mutex_lockword); 18794574Sraf error = ENOTRECOVERABLE; 18804574Sraf } 18814574Sraf 18824574Sraf if (error) { 18834574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); 18844574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 18854574Sraf } else { 18864574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 18874574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 18884574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 18894574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 18904574Sraf error = EOWNERDEAD; 18914574Sraf } 18924574Sraf } 18934574Sraf 18940Sstevel@tonic-gate return (error); 18950Sstevel@tonic-gate } 18960Sstevel@tonic-gate 18974574Sraf static int 18984574Sraf mutex_recursion(mutex_t *mp, int mtype, int try) 18994574Sraf { 19006812Sraf ASSERT(mutex_held(mp)); 19014574Sraf ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)); 19024574Sraf ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 19034574Sraf 19044574Sraf if (mtype & LOCK_RECURSIVE) { 19054574Sraf if (mp->mutex_rcount == RECURSION_MAX) { 19064574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN); 19074574Sraf return (EAGAIN); 19084574Sraf } 19094574Sraf mp->mutex_rcount++; 19104574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0); 19114574Sraf return (0); 19124574Sraf } 19134574Sraf if (try == MUTEX_LOCK) { 19144574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 19154574Sraf return (EDEADLK); 19164574Sraf } 19174574Sraf return (EBUSY); 19184574Sraf } 19194574Sraf 19204574Sraf /* 19214574Sraf * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so 19224574Sraf * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary. 19234574Sraf * We use tdb_hash_lock here and in the synch object tracking code in 19244574Sraf * the tdb_agent.c file. There is no conflict between these two usages. 19254574Sraf */ 19264574Sraf void 19274574Sraf register_lock(mutex_t *mp) 19284574Sraf { 19294574Sraf uberdata_t *udp = curthread->ul_uberdata; 19304574Sraf uint_t hash = LOCK_HASH(mp); 19314574Sraf robust_t *rlp; 19324574Sraf robust_t **rlpp; 19334574Sraf robust_t **table; 19344574Sraf 19354574Sraf if ((table = udp->robustlocks) == NULL) { 19364574Sraf lmutex_lock(&udp->tdb_hash_lock); 19374574Sraf if ((table = udp->robustlocks) == NULL) { 19384574Sraf table = lmalloc(LOCKHASHSZ * sizeof (robust_t *)); 19396812Sraf membar_producer(); 19404574Sraf udp->robustlocks = table; 19414574Sraf } 19424574Sraf lmutex_unlock(&udp->tdb_hash_lock); 19434574Sraf } 19446812Sraf membar_consumer(); 19454574Sraf 19464574Sraf /* 19474574Sraf * First search the registered table with no locks held. 19484574Sraf * This is safe because the table never shrinks 19494574Sraf * and we can only get a false negative. 19504574Sraf */ 19514574Sraf for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) { 19524574Sraf if (rlp->robust_lock == mp) /* already registered */ 19534574Sraf return; 19544574Sraf } 19554574Sraf 19564574Sraf /* 19574574Sraf * The lock was not found. 19584574Sraf * Repeat the operation with tdb_hash_lock held. 
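 *
 * This is the usual double-checked publication pattern (a reading
 * of the code in this function, not a new protocol).  A writer does
 *	rlp->robust_lock = mp;
 *	membar_producer();	/* initialize before publishing */
 *	*rlpp = rlp;
 * so the lock-free scan above can see only fully-initialized
 * entries; an insertion that the scan races past is caught by this
 * second scan under tdb_hash_lock.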
19594574Sraf */ 19604574Sraf lmutex_lock(&udp->tdb_hash_lock); 19614574Sraf 19624574Sraf for (rlpp = &table[hash]; 19634574Sraf (rlp = *rlpp) != NULL; 19644574Sraf rlpp = &rlp->robust_next) { 19654574Sraf if (rlp->robust_lock == mp) { /* already registered */ 19664574Sraf lmutex_unlock(&udp->tdb_hash_lock); 19674574Sraf return; 19684574Sraf } 19694574Sraf } 19704574Sraf 19714574Sraf /* 19724574Sraf * The lock has never been registered. 19734574Sraf * Register it now and add it to the table. 19744574Sraf */ 19754574Sraf (void) ___lwp_mutex_register(mp); 19764574Sraf rlp = lmalloc(sizeof (*rlp)); 19774574Sraf rlp->robust_lock = mp; 19786812Sraf membar_producer(); 19794574Sraf *rlpp = rlp; 19804574Sraf 19814574Sraf lmutex_unlock(&udp->tdb_hash_lock); 19824574Sraf } 19834574Sraf 19844574Sraf /* 19854574Sraf * This is called in the child of fork()/forkall() to start over 19864574Sraf * with a clean slate. (Each process must register its own locks.) 19874574Sraf * No locks are needed because all other threads are suspended or gone. 19884574Sraf */ 19894574Sraf void 19904574Sraf unregister_locks(void) 19914574Sraf { 19924574Sraf uberdata_t *udp = curthread->ul_uberdata; 19934574Sraf uint_t hash; 19944574Sraf robust_t **table; 19954574Sraf robust_t *rlp; 19964574Sraf robust_t *next; 19974574Sraf 19984574Sraf if ((table = udp->robustlocks) != NULL) { 19994574Sraf for (hash = 0; hash < LOCKHASHSZ; hash++) { 20004574Sraf rlp = table[hash]; 20014574Sraf while (rlp != NULL) { 20024574Sraf next = rlp->robust_next; 20034574Sraf lfree(rlp, sizeof (*rlp)); 20044574Sraf rlp = next; 20054574Sraf } 20064574Sraf } 20074574Sraf lfree(table, LOCKHASHSZ * sizeof (robust_t *)); 20084574Sraf udp->robustlocks = NULL; 20094574Sraf } 20104574Sraf } 20114574Sraf 20120Sstevel@tonic-gate /* 20130Sstevel@tonic-gate * Returns with mutex_owner set correctly. 20140Sstevel@tonic-gate */ 20156247Sraf int 20160Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try) 20170Sstevel@tonic-gate { 20180Sstevel@tonic-gate ulwp_t *self = curthread; 20190Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 20200Sstevel@tonic-gate int mtype = mp->mutex_type; 20210Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 20220Sstevel@tonic-gate int error = 0; 20236247Sraf int noceil = try & MUTEX_NOCEIL; 20244574Sraf uint8_t ceil; 20254574Sraf int myprio; 20260Sstevel@tonic-gate 20276247Sraf try &= ~MUTEX_NOCEIL; 20280Sstevel@tonic-gate ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 20290Sstevel@tonic-gate 20300Sstevel@tonic-gate if (!self->ul_schedctl_called) 20310Sstevel@tonic-gate (void) setup_schedctl(); 20320Sstevel@tonic-gate 20330Sstevel@tonic-gate if (msp && try == MUTEX_TRY) 20340Sstevel@tonic-gate tdb_incr(msp->mutex_try); 20350Sstevel@tonic-gate 20366812Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_held(mp)) 20374574Sraf return (mutex_recursion(mp, mtype, try)); 20380Sstevel@tonic-gate 20390Sstevel@tonic-gate if (self->ul_error_detection && try == MUTEX_LOCK && 20406812Sraf tsp == NULL && mutex_held(mp)) 20410Sstevel@tonic-gate lock_error(mp, "mutex_lock", NULL, NULL); 20420Sstevel@tonic-gate 20436247Sraf if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) { 20446247Sraf update_sched(self); 20456247Sraf if (self->ul_cid != self->ul_rtclassid) { 20466247Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM); 20476247Sraf return (EPERM); 20486247Sraf } 20494574Sraf ceil = mp->mutex_ceiling; 20506247Sraf myprio = self->ul_epri? 
self->ul_epri : self->ul_pri; 20514574Sraf if (myprio > ceil) { 20524574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL); 20534574Sraf return (EINVAL); 20544574Sraf } 20554574Sraf if ((error = _ceil_mylist_add(mp)) != 0) { 20564574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 20574574Sraf return (error); 20580Sstevel@tonic-gate } 20594574Sraf if (myprio < ceil) 20604574Sraf _ceil_prio_inherit(ceil); 20614574Sraf } 20624574Sraf 20634574Sraf if ((mtype & (USYNC_PROCESS | LOCK_ROBUST)) 20644574Sraf == (USYNC_PROCESS | LOCK_ROBUST)) 20654574Sraf register_lock(mp); 20664574Sraf 20674574Sraf if (mtype & LOCK_PRIO_INHERIT) { 20684574Sraf /* go straight to the kernel */ 20694574Sraf if (try == MUTEX_TRY) 20704574Sraf error = mutex_trylock_kernel(mp); 20714574Sraf else /* MUTEX_LOCK */ 20724574Sraf error = mutex_lock_kernel(mp, tsp, msp); 20734574Sraf /* 20744574Sraf * The kernel never sets or clears the lock byte 20754574Sraf * for LOCK_PRIO_INHERIT mutexes. 20764574Sraf * Set it here for consistency. 20774574Sraf */ 20784574Sraf switch (error) { 20794574Sraf case 0: 20806247Sraf self->ul_pilocks++; 20814574Sraf mp->mutex_lockw = LOCKSET; 20824574Sraf break; 20834574Sraf case EOWNERDEAD: 20844574Sraf case ELOCKUNMAPPED: 20856247Sraf self->ul_pilocks++; 20864574Sraf mp->mutex_lockw = LOCKSET; 20874574Sraf /* FALLTHROUGH */ 20884574Sraf case ENOTRECOVERABLE: 20894574Sraf ASSERT(mtype & LOCK_ROBUST); 20904574Sraf break; 20914574Sraf case EDEADLK: 2092*7376SRoger.Faulkner@Sun.COM if (try == MUTEX_TRY) { 2093*7376SRoger.Faulkner@Sun.COM error = EBUSY; 2094*7376SRoger.Faulkner@Sun.COM } else if (tsp != NULL) { /* simulate a timeout */ 2095*7376SRoger.Faulkner@Sun.COM /* 2096*7376SRoger.Faulkner@Sun.COM * Note: mutex_timedlock() never returns EINTR. 
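 *
 * Net effect of this EDEADLK simulation on the three classes of
 * caller (illustrative summary):
 *	pthread_mutex_trylock()		-> EBUSY
 *	pthread_mutex_timedlock()	-> ETIME (converted to
 *					   ETIMEDOUT by its wrapper)
 *	pthread_mutex_lock()		-> stall(), a real deadlock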
2097*7376SRoger.Faulkner@Sun.COM */ 2098*7376SRoger.Faulkner@Sun.COM timespec_t ts = *tsp; 2099*7376SRoger.Faulkner@Sun.COM timespec_t rts; 2100*7376SRoger.Faulkner@Sun.COM 2101*7376SRoger.Faulkner@Sun.COM while (__nanosleep(&ts, &rts) == EINTR) 2102*7376SRoger.Faulkner@Sun.COM ts = rts; 2103*7376SRoger.Faulkner@Sun.COM error = ETIME; 2104*7376SRoger.Faulkner@Sun.COM } else { /* simulate a deadlock */ 21054574Sraf stall(); 2106*7376SRoger.Faulkner@Sun.COM } 21074574Sraf break; 21080Sstevel@tonic-gate } 21090Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 21104613Sraf error = mutex_trylock_process(mp, try == MUTEX_LOCK); 21114574Sraf if (error == EBUSY && try == MUTEX_LOCK) 21120Sstevel@tonic-gate error = mutex_lock_kernel(mp, tsp, msp); 21135629Sraf } else { /* USYNC_THREAD */ 21144613Sraf error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK); 21154574Sraf if (error == EBUSY && try == MUTEX_LOCK) 21164574Sraf error = mutex_lock_queue(self, msp, mp, tsp); 21170Sstevel@tonic-gate } 21180Sstevel@tonic-gate 21190Sstevel@tonic-gate switch (error) { 21204574Sraf case 0: 21210Sstevel@tonic-gate case EOWNERDEAD: 21220Sstevel@tonic-gate case ELOCKUNMAPPED: 21234574Sraf if (mtype & LOCK_ROBUST) 21244574Sraf remember_lock(mp); 21250Sstevel@tonic-gate if (msp) 21260Sstevel@tonic-gate record_begin_hold(msp); 21270Sstevel@tonic-gate break; 21280Sstevel@tonic-gate default: 21296247Sraf if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) { 21304574Sraf (void) _ceil_mylist_del(mp); 21314574Sraf if (myprio < ceil) 21324574Sraf _ceil_prio_waive(); 21334574Sraf } 21340Sstevel@tonic-gate if (try == MUTEX_TRY) { 21350Sstevel@tonic-gate if (msp) 21360Sstevel@tonic-gate tdb_incr(msp->mutex_try_fail); 21370Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 21380Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 21390Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 21400Sstevel@tonic-gate } 21410Sstevel@tonic-gate } 21420Sstevel@tonic-gate break; 21430Sstevel@tonic-gate } 21440Sstevel@tonic-gate 21450Sstevel@tonic-gate return (error); 21460Sstevel@tonic-gate } 21470Sstevel@tonic-gate 21480Sstevel@tonic-gate int 21490Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try) 21500Sstevel@tonic-gate { 21510Sstevel@tonic-gate ulwp_t *self = curthread; 21520Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 21530Sstevel@tonic-gate 21540Sstevel@tonic-gate /* 21550Sstevel@tonic-gate * We know that USYNC_PROCESS is set in mtype and that 21560Sstevel@tonic-gate * zero, one, or both of the flags LOCK_RECURSIVE and 21570Sstevel@tonic-gate * LOCK_ERRORCHECK are set, and that no other flags are set. 
21580Sstevel@tonic-gate */ 21594574Sraf ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0); 21600Sstevel@tonic-gate enter_critical(self); 21617255Sraf #if defined(__sparc) && !defined(_LP64) 21627255Sraf /* horrible hack, necessary only on 32-bit sparc */ 21637255Sraf if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 21647255Sraf self->ul_misaligned) { 21657255Sraf if (set_lock_byte(&mp->mutex_lockw) == 0) { 21667255Sraf mp->mutex_ownerpid = udp->pid; 21677255Sraf mp->mutex_owner = (uintptr_t)self; 21687255Sraf exit_critical(self); 21697255Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 21707255Sraf return (0); 21717255Sraf } 21727255Sraf } else 21737255Sraf #endif 21746057Sraf if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) { 21750Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 21766057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 21770Sstevel@tonic-gate exit_critical(self); 21780Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 21790Sstevel@tonic-gate return (0); 21800Sstevel@tonic-gate } 21810Sstevel@tonic-gate exit_critical(self); 21820Sstevel@tonic-gate 21834574Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp)) 21844574Sraf return (mutex_recursion(mp, mtype, try)); 21854574Sraf 21864613Sraf if (try == MUTEX_LOCK) { 21874613Sraf if (mutex_trylock_process(mp, 1) == 0) 21884613Sraf return (0); 21890Sstevel@tonic-gate return (mutex_lock_kernel(mp, tsp, NULL)); 21904613Sraf } 21910Sstevel@tonic-gate 21920Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 21930Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 21940Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 21950Sstevel@tonic-gate } 21960Sstevel@tonic-gate return (EBUSY); 21970Sstevel@tonic-gate } 21980Sstevel@tonic-gate 21990Sstevel@tonic-gate static int 22000Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp) 22010Sstevel@tonic-gate { 22020Sstevel@tonic-gate ulwp_t *self = curthread; 22036247Sraf int mtype = mp->mutex_type; 22040Sstevel@tonic-gate uberflags_t *gflags; 22050Sstevel@tonic-gate 22067255Sraf if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 22077255Sraf self->ul_error_detection && self->ul_misaligned == 0) 22087255Sraf lock_error(mp, "mutex_lock", NULL, "mutex is misaligned"); 22097255Sraf 22100Sstevel@tonic-gate /* 22110Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 22120Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 22130Sstevel@tonic-gate * no error detection, no lock statistics, 22140Sstevel@tonic-gate * and the process has only a single thread. 22150Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 22160Sstevel@tonic-gate */ 22176247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 22186247Sraf self->ul_uberdata->uberflags.uf_all) == 0) { 22190Sstevel@tonic-gate /* 22200Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 
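 *
 * Illustrative contrast: every multithreaded path must win the
 * lock byte with an atomic operation,
 *	if (set_lock_byte(&mp->mutex_lockw) == 0)
 * whereas with exactly one thread a plain test and store suffice:
 *	if (mp->mutex_lockw == 0)
 *		mp->mutex_lockw = LOCKSET;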
22210Sstevel@tonic-gate */ 22220Sstevel@tonic-gate if (mp->mutex_lockw == 0) { 22230Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 22240Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 22250Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 22260Sstevel@tonic-gate return (0); 22270Sstevel@tonic-gate } 22284574Sraf if (mtype && MUTEX_OWNER(mp) == self) 22294574Sraf return (mutex_recursion(mp, mtype, MUTEX_LOCK)); 22300Sstevel@tonic-gate /* 22310Sstevel@tonic-gate * We have reached a deadlock, probably because the 22320Sstevel@tonic-gate * process is executing non-async-signal-safe code in 22330Sstevel@tonic-gate * a signal handler and is attempting to acquire a lock 22340Sstevel@tonic-gate * that it already owns. This is not surprising, given 22350Sstevel@tonic-gate * bad programming practices over the years that have 22360Sstevel@tonic-gate * resulted in applications calling printf() and such 22370Sstevel@tonic-gate * in their signal handlers. Unless the user has told 22380Sstevel@tonic-gate * us that the signal handlers are safe by setting: 22390Sstevel@tonic-gate * export _THREAD_ASYNC_SAFE=1 22400Sstevel@tonic-gate * we return EDEADLK rather than actually deadlocking. 22410Sstevel@tonic-gate */ 22420Sstevel@tonic-gate if (tsp == NULL && 22430Sstevel@tonic-gate MUTEX_OWNER(mp) == self && !self->ul_async_safe) { 22440Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 22450Sstevel@tonic-gate return (EDEADLK); 22460Sstevel@tonic-gate } 22470Sstevel@tonic-gate } 22480Sstevel@tonic-gate 22490Sstevel@tonic-gate /* 22500Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 22510Sstevel@tonic-gate * no error detection, and no lock statistics. 22520Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
22530Sstevel@tonic-gate */ 22540Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 22550Sstevel@tonic-gate (gflags->uf_trs_ted | 22560Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 22570Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 22580Sstevel@tonic-gate return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK)); 22590Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 22600Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 22610Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 22620Sstevel@tonic-gate return (0); 22630Sstevel@tonic-gate } 22644574Sraf if (mtype && MUTEX_OWNER(mp) == self) 22654574Sraf return (mutex_recursion(mp, mtype, MUTEX_LOCK)); 22664613Sraf if (mutex_trylock_adaptive(mp, 1) != 0) 22674574Sraf return (mutex_lock_queue(self, NULL, mp, tsp)); 22684574Sraf return (0); 22690Sstevel@tonic-gate } 22700Sstevel@tonic-gate 22710Sstevel@tonic-gate /* else do it the long way */ 22720Sstevel@tonic-gate return (mutex_lock_internal(mp, tsp, MUTEX_LOCK)); 22730Sstevel@tonic-gate } 22740Sstevel@tonic-gate 22756812Sraf #pragma weak pthread_mutex_lock = mutex_lock 22766812Sraf #pragma weak _mutex_lock = mutex_lock 22770Sstevel@tonic-gate int 22786812Sraf mutex_lock(mutex_t *mp) 22790Sstevel@tonic-gate { 22800Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 22810Sstevel@tonic-gate return (mutex_lock_impl(mp, NULL)); 22820Sstevel@tonic-gate } 22830Sstevel@tonic-gate 22840Sstevel@tonic-gate int 22856812Sraf pthread_mutex_timedlock(pthread_mutex_t *_RESTRICT_KYWD mp, 22866812Sraf const struct timespec *_RESTRICT_KYWD abstime) 22870Sstevel@tonic-gate { 22880Sstevel@tonic-gate timespec_t tslocal; 22890Sstevel@tonic-gate int error; 22900Sstevel@tonic-gate 22910Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 22920Sstevel@tonic-gate abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal); 22936812Sraf error = mutex_lock_impl((mutex_t *)mp, &tslocal); 22940Sstevel@tonic-gate if (error == ETIME) 22950Sstevel@tonic-gate error = ETIMEDOUT; 22960Sstevel@tonic-gate return (error); 22970Sstevel@tonic-gate } 22980Sstevel@tonic-gate 22990Sstevel@tonic-gate int 23006812Sraf pthread_mutex_reltimedlock_np(pthread_mutex_t *_RESTRICT_KYWD mp, 23016812Sraf const struct timespec *_RESTRICT_KYWD reltime) 23020Sstevel@tonic-gate { 23030Sstevel@tonic-gate timespec_t tslocal; 23040Sstevel@tonic-gate int error; 23050Sstevel@tonic-gate 23060Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 23070Sstevel@tonic-gate tslocal = *reltime; 23086812Sraf error = mutex_lock_impl((mutex_t *)mp, &tslocal); 23090Sstevel@tonic-gate if (error == ETIME) 23100Sstevel@tonic-gate error = ETIMEDOUT; 23110Sstevel@tonic-gate return (error); 23120Sstevel@tonic-gate } 23130Sstevel@tonic-gate 23146812Sraf #pragma weak pthread_mutex_trylock = mutex_trylock 23150Sstevel@tonic-gate int 23166812Sraf mutex_trylock(mutex_t *mp) 23170Sstevel@tonic-gate { 23180Sstevel@tonic-gate ulwp_t *self = curthread; 23190Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 23206247Sraf int mtype = mp->mutex_type; 23210Sstevel@tonic-gate uberflags_t *gflags; 23220Sstevel@tonic-gate 23230Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 23246247Sraf 23250Sstevel@tonic-gate /* 23260Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 23270Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 23280Sstevel@tonic-gate * no error 
detection, no lock statistics, 23290Sstevel@tonic-gate * and the process has only a single thread. 23300Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 23310Sstevel@tonic-gate */ 23326247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 23330Sstevel@tonic-gate udp->uberflags.uf_all) == 0) { 23340Sstevel@tonic-gate /* 23350Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 23360Sstevel@tonic-gate */ 23370Sstevel@tonic-gate if (mp->mutex_lockw == 0) { 23380Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 23390Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 23400Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 23410Sstevel@tonic-gate return (0); 23420Sstevel@tonic-gate } 23434574Sraf if (mtype && MUTEX_OWNER(mp) == self) 23444574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 23450Sstevel@tonic-gate return (EBUSY); 23460Sstevel@tonic-gate } 23470Sstevel@tonic-gate 23480Sstevel@tonic-gate /* 23490Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 23500Sstevel@tonic-gate * no error detection, and no lock statistics. 23510Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 23520Sstevel@tonic-gate */ 23530Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 23540Sstevel@tonic-gate (gflags->uf_trs_ted | 23550Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 23560Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 23570Sstevel@tonic-gate return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY)); 23580Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 23590Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 23600Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 23610Sstevel@tonic-gate return (0); 23620Sstevel@tonic-gate } 23634574Sraf if (mtype && MUTEX_OWNER(mp) == self) 23644574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 23654613Sraf if (__td_event_report(self, TD_LOCK_TRY, udp)) { 23664613Sraf self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 23674613Sraf tdb_event(TD_LOCK_TRY, udp); 23680Sstevel@tonic-gate } 23694613Sraf return (EBUSY); 23700Sstevel@tonic-gate } 23710Sstevel@tonic-gate 23720Sstevel@tonic-gate /* else do it the long way */ 23730Sstevel@tonic-gate return (mutex_lock_internal(mp, NULL, MUTEX_TRY)); 23740Sstevel@tonic-gate } 23750Sstevel@tonic-gate 23760Sstevel@tonic-gate int 23774574Sraf mutex_unlock_internal(mutex_t *mp, int retain_robust_flags) 23780Sstevel@tonic-gate { 23790Sstevel@tonic-gate ulwp_t *self = curthread; 23800Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 23810Sstevel@tonic-gate int mtype = mp->mutex_type; 23820Sstevel@tonic-gate tdb_mutex_stats_t *msp; 23834574Sraf int error = 0; 23844574Sraf int release_all; 23850Sstevel@tonic-gate lwpid_t lwpid; 23860Sstevel@tonic-gate 23876812Sraf if ((mtype & LOCK_ERRORCHECK) && !mutex_held(mp)) 23880Sstevel@tonic-gate return (EPERM); 23890Sstevel@tonic-gate 23906812Sraf if (self->ul_error_detection && !mutex_held(mp)) 23910Sstevel@tonic-gate lock_error(mp, "mutex_unlock", NULL, NULL); 23920Sstevel@tonic-gate 23930Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 23940Sstevel@tonic-gate mp->mutex_rcount--; 23950Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 23960Sstevel@tonic-gate return (0); 23970Sstevel@tonic-gate } 23980Sstevel@tonic-gate 23990Sstevel@tonic-gate if ((msp = MUTEX_STATS(mp, udp)) != NULL) 24000Sstevel@tonic-gate (void) 
record_hold_time(msp); 24010Sstevel@tonic-gate 24024574Sraf if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) && 24034574Sraf (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 24044574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 24054574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 24064574Sraf mp->mutex_flag |= LOCK_NOTRECOVERABLE; 24074574Sraf } 24084574Sraf release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); 24094574Sraf 24104574Sraf if (mtype & LOCK_PRIO_INHERIT) { 24110Sstevel@tonic-gate no_preempt(self); 24120Sstevel@tonic-gate mp->mutex_owner = 0; 24136057Sraf /* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */ 24140Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 24154574Sraf mp->mutex_lockw = LOCKCLEAR; 24166247Sraf self->ul_pilocks--; 24174574Sraf error = ___lwp_mutex_unlock(mp); 24180Sstevel@tonic-gate preempt(self); 24190Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 24205629Sraf mutex_unlock_process(mp, release_all); 24210Sstevel@tonic-gate } else { /* USYNC_THREAD */ 24224574Sraf if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) { 24230Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 24240Sstevel@tonic-gate preempt(self); 24250Sstevel@tonic-gate } 24260Sstevel@tonic-gate } 24270Sstevel@tonic-gate 24284574Sraf if (mtype & LOCK_ROBUST) 24294574Sraf forget_lock(mp); 24304574Sraf 24314574Sraf if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) 24324574Sraf _ceil_prio_waive(); 24334574Sraf 24340Sstevel@tonic-gate return (error); 24350Sstevel@tonic-gate } 24360Sstevel@tonic-gate 24376812Sraf #pragma weak pthread_mutex_unlock = mutex_unlock 24386812Sraf #pragma weak _mutex_unlock = mutex_unlock 24390Sstevel@tonic-gate int 24406812Sraf mutex_unlock(mutex_t *mp) 24410Sstevel@tonic-gate { 24420Sstevel@tonic-gate ulwp_t *self = curthread; 24436247Sraf int mtype = mp->mutex_type; 24440Sstevel@tonic-gate uberflags_t *gflags; 24450Sstevel@tonic-gate lwpid_t lwpid; 24460Sstevel@tonic-gate short el; 24470Sstevel@tonic-gate 24480Sstevel@tonic-gate /* 24490Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 24500Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 24510Sstevel@tonic-gate * no error detection, no lock statistics, 24520Sstevel@tonic-gate * and the process has only a single thread. 24530Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 24540Sstevel@tonic-gate */ 24556247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 24566247Sraf self->ul_uberdata->uberflags.uf_all) == 0) { 24570Sstevel@tonic-gate if (mtype) { 24580Sstevel@tonic-gate /* 24590Sstevel@tonic-gate * At this point we know that one or both of the 24600Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 24610Sstevel@tonic-gate */ 24620Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 24630Sstevel@tonic-gate return (EPERM); 24640Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 24650Sstevel@tonic-gate mp->mutex_rcount--; 24660Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 24670Sstevel@tonic-gate return (0); 24680Sstevel@tonic-gate } 24690Sstevel@tonic-gate } 24700Sstevel@tonic-gate /* 24710Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 24720Sstevel@tonic-gate * Also, there can be no waiters. 
24730Sstevel@tonic-gate */ 24740Sstevel@tonic-gate mp->mutex_owner = 0; 24750Sstevel@tonic-gate mp->mutex_lockword = 0; 24760Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 24770Sstevel@tonic-gate return (0); 24780Sstevel@tonic-gate } 24790Sstevel@tonic-gate 24800Sstevel@tonic-gate /* 24810Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 24820Sstevel@tonic-gate * no error detection, and no lock statistics. 24830Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 24840Sstevel@tonic-gate */ 24850Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL) { 24860Sstevel@tonic-gate if (((el = gflags->uf_trs_ted) | mtype) == 0) { 24870Sstevel@tonic-gate fast_unlock: 24885629Sraf if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 24890Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 24900Sstevel@tonic-gate preempt(self); 24910Sstevel@tonic-gate } 24920Sstevel@tonic-gate return (0); 24930Sstevel@tonic-gate } 24940Sstevel@tonic-gate if (el) /* error detection or lock statistics */ 24950Sstevel@tonic-gate goto slow_unlock; 24960Sstevel@tonic-gate if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 24970Sstevel@tonic-gate /* 24980Sstevel@tonic-gate * At this point we know that one or both of the 24990Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 25000Sstevel@tonic-gate */ 25010Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 25020Sstevel@tonic-gate return (EPERM); 25030Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 25040Sstevel@tonic-gate mp->mutex_rcount--; 25050Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 25060Sstevel@tonic-gate return (0); 25070Sstevel@tonic-gate } 25080Sstevel@tonic-gate goto fast_unlock; 25090Sstevel@tonic-gate } 25100Sstevel@tonic-gate if ((mtype & 25110Sstevel@tonic-gate ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 25120Sstevel@tonic-gate /* 25130Sstevel@tonic-gate * At this point we know that zero, one, or both of the 25140Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and 25150Sstevel@tonic-gate * that the USYNC_PROCESS flag is set. 25160Sstevel@tonic-gate */ 25170Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp)) 25180Sstevel@tonic-gate return (EPERM); 25190Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 25200Sstevel@tonic-gate mp->mutex_rcount--; 25210Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 25220Sstevel@tonic-gate return (0); 25230Sstevel@tonic-gate } 25245629Sraf mutex_unlock_process(mp, 0); 25250Sstevel@tonic-gate return (0); 25260Sstevel@tonic-gate } 25270Sstevel@tonic-gate } 25280Sstevel@tonic-gate 25290Sstevel@tonic-gate /* else do it the long way */ 25300Sstevel@tonic-gate slow_unlock: 25314574Sraf return (mutex_unlock_internal(mp, 0)); 25320Sstevel@tonic-gate } 25330Sstevel@tonic-gate 25340Sstevel@tonic-gate /* 25350Sstevel@tonic-gate * Internally to the library, almost all mutex lock/unlock actions 25360Sstevel@tonic-gate * go through these lmutex_ functions, to protect critical regions. 25376812Sraf * We replicate a bit of code from mutex_lock() and mutex_unlock() 25380Sstevel@tonic-gate * to make these functions faster since we know that the mutex type 25390Sstevel@tonic-gate * of all internal locks is USYNC_THREAD. We also know that internal 25400Sstevel@tonic-gate * locking can never fail, so we panic if it does. 
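 *
 * Typical internal usage (an illustrative sketch; link_lock stands
 * for any USYNC_THREAD lock private to the library):
 *	lmutex_lock(&udp->link_lock);
 *	... update library-internal state ...
 *	lmutex_unlock(&udp->link_lock);
 * Since lmutex_lock() also calls enter_critical() and
 * lmutex_unlock() calls exit_critical(), such sections must be
 * kept short.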
25410Sstevel@tonic-gate */ 25420Sstevel@tonic-gate void 25430Sstevel@tonic-gate lmutex_lock(mutex_t *mp) 25440Sstevel@tonic-gate { 25450Sstevel@tonic-gate ulwp_t *self = curthread; 25460Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 25470Sstevel@tonic-gate 25480Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 25490Sstevel@tonic-gate 25500Sstevel@tonic-gate enter_critical(self); 25510Sstevel@tonic-gate /* 25520Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 25530Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 25540Sstevel@tonic-gate */ 25550Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 25560Sstevel@tonic-gate /* 25570Sstevel@tonic-gate * Only one thread exists; the mutex must be free. 25580Sstevel@tonic-gate */ 25590Sstevel@tonic-gate ASSERT(mp->mutex_lockw == 0); 25600Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 25610Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 25620Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 25630Sstevel@tonic-gate } else { 25640Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 25650Sstevel@tonic-gate 25660Sstevel@tonic-gate if (!self->ul_schedctl_called) 25670Sstevel@tonic-gate (void) setup_schedctl(); 25680Sstevel@tonic-gate 25690Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 25700Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 25710Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 25724613Sraf } else if (mutex_trylock_adaptive(mp, 1) != 0) { 25730Sstevel@tonic-gate (void) mutex_lock_queue(self, msp, mp, NULL); 25740Sstevel@tonic-gate } 25750Sstevel@tonic-gate 25760Sstevel@tonic-gate if (msp) 25770Sstevel@tonic-gate record_begin_hold(msp); 25780Sstevel@tonic-gate } 25790Sstevel@tonic-gate } 25800Sstevel@tonic-gate 25810Sstevel@tonic-gate void 25820Sstevel@tonic-gate lmutex_unlock(mutex_t *mp) 25830Sstevel@tonic-gate { 25840Sstevel@tonic-gate ulwp_t *self = curthread; 25850Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 25860Sstevel@tonic-gate 25870Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 25880Sstevel@tonic-gate 25890Sstevel@tonic-gate /* 25900Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 25910Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 25920Sstevel@tonic-gate */ 25930Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 25940Sstevel@tonic-gate /* 25950Sstevel@tonic-gate * Only one thread exists so there can be no waiters. 25960Sstevel@tonic-gate */ 25970Sstevel@tonic-gate mp->mutex_owner = 0; 25980Sstevel@tonic-gate mp->mutex_lockword = 0; 25990Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 26000Sstevel@tonic-gate } else { 26010Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 26020Sstevel@tonic-gate lwpid_t lwpid; 26030Sstevel@tonic-gate 26040Sstevel@tonic-gate if (msp) 26050Sstevel@tonic-gate (void) record_hold_time(msp); 26064574Sraf if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 26070Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 26080Sstevel@tonic-gate preempt(self); 26090Sstevel@tonic-gate } 26100Sstevel@tonic-gate } 26110Sstevel@tonic-gate exit_critical(self); 26120Sstevel@tonic-gate } 26130Sstevel@tonic-gate 26142248Sraf /* 26152248Sraf * For specialized code in libc, like the asynchronous i/o code, 26162248Sraf * the following sig_*() locking primitives are used in order 26172248Sraf * to make the code asynchronous signal safe. 
Signals are 26182248Sraf * deferred while locks acquired by these functions are held. 26192248Sraf */ 26202248Sraf void 26212248Sraf sig_mutex_lock(mutex_t *mp) 26222248Sraf { 26232248Sraf sigoff(curthread); 26246515Sraf (void) mutex_lock(mp); 26252248Sraf } 26262248Sraf 26272248Sraf void 26282248Sraf sig_mutex_unlock(mutex_t *mp) 26292248Sraf { 26306515Sraf (void) mutex_unlock(mp); 26312248Sraf sigon(curthread); 26322248Sraf } 26332248Sraf 26342248Sraf int 26352248Sraf sig_mutex_trylock(mutex_t *mp) 26362248Sraf { 26372248Sraf int error; 26382248Sraf 26392248Sraf sigoff(curthread); 26406515Sraf if ((error = mutex_trylock(mp)) != 0) 26412248Sraf sigon(curthread); 26422248Sraf return (error); 26432248Sraf } 26442248Sraf 26452248Sraf /* 26462248Sraf * sig_cond_wait() is a cancellation point. 26472248Sraf */ 26482248Sraf int 26492248Sraf sig_cond_wait(cond_t *cv, mutex_t *mp) 26502248Sraf { 26512248Sraf int error; 26522248Sraf 26532248Sraf ASSERT(curthread->ul_sigdefer != 0); 26546515Sraf pthread_testcancel(); 26555891Sraf error = __cond_wait(cv, mp); 26562248Sraf if (error == EINTR && curthread->ul_cursig) { 26572248Sraf sig_mutex_unlock(mp); 26582248Sraf /* take the deferred signal here */ 26592248Sraf sig_mutex_lock(mp); 26602248Sraf } 26616515Sraf pthread_testcancel(); 26622248Sraf return (error); 26632248Sraf } 26642248Sraf 26652248Sraf /* 26662248Sraf * sig_cond_reltimedwait() is a cancellation point. 26672248Sraf */ 26682248Sraf int 26692248Sraf sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts) 26702248Sraf { 26712248Sraf int error; 26722248Sraf 26732248Sraf ASSERT(curthread->ul_sigdefer != 0); 26746515Sraf pthread_testcancel(); 26755891Sraf error = __cond_reltimedwait(cv, mp, ts); 26762248Sraf if (error == EINTR && curthread->ul_cursig) { 26772248Sraf sig_mutex_unlock(mp); 26782248Sraf /* take the deferred signal here */ 26792248Sraf sig_mutex_lock(mp); 26802248Sraf } 26816515Sraf pthread_testcancel(); 26822248Sraf return (error); 26832248Sraf } 26842248Sraf 26855891Sraf /* 26865891Sraf * For specialized code in libc, like the stdio code, 26875891Sraf * the following cancel_safe_*() locking primitives are used in 26885891Sraf * order to make the code cancellation-safe. Cancellation is 26895891Sraf * deferred while locks acquired by these functions are held. 26905891Sraf */ 26915891Sraf void 26925891Sraf cancel_safe_mutex_lock(mutex_t *mp) 26935891Sraf { 26946515Sraf (void) mutex_lock(mp); 26955891Sraf curthread->ul_libc_locks++; 26965891Sraf } 26975891Sraf 26985891Sraf int 26995891Sraf cancel_safe_mutex_trylock(mutex_t *mp) 27005891Sraf { 27015891Sraf int error; 27025891Sraf 27036515Sraf if ((error = mutex_trylock(mp)) == 0) 27045891Sraf curthread->ul_libc_locks++; 27055891Sraf return (error); 27065891Sraf } 27075891Sraf 27085891Sraf void 27095891Sraf cancel_safe_mutex_unlock(mutex_t *mp) 27105891Sraf { 27115891Sraf ulwp_t *self = curthread; 27125891Sraf 27135891Sraf ASSERT(self->ul_libc_locks != 0); 27145891Sraf 27156515Sraf (void) mutex_unlock(mp); 27165891Sraf 27175891Sraf /* 27185891Sraf * Decrement the count of locks held by cancel_safe_mutex_lock(). 27195891Sraf * If we are then in a position to terminate cleanly and 27205891Sraf * if there is a pending cancellation and cancellation 27215891Sraf * is not disabled and we received EINTR from a recent 27225891Sraf * system call, then perform the cancellation action now.
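 *
 * Typical usage, modeled on stdio (an illustrative sketch; the
 * stream lock shown is hypothetical):
 *	cancel_safe_mutex_lock(&stream_lock);
 *	... operate on the stream; cancellation stays deferred ...
 *	cancel_safe_mutex_unlock(&stream_lock);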
27235891Sraf */ 27245891Sraf if (--self->ul_libc_locks == 0 && 27255891Sraf !(self->ul_vfork | self->ul_nocancel | 27265891Sraf self->ul_critical | self->ul_sigdefer) && 27275891Sraf cancel_active()) 27286812Sraf pthread_exit(PTHREAD_CANCELED); 27295891Sraf } 27305891Sraf 27310Sstevel@tonic-gate static int 27320Sstevel@tonic-gate shared_mutex_held(mutex_t *mparg) 27330Sstevel@tonic-gate { 27340Sstevel@tonic-gate /* 27354574Sraf * The 'volatile' is necessary to make sure the compiler doesn't 27364574Sraf * reorder the tests of the various components of the mutex. 27374574Sraf * They must be tested in this order: 27384574Sraf * mutex_lockw 27394574Sraf * mutex_owner 27404574Sraf * mutex_ownerpid 27414574Sraf * This relies on the fact that everywhere mutex_lockw is cleared, 27424574Sraf * mutex_owner and mutex_ownerpid are cleared before mutex_lockw 27434574Sraf * is cleared, and that everywhere mutex_lockw is set, mutex_owner 27444574Sraf * and mutex_ownerpid are set after mutex_lockw is set, and that 27454574Sraf * mutex_lockw is set or cleared with a memory barrier. 27460Sstevel@tonic-gate */ 27470Sstevel@tonic-gate volatile mutex_t *mp = (volatile mutex_t *)mparg; 27480Sstevel@tonic-gate ulwp_t *self = curthread; 27490Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 27500Sstevel@tonic-gate 27514574Sraf return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid); 27520Sstevel@tonic-gate } 27530Sstevel@tonic-gate 27546812Sraf #pragma weak _mutex_held = mutex_held 27550Sstevel@tonic-gate int 27566812Sraf mutex_held(mutex_t *mparg) 27570Sstevel@tonic-gate { 27584574Sraf volatile mutex_t *mp = (volatile mutex_t *)mparg; 27594574Sraf 27604574Sraf if (mparg->mutex_type & USYNC_PROCESS) 27614574Sraf return (shared_mutex_held(mparg)); 27620Sstevel@tonic-gate return (MUTEX_OWNED(mp, curthread)); 27630Sstevel@tonic-gate } 27640Sstevel@tonic-gate 27656812Sraf #pragma weak pthread_mutex_destroy = mutex_destroy 27666812Sraf #pragma weak _mutex_destroy = mutex_destroy 27670Sstevel@tonic-gate int 27686812Sraf mutex_destroy(mutex_t *mp) 27690Sstevel@tonic-gate { 27704574Sraf if (mp->mutex_type & USYNC_PROCESS) 27714574Sraf forget_lock(mp); 27726515Sraf (void) memset(mp, 0, sizeof (*mp)); 27730Sstevel@tonic-gate tdb_sync_obj_deregister(mp); 27740Sstevel@tonic-gate return (0); 27750Sstevel@tonic-gate } 27760Sstevel@tonic-gate 27776812Sraf #pragma weak pthread_mutex_consistent_np = mutex_consistent 27784574Sraf int 27796812Sraf mutex_consistent(mutex_t *mp) 27804574Sraf { 27814574Sraf /* 27824574Sraf * Do this only for an inconsistent, initialized robust lock 27834574Sraf * that we hold. For all other cases, return EINVAL. 27844574Sraf */ 27856812Sraf if (mutex_held(mp) && 27864574Sraf (mp->mutex_type & LOCK_ROBUST) && 27874574Sraf (mp->mutex_flag & LOCK_INITED) && 27884574Sraf (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 27894574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 27904574Sraf mp->mutex_rcount = 0; 27914574Sraf return (0); 27924574Sraf } 27934574Sraf return (EINVAL); 27944574Sraf } 27954574Sraf 27960Sstevel@tonic-gate /* 27970Sstevel@tonic-gate * Spin locks are separate from ordinary mutexes, 27980Sstevel@tonic-gate * but we use the same data structure for them. 
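 */

/*
 * A minimal usage sketch, not part of the original source: a
 * process-private spin lock protecting a shared counter through
 * the pthread_spin_*() interfaces implemented below.  It assumes
 * a standalone program that includes <pthread.h>; the example_*
 * names are hypothetical.
 */
#include <pthread.h>

static pthread_spinlock_t example_lock;
static int example_counter;

static void
example_init(void)
{
	(void) pthread_spin_init(&example_lock, PTHREAD_PROCESS_PRIVATE);
}

static void
example_increment(void)
{
	/*
	 * Keep spin-lock critical sections short: a contending
	 * thread burns cpu in pthread_spin_lock() until the lock
	 * holder calls pthread_spin_unlock().
	 */
	(void) pthread_spin_lock(&example_lock);
	example_counter++;
	(void) pthread_spin_unlock(&example_lock);
}

/*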
27990Sstevel@tonic-gate */ 28000Sstevel@tonic-gate 28010Sstevel@tonic-gate int 28026812Sraf pthread_spin_init(pthread_spinlock_t *lock, int pshared) 28030Sstevel@tonic-gate { 28040Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 28050Sstevel@tonic-gate 28066515Sraf (void) memset(mp, 0, sizeof (*mp)); 28070Sstevel@tonic-gate if (pshared == PTHREAD_PROCESS_SHARED) 28080Sstevel@tonic-gate mp->mutex_type = USYNC_PROCESS; 28090Sstevel@tonic-gate else 28100Sstevel@tonic-gate mp->mutex_type = USYNC_THREAD; 28110Sstevel@tonic-gate mp->mutex_flag = LOCK_INITED; 28120Sstevel@tonic-gate mp->mutex_magic = MUTEX_MAGIC; 28137255Sraf 28147255Sraf /* 28157255Sraf * This should be at the beginning of the function, 28167255Sraf * but for the sake of old broken applications that 28177255Sraf * do not have proper alignment for their mutexes 28187255Sraf * (and don't check the return code from pthread_spin_init), 28197255Sraf * we put it here, after initializing the mutex regardless. 28207255Sraf */ 28217255Sraf if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 28227255Sraf curthread->ul_misaligned == 0) 28237255Sraf return (EINVAL); 28247255Sraf 28250Sstevel@tonic-gate return (0); 28260Sstevel@tonic-gate } 28270Sstevel@tonic-gate 28280Sstevel@tonic-gate int 28296812Sraf pthread_spin_destroy(pthread_spinlock_t *lock) 28300Sstevel@tonic-gate { 28316515Sraf (void) memset(lock, 0, sizeof (*lock)); 28320Sstevel@tonic-gate return (0); 28330Sstevel@tonic-gate } 28340Sstevel@tonic-gate 28350Sstevel@tonic-gate int 28366812Sraf pthread_spin_trylock(pthread_spinlock_t *lock) 28370Sstevel@tonic-gate { 28380Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 28390Sstevel@tonic-gate ulwp_t *self = curthread; 28400Sstevel@tonic-gate int error = 0; 28410Sstevel@tonic-gate 28420Sstevel@tonic-gate no_preempt(self); 28430Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) != 0) 28440Sstevel@tonic-gate error = EBUSY; 28450Sstevel@tonic-gate else { 28460Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 28470Sstevel@tonic-gate if (mp->mutex_type == USYNC_PROCESS) 28480Sstevel@tonic-gate mp->mutex_ownerpid = self->ul_uberdata->pid; 28490Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 28500Sstevel@tonic-gate } 28510Sstevel@tonic-gate preempt(self); 28520Sstevel@tonic-gate return (error); 28530Sstevel@tonic-gate } 28540Sstevel@tonic-gate 28550Sstevel@tonic-gate int 28566812Sraf pthread_spin_lock(pthread_spinlock_t *lock) 28570Sstevel@tonic-gate { 28584574Sraf mutex_t *mp = (mutex_t *)lock; 28594574Sraf ulwp_t *self = curthread; 28604574Sraf volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw; 28614574Sraf int count = 0; 28624574Sraf 28634574Sraf ASSERT(!self->ul_critical || self->ul_bindflags); 28644574Sraf 28654574Sraf DTRACE_PROBE1(plockstat, mutex__spin, mp); 28664574Sraf 28670Sstevel@tonic-gate /* 28680Sstevel@tonic-gate * We don't care whether the owner is running on a processor. 28690Sstevel@tonic-gate * We just spin because that's what this interface requires. 
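 * The loop below is a test-and-test-and-set: we spin reading the
 * lock byte and attempt the atomic set_lock_byte() only when the
 * byte appears clear.  no_preempt() is entered before the attempt
 * so that we cannot be preempted between taking the lock byte
 * and recording ourself as the owner.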
28700Sstevel@tonic-gate */ 28710Sstevel@tonic-gate for (;;) { 28720Sstevel@tonic-gate if (*lockp == 0) { /* lock byte appears to be clear */ 28734574Sraf no_preempt(self); 28744574Sraf if (set_lock_byte(lockp) == 0) 28754574Sraf break; 28764574Sraf preempt(self); 28770Sstevel@tonic-gate } 28785629Sraf if (count < INT_MAX) 28795629Sraf count++; 28800Sstevel@tonic-gate SMT_PAUSE(); 28810Sstevel@tonic-gate } 28824574Sraf mp->mutex_owner = (uintptr_t)self; 28834574Sraf if (mp->mutex_type == USYNC_PROCESS) 28844574Sraf mp->mutex_ownerpid = self->ul_uberdata->pid; 28854574Sraf preempt(self); 28865629Sraf if (count) { 28875629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 28885629Sraf } 28894574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 28904574Sraf return (0); 28910Sstevel@tonic-gate } 28920Sstevel@tonic-gate 28930Sstevel@tonic-gate int 28946812Sraf pthread_spin_unlock(pthread_spinlock_t *lock) 28950Sstevel@tonic-gate { 28960Sstevel@tonic-gate mutex_t *mp = (mutex_t *)lock; 28970Sstevel@tonic-gate ulwp_t *self = curthread; 28980Sstevel@tonic-gate 28990Sstevel@tonic-gate no_preempt(self); 29000Sstevel@tonic-gate mp->mutex_owner = 0; 29010Sstevel@tonic-gate mp->mutex_ownerpid = 0; 29020Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 29034570Sraf (void) atomic_swap_32(&mp->mutex_lockword, 0); 29040Sstevel@tonic-gate preempt(self); 29050Sstevel@tonic-gate return (0); 29060Sstevel@tonic-gate } 29070Sstevel@tonic-gate 29085629Sraf #define INITIAL_LOCKS 8 /* initial size of ul_heldlocks.array */ 29094574Sraf 29104574Sraf /* 29114574Sraf * Find/allocate an entry for 'lock' in our array of held locks. 29124574Sraf */ 29134574Sraf static mutex_t ** 29144574Sraf find_lock_entry(mutex_t *lock) 29154574Sraf { 29164574Sraf ulwp_t *self = curthread; 29174574Sraf mutex_t **remembered = NULL; 29184574Sraf mutex_t **lockptr; 29194574Sraf uint_t nlocks; 29204574Sraf 29214574Sraf if ((nlocks = self->ul_heldlockcnt) != 0) 29224574Sraf lockptr = self->ul_heldlocks.array; 29234574Sraf else { 29244574Sraf nlocks = 1; 29254574Sraf lockptr = &self->ul_heldlocks.single; 29264574Sraf } 29274574Sraf 29284574Sraf for (; nlocks; nlocks--, lockptr++) { 29294574Sraf if (*lockptr == lock) 29304574Sraf return (lockptr); 29314574Sraf if (*lockptr == NULL && remembered == NULL) 29324574Sraf remembered = lockptr; 29334574Sraf } 29344574Sraf if (remembered != NULL) { 29354574Sraf *remembered = lock; 29364574Sraf return (remembered); 29374574Sraf } 29384574Sraf 29394574Sraf /* 29404574Sraf * No entry available. Allocate more space, converting 29414574Sraf * the single entry into an array of entries if necessary. 29424574Sraf */ 29434574Sraf if ((nlocks = self->ul_heldlockcnt) == 0) { 29444574Sraf /* 29454574Sraf * Initial allocation of the array. 29464574Sraf * Convert the single entry into an array. 29474574Sraf */ 29484574Sraf self->ul_heldlockcnt = nlocks = INITIAL_LOCKS; 29494574Sraf lockptr = lmalloc(nlocks * sizeof (mutex_t *)); 29504574Sraf /* 29514574Sraf * The single entry becomes the first entry in the array. 29524574Sraf */ 29534574Sraf *lockptr = self->ul_heldlocks.single; 29544574Sraf self->ul_heldlocks.array = lockptr; 29554574Sraf /* 29564574Sraf * Return the next available entry in the array. 29574574Sraf */ 29584574Sraf *++lockptr = lock; 29594574Sraf return (lockptr); 29604574Sraf } 29614574Sraf /* 29624574Sraf * Reallocate the array, double the size each time. 
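 * Doubling keeps the insertion cost amortized-constant: the copy
 * below is proportional to the number of entries, but occurs only
 * after the entry count has doubled since the previous copy.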
29634574Sraf */ 29644574Sraf lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *)); 29656515Sraf (void) memcpy(lockptr, self->ul_heldlocks.array, 29664574Sraf nlocks * sizeof (mutex_t *)); 29674574Sraf lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *)); 29684574Sraf self->ul_heldlocks.array = lockptr; 29694574Sraf self->ul_heldlockcnt *= 2; 29704574Sraf /* 29714574Sraf * Return the next available entry in the newly allocated array. 29724574Sraf */ 29734574Sraf *(lockptr += nlocks) = lock; 29744574Sraf return (lockptr); 29754574Sraf } 29764574Sraf 29774574Sraf /* 29784574Sraf * Insert 'lock' into our list of held locks. 29794574Sraf * Currently only used for LOCK_ROBUST mutexes. 29804574Sraf */ 29814574Sraf void 29824574Sraf remember_lock(mutex_t *lock) 29834574Sraf { 29844574Sraf (void) find_lock_entry(lock); 29854574Sraf } 29864574Sraf 29874574Sraf /* 29884574Sraf * Remove 'lock' from our list of held locks. 29894574Sraf * Currently only used for LOCK_ROBUST mutexes. 29904574Sraf */ 29914574Sraf void 29924574Sraf forget_lock(mutex_t *lock) 29934574Sraf { 29944574Sraf *find_lock_entry(lock) = NULL; 29954574Sraf } 29964574Sraf 29974574Sraf /* 29984574Sraf * Free the array of held locks. 29994574Sraf */ 30004574Sraf void 30014574Sraf heldlock_free(ulwp_t *ulwp) 30024574Sraf { 30034574Sraf uint_t nlocks; 30044574Sraf 30054574Sraf if ((nlocks = ulwp->ul_heldlockcnt) != 0) 30064574Sraf lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *)); 30074574Sraf ulwp->ul_heldlockcnt = 0; 30084574Sraf ulwp->ul_heldlocks.array = NULL; 30094574Sraf } 30104574Sraf 30114574Sraf /* 30124574Sraf * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD. 30134574Sraf * Called from _thrp_exit() to deal with abandoned locks. 30144574Sraf */ 30154574Sraf void 30164574Sraf heldlock_exit(void) 30174574Sraf { 30184574Sraf ulwp_t *self = curthread; 30194574Sraf mutex_t **lockptr; 30204574Sraf uint_t nlocks; 30214574Sraf mutex_t *mp; 30224574Sraf 30234574Sraf if ((nlocks = self->ul_heldlockcnt) != 0) 30244574Sraf lockptr = self->ul_heldlocks.array; 30254574Sraf else { 30264574Sraf nlocks = 1; 30274574Sraf lockptr = &self->ul_heldlocks.single; 30284574Sraf } 30294574Sraf 30304574Sraf for (; nlocks; nlocks--, lockptr++) { 30314574Sraf /* 30324574Sraf * The kernel takes care of transitioning held 30334574Sraf * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD. 30344574Sraf * We avoid that case here. 
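 * Hence the test below accepts only mutexes whose type has
 * LOCK_ROBUST set and LOCK_PRIO_INHERIT clear.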
30354574Sraf */ 30364574Sraf if ((mp = *lockptr) != NULL && 30376812Sraf mutex_held(mp) && 30384574Sraf (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) == 30394574Sraf LOCK_ROBUST) { 30404574Sraf mp->mutex_rcount = 0; 30414574Sraf if (!(mp->mutex_flag & LOCK_UNMAPPED)) 30424574Sraf mp->mutex_flag |= LOCK_OWNERDEAD; 30434574Sraf (void) mutex_unlock_internal(mp, 1); 30444574Sraf } 30454574Sraf } 30464574Sraf 30474574Sraf heldlock_free(self); 30484574Sraf } 30494574Sraf 30506812Sraf #pragma weak _cond_init = cond_init 30510Sstevel@tonic-gate /* ARGSUSED2 */ 30520Sstevel@tonic-gate int 30536812Sraf cond_init(cond_t *cvp, int type, void *arg) 30540Sstevel@tonic-gate { 30550Sstevel@tonic-gate if (type != USYNC_THREAD && type != USYNC_PROCESS) 30560Sstevel@tonic-gate return (EINVAL); 30576515Sraf (void) memset(cvp, 0, sizeof (*cvp)); 30580Sstevel@tonic-gate cvp->cond_type = (uint16_t)type; 30590Sstevel@tonic-gate cvp->cond_magic = COND_MAGIC; 30607255Sraf 30617255Sraf /* 30627255Sraf * This should be at the beginning of the function, 30637255Sraf * but for the sake of old broken applications that 30647255Sraf * do not have proper alignment for their condvars 30657255Sraf * (and don't check the return code from cond_init), 30667255Sraf * we put it here, after initializing the condvar regardless. 30677255Sraf */ 30687255Sraf if (((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1)) && 30697255Sraf curthread->ul_misaligned == 0) 30707255Sraf return (EINVAL); 30717255Sraf 30720Sstevel@tonic-gate return (0); 30730Sstevel@tonic-gate } 30740Sstevel@tonic-gate 30750Sstevel@tonic-gate /* 30760Sstevel@tonic-gate * cond_sleep_queue(): utility function for cond_wait_queue(). 30770Sstevel@tonic-gate * 30780Sstevel@tonic-gate * Go to sleep on a condvar sleep queue, expect to be waked up 30790Sstevel@tonic-gate * by someone calling cond_signal() or cond_broadcast() or due 30800Sstevel@tonic-gate * to receiving a UNIX signal or being cancelled, or just simply 30810Sstevel@tonic-gate * due to a spurious wakeup (like someone calling forkall()). 30820Sstevel@tonic-gate * 30830Sstevel@tonic-gate * The associated mutex is *not* reacquired before returning. 30840Sstevel@tonic-gate * That must be done by the caller of cond_sleep_queue(). 30850Sstevel@tonic-gate */ 30864574Sraf static int 30870Sstevel@tonic-gate cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 30880Sstevel@tonic-gate { 30890Sstevel@tonic-gate ulwp_t *self = curthread; 30900Sstevel@tonic-gate queue_head_t *qp; 30910Sstevel@tonic-gate queue_head_t *mqp; 30920Sstevel@tonic-gate lwpid_t lwpid; 30930Sstevel@tonic-gate int signalled; 30940Sstevel@tonic-gate int error; 30956247Sraf int cv_wake; 30964574Sraf int release_all; 30970Sstevel@tonic-gate 30980Sstevel@tonic-gate /* 30990Sstevel@tonic-gate * Put ourself on the CV sleep queue, unlock the mutex, then 31000Sstevel@tonic-gate * park ourself and unpark a candidate lwp to grab the mutex. 31010Sstevel@tonic-gate * We must go onto the CV sleep queue before dropping the 31020Sstevel@tonic-gate * mutex in order to guarantee atomicity of the operation.
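 * If we dropped the mutex first, a cond_signal() arriving in the
 * window between the unlock and the enqueue would find an empty
 * sleep queue and the wakeup would be lost.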
31030Sstevel@tonic-gate */ 31040Sstevel@tonic-gate self->ul_sp = stkptr(); 31050Sstevel@tonic-gate qp = queue_lock(cvp, CV); 31066247Sraf enqueue(qp, self, 0); 31070Sstevel@tonic-gate cvp->cond_waiters_user = 1; 31080Sstevel@tonic-gate self->ul_cvmutex = mp; 31096247Sraf self->ul_cv_wake = cv_wake = (tsp != NULL); 31100Sstevel@tonic-gate self->ul_signalled = 0; 31114574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 31124574Sraf mp->mutex_flag &= ~LOCK_OWNERDEAD; 31134574Sraf mp->mutex_flag |= LOCK_NOTRECOVERABLE; 31144574Sraf } 31154574Sraf release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); 31164574Sraf lwpid = mutex_unlock_queue(mp, release_all); 31170Sstevel@tonic-gate for (;;) { 31180Sstevel@tonic-gate set_parking_flag(self, 1); 31190Sstevel@tonic-gate queue_unlock(qp); 31200Sstevel@tonic-gate if (lwpid != 0) { 31210Sstevel@tonic-gate lwpid = preempt_unpark(self, lwpid); 31220Sstevel@tonic-gate preempt(self); 31230Sstevel@tonic-gate } 31240Sstevel@tonic-gate /* 31250Sstevel@tonic-gate * We may have a deferred signal present, 31260Sstevel@tonic-gate * in which case we should return EINTR. 31270Sstevel@tonic-gate * Also, we may have received a SIGCANCEL; if so 31280Sstevel@tonic-gate * and we are cancelable we should return EINTR. 31290Sstevel@tonic-gate * We force an immediate EINTR return from 31300Sstevel@tonic-gate * __lwp_park() by turning our parking flag off. 31310Sstevel@tonic-gate */ 31320Sstevel@tonic-gate if (self->ul_cursig != 0 || 31330Sstevel@tonic-gate (self->ul_cancelable && self->ul_cancel_pending)) 31340Sstevel@tonic-gate set_parking_flag(self, 0); 31350Sstevel@tonic-gate /* 31360Sstevel@tonic-gate * __lwp_park() will return the residual time in tsp 31370Sstevel@tonic-gate * if we are unparked before the timeout expires. 31380Sstevel@tonic-gate */ 31390Sstevel@tonic-gate error = __lwp_park(tsp, lwpid); 31400Sstevel@tonic-gate set_parking_flag(self, 0); 31410Sstevel@tonic-gate lwpid = 0; /* unpark the other lwp only once */ 31420Sstevel@tonic-gate /* 31430Sstevel@tonic-gate * We were waked up by cond_signal(), cond_broadcast(), 31440Sstevel@tonic-gate * by an interrupt or timeout (EINTR or ETIME), 31450Sstevel@tonic-gate * or we may just have gotten a spurious wakeup. 31460Sstevel@tonic-gate */ 31470Sstevel@tonic-gate qp = queue_lock(cvp, CV); 31486247Sraf if (!cv_wake) 31496247Sraf mqp = queue_lock(mp, MX); 31500Sstevel@tonic-gate if (self->ul_sleepq == NULL) 31510Sstevel@tonic-gate break; 31520Sstevel@tonic-gate /* 31530Sstevel@tonic-gate * We are on either the condvar sleep queue or the 31541893Sraf * mutex sleep queue. Break out of the sleep if we 31551893Sraf * were interrupted or we timed out (EINTR or ETIME). 31560Sstevel@tonic-gate * Else this is a spurious wakeup; continue the loop. 31570Sstevel@tonic-gate */ 31586247Sraf if (!cv_wake && self->ul_sleepq == mqp) { /* mutex queue */ 31591893Sraf if (error) { 31606247Sraf mp->mutex_waiters = dequeue_self(mqp); 31611893Sraf break; 31621893Sraf } 31631893Sraf tsp = NULL; /* no more timeout */ 31641893Sraf } else if (self->ul_sleepq == qp) { /* condvar queue */ 31650Sstevel@tonic-gate if (error) { 31666247Sraf cvp->cond_waiters_user = dequeue_self(qp); 31670Sstevel@tonic-gate break; 31680Sstevel@tonic-gate } 31690Sstevel@tonic-gate /* 31700Sstevel@tonic-gate * Else a spurious wakeup on the condvar queue. 31710Sstevel@tonic-gate * __lwp_park() has already adjusted the timeout. 
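 * Looping around to park again therefore preserves the caller's
 * original total timeout.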
31720Sstevel@tonic-gate */ 31730Sstevel@tonic-gate } else { 31740Sstevel@tonic-gate thr_panic("cond_sleep_queue(): thread not on queue"); 31750Sstevel@tonic-gate } 31766247Sraf if (!cv_wake) 31776247Sraf queue_unlock(mqp); 31780Sstevel@tonic-gate } 31790Sstevel@tonic-gate 31800Sstevel@tonic-gate self->ul_sp = 0; 31816247Sraf self->ul_cv_wake = 0; 31826247Sraf ASSERT(self->ul_cvmutex == NULL); 31830Sstevel@tonic-gate ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 31840Sstevel@tonic-gate self->ul_wchan == NULL); 31850Sstevel@tonic-gate 31860Sstevel@tonic-gate signalled = self->ul_signalled; 31870Sstevel@tonic-gate self->ul_signalled = 0; 31880Sstevel@tonic-gate queue_unlock(qp); 31896247Sraf if (!cv_wake) 31906247Sraf queue_unlock(mqp); 31910Sstevel@tonic-gate 31920Sstevel@tonic-gate /* 31930Sstevel@tonic-gate * If we were concurrently cond_signal()d and any of: 31940Sstevel@tonic-gate * received a UNIX signal, were cancelled, or got a timeout, 31950Sstevel@tonic-gate * then perform another cond_signal() to avoid consuming it. 31960Sstevel@tonic-gate */ 31970Sstevel@tonic-gate if (error && signalled) 31986812Sraf (void) cond_signal(cvp); 31990Sstevel@tonic-gate 32000Sstevel@tonic-gate return (error); 32010Sstevel@tonic-gate } 32020Sstevel@tonic-gate 32037255Sraf static void 32047255Sraf cond_wait_check_alignment(cond_t *cvp, mutex_t *mp) 32057255Sraf { 32067255Sraf if ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) 32077255Sraf lock_error(mp, "cond_wait", cvp, "mutex is misaligned"); 32087255Sraf if ((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1)) 32097255Sraf lock_error(mp, "cond_wait", cvp, "condvar is misaligned"); 32107255Sraf } 32117255Sraf 32120Sstevel@tonic-gate int 32135629Sraf cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 32140Sstevel@tonic-gate { 32150Sstevel@tonic-gate ulwp_t *self = curthread; 32160Sstevel@tonic-gate int error; 32174574Sraf int merror; 32180Sstevel@tonic-gate 32197255Sraf if (self->ul_error_detection && self->ul_misaligned == 0) 32207255Sraf cond_wait_check_alignment(cvp, mp); 32217255Sraf 32220Sstevel@tonic-gate /* 32230Sstevel@tonic-gate * The old thread library was programmed to defer signals 32240Sstevel@tonic-gate * while in cond_wait() so that the associated mutex would 32250Sstevel@tonic-gate * be guaranteed to be held when the application signal 32260Sstevel@tonic-gate * handler was invoked. 32270Sstevel@tonic-gate * 32280Sstevel@tonic-gate * We do not behave this way by default; the state of the 32290Sstevel@tonic-gate * associated mutex in the signal handler is undefined. 32300Sstevel@tonic-gate * 32310Sstevel@tonic-gate * To accommodate applications that depend on the old 32320Sstevel@tonic-gate * behavior, the _THREAD_COND_WAIT_DEFER environment 32330Sstevel@tonic-gate * variable can be set to 1 and we will behave in the 32340Sstevel@tonic-gate * old way with respect to cond_wait(). 32350Sstevel@tonic-gate */ 32360Sstevel@tonic-gate if (self->ul_cond_wait_defer) 32370Sstevel@tonic-gate sigoff(self); 32380Sstevel@tonic-gate 32390Sstevel@tonic-gate error = cond_sleep_queue(cvp, mp, tsp); 32400Sstevel@tonic-gate 32410Sstevel@tonic-gate /* 32420Sstevel@tonic-gate * Reacquire the mutex. 32430Sstevel@tonic-gate */ 32445629Sraf if ((merror = mutex_lock_impl(mp, NULL)) != 0) 32454574Sraf error = merror; 32460Sstevel@tonic-gate 32470Sstevel@tonic-gate /* 32480Sstevel@tonic-gate * Take any deferred signal now, after we have reacquired the mutex. 
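 * sigon() undoes the sigoff() above; a signal deferred during the
 * wait is taken here, with the mutex held, giving the old-library
 * behavior described above.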
32490Sstevel@tonic-gate */ 32500Sstevel@tonic-gate if (self->ul_cond_wait_defer) 32510Sstevel@tonic-gate sigon(self); 32520Sstevel@tonic-gate 32530Sstevel@tonic-gate return (error); 32540Sstevel@tonic-gate } 32550Sstevel@tonic-gate 32560Sstevel@tonic-gate /* 32570Sstevel@tonic-gate * cond_sleep_kernel(): utility function for cond_wait_kernel(). 32580Sstevel@tonic-gate * See the comment ahead of cond_sleep_queue(), above. 32590Sstevel@tonic-gate */ 32604574Sraf static int 32610Sstevel@tonic-gate cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 32620Sstevel@tonic-gate { 32630Sstevel@tonic-gate int mtype = mp->mutex_type; 32640Sstevel@tonic-gate ulwp_t *self = curthread; 32650Sstevel@tonic-gate int error; 32660Sstevel@tonic-gate 32674574Sraf if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) 32684574Sraf _ceil_prio_waive(); 32690Sstevel@tonic-gate 32700Sstevel@tonic-gate self->ul_sp = stkptr(); 32710Sstevel@tonic-gate self->ul_wchan = cvp; 32720Sstevel@tonic-gate mp->mutex_owner = 0; 32736057Sraf /* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */ 32746247Sraf if (mtype & LOCK_PRIO_INHERIT) { 32750Sstevel@tonic-gate mp->mutex_lockw = LOCKCLEAR; 32766247Sraf self->ul_pilocks--; 32776247Sraf } 32780Sstevel@tonic-gate /* 32790Sstevel@tonic-gate * ___lwp_cond_wait() returns immediately with EINTR if 32800Sstevel@tonic-gate * set_parking_flag(self,0) is called on this lwp before it 32810Sstevel@tonic-gate * goes to sleep in the kernel. sigacthandler() calls this 32820Sstevel@tonic-gate * when a deferred signal is noted. This assures that we don't 32830Sstevel@tonic-gate * get stuck in ___lwp_cond_wait() with all signals blocked 32840Sstevel@tonic-gate * due to taking a deferred signal before going to sleep. 32850Sstevel@tonic-gate */ 32860Sstevel@tonic-gate set_parking_flag(self, 1); 32870Sstevel@tonic-gate if (self->ul_cursig != 0 || 32880Sstevel@tonic-gate (self->ul_cancelable && self->ul_cancel_pending)) 32890Sstevel@tonic-gate set_parking_flag(self, 0); 32900Sstevel@tonic-gate error = ___lwp_cond_wait(cvp, mp, tsp, 1); 32910Sstevel@tonic-gate set_parking_flag(self, 0); 32920Sstevel@tonic-gate self->ul_sp = 0; 32930Sstevel@tonic-gate self->ul_wchan = NULL; 32940Sstevel@tonic-gate return (error); 32950Sstevel@tonic-gate } 32960Sstevel@tonic-gate 32970Sstevel@tonic-gate int 32980Sstevel@tonic-gate cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 32990Sstevel@tonic-gate { 33000Sstevel@tonic-gate ulwp_t *self = curthread; 33010Sstevel@tonic-gate int error; 33020Sstevel@tonic-gate int merror; 33030Sstevel@tonic-gate 33047255Sraf if (self->ul_error_detection && self->ul_misaligned == 0) 33057255Sraf cond_wait_check_alignment(cvp, mp); 33067255Sraf 33070Sstevel@tonic-gate /* 33080Sstevel@tonic-gate * See the large comment in cond_wait_queue(), above. 33090Sstevel@tonic-gate */ 33100Sstevel@tonic-gate if (self->ul_cond_wait_defer) 33110Sstevel@tonic-gate sigoff(self); 33120Sstevel@tonic-gate 33130Sstevel@tonic-gate error = cond_sleep_kernel(cvp, mp, tsp); 33140Sstevel@tonic-gate 33150Sstevel@tonic-gate /* 33160Sstevel@tonic-gate * Override the return code from ___lwp_cond_wait() 33170Sstevel@tonic-gate * with any non-zero return code from mutex_lock(). 33180Sstevel@tonic-gate * This addresses robust lock failures in particular; 33190Sstevel@tonic-gate * the caller must see the EOWNERDEAD or ENOTRECOVERABLE 33200Sstevel@tonic-gate * errors in order to take corrective action. 
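 * That is, a robust-lock failure from the reacquisition is allowed
 * to mask an EINTR or ETIME returned by the wait itself.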
33210Sstevel@tonic-gate */ 33225629Sraf if ((merror = mutex_lock_impl(mp, NULL)) != 0) 33230Sstevel@tonic-gate error = merror; 33240Sstevel@tonic-gate 33250Sstevel@tonic-gate /* 33260Sstevel@tonic-gate * Take any deferred signal now, after we have reacquired the mutex. 33270Sstevel@tonic-gate */ 33280Sstevel@tonic-gate if (self->ul_cond_wait_defer) 33290Sstevel@tonic-gate sigon(self); 33300Sstevel@tonic-gate 33310Sstevel@tonic-gate return (error); 33320Sstevel@tonic-gate } 33330Sstevel@tonic-gate 33340Sstevel@tonic-gate /* 33356812Sraf * Common code for cond_wait() and cond_timedwait() 33360Sstevel@tonic-gate */ 33370Sstevel@tonic-gate int 33380Sstevel@tonic-gate cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp) 33390Sstevel@tonic-gate { 33400Sstevel@tonic-gate int mtype = mp->mutex_type; 33410Sstevel@tonic-gate hrtime_t begin_sleep = 0; 33420Sstevel@tonic-gate ulwp_t *self = curthread; 33430Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 33440Sstevel@tonic-gate tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 33450Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 33460Sstevel@tonic-gate uint8_t rcount; 33470Sstevel@tonic-gate int error = 0; 33480Sstevel@tonic-gate 33490Sstevel@tonic-gate /* 33500Sstevel@tonic-gate * The SUSV3 Posix spec for pthread_cond_timedwait() states: 33510Sstevel@tonic-gate * Except in the case of [ETIMEDOUT], all these error checks 33520Sstevel@tonic-gate * shall act as if they were performed immediately at the 33530Sstevel@tonic-gate * beginning of processing for the function and shall cause 33540Sstevel@tonic-gate * an error return, in effect, prior to modifying the state 33550Sstevel@tonic-gate * of the mutex specified by mutex or the condition variable 33560Sstevel@tonic-gate * specified by cond. 33570Sstevel@tonic-gate * Therefore, we must return EINVAL now if the timeout is invalid.
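 * For example, a negative tv_sec or tv_nsec >= NANOSEC is rejected
 * here, before the mutex or the condvar is touched, even if the
 * condition could have been satisfied immediately.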
33580Sstevel@tonic-gate */ 33590Sstevel@tonic-gate if (tsp != NULL && 33600Sstevel@tonic-gate (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC)) 33610Sstevel@tonic-gate return (EINVAL); 33620Sstevel@tonic-gate 33630Sstevel@tonic-gate if (__td_event_report(self, TD_SLEEP, udp)) { 33640Sstevel@tonic-gate self->ul_sp = stkptr(); 33650Sstevel@tonic-gate self->ul_wchan = cvp; 33660Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_SLEEP; 33670Sstevel@tonic-gate self->ul_td_evbuf.eventdata = cvp; 33680Sstevel@tonic-gate tdb_event(TD_SLEEP, udp); 33690Sstevel@tonic-gate self->ul_sp = 0; 33700Sstevel@tonic-gate } 33710Sstevel@tonic-gate if (csp) { 33720Sstevel@tonic-gate if (tsp) 33730Sstevel@tonic-gate tdb_incr(csp->cond_timedwait); 33740Sstevel@tonic-gate else 33750Sstevel@tonic-gate tdb_incr(csp->cond_wait); 33760Sstevel@tonic-gate } 33770Sstevel@tonic-gate if (msp) 33780Sstevel@tonic-gate begin_sleep = record_hold_time(msp); 33790Sstevel@tonic-gate else if (csp) 33800Sstevel@tonic-gate begin_sleep = gethrtime(); 33810Sstevel@tonic-gate 33820Sstevel@tonic-gate if (self->ul_error_detection) { 33836812Sraf if (!mutex_held(mp)) 33840Sstevel@tonic-gate lock_error(mp, "cond_wait", cvp, NULL); 33850Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) 33860Sstevel@tonic-gate lock_error(mp, "recursive mutex in cond_wait", 33875629Sraf cvp, NULL); 33880Sstevel@tonic-gate if (cvp->cond_type & USYNC_PROCESS) { 33894574Sraf if (!(mtype & USYNC_PROCESS)) 33900Sstevel@tonic-gate lock_error(mp, "cond_wait", cvp, 33915629Sraf "condvar process-shared, " 33925629Sraf "mutex process-private"); 33930Sstevel@tonic-gate } else { 33944574Sraf if (mtype & USYNC_PROCESS) 33950Sstevel@tonic-gate lock_error(mp, "cond_wait", cvp, 33965629Sraf "condvar process-private, " 33975629Sraf "mutex process-shared"); 33980Sstevel@tonic-gate } 33990Sstevel@tonic-gate } 34000Sstevel@tonic-gate 34010Sstevel@tonic-gate /* 34020Sstevel@tonic-gate * We deal with recursive mutexes by completely 34030Sstevel@tonic-gate * dropping the lock and restoring the recursion 34040Sstevel@tonic-gate * count after waking up. This is arguably wrong, 34050Sstevel@tonic-gate * but it obeys the principle of least astonishment. 34060Sstevel@tonic-gate */ 34070Sstevel@tonic-gate rcount = mp->mutex_rcount; 34080Sstevel@tonic-gate mp->mutex_rcount = 0; 34094574Sraf if ((mtype & 34104574Sraf (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) | 34110Sstevel@tonic-gate (cvp->cond_type & USYNC_PROCESS)) 34120Sstevel@tonic-gate error = cond_wait_kernel(cvp, mp, tsp); 34130Sstevel@tonic-gate else 34145629Sraf error = cond_wait_queue(cvp, mp, tsp); 34150Sstevel@tonic-gate mp->mutex_rcount = rcount; 34160Sstevel@tonic-gate 34170Sstevel@tonic-gate if (csp) { 34180Sstevel@tonic-gate hrtime_t lapse = gethrtime() - begin_sleep; 34190Sstevel@tonic-gate if (tsp == NULL) 34200Sstevel@tonic-gate csp->cond_wait_sleep_time += lapse; 34210Sstevel@tonic-gate else { 34220Sstevel@tonic-gate csp->cond_timedwait_sleep_time += lapse; 34230Sstevel@tonic-gate if (error == ETIME) 34240Sstevel@tonic-gate tdb_incr(csp->cond_timedwait_timeout); 34250Sstevel@tonic-gate } 34260Sstevel@tonic-gate } 34270Sstevel@tonic-gate return (error); 34280Sstevel@tonic-gate } 34290Sstevel@tonic-gate 34300Sstevel@tonic-gate /* 34316812Sraf * cond_wait() is a cancellation point but __cond_wait() is not. 34326812Sraf * Internally, libc calls the non-cancellation version. 
34335891Sraf * Other libraries need to use pthread_setcancelstate(), as appropriate, 34345891Sraf * since __cond_wait() is not exported from libc. 34350Sstevel@tonic-gate */ 34360Sstevel@tonic-gate int 34375891Sraf __cond_wait(cond_t *cvp, mutex_t *mp) 34380Sstevel@tonic-gate { 34390Sstevel@tonic-gate ulwp_t *self = curthread; 34400Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 34410Sstevel@tonic-gate uberflags_t *gflags; 34420Sstevel@tonic-gate 34430Sstevel@tonic-gate /* 34440Sstevel@tonic-gate * Optimize the common case of USYNC_THREAD plus 34450Sstevel@tonic-gate * no error detection, no lock statistics, and no event tracing. 34460Sstevel@tonic-gate */ 34470Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 34480Sstevel@tonic-gate (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted | 34490Sstevel@tonic-gate self->ul_td_events_enable | 34500Sstevel@tonic-gate udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0) 34515629Sraf return (cond_wait_queue(cvp, mp, NULL)); 34520Sstevel@tonic-gate 34530Sstevel@tonic-gate /* 34540Sstevel@tonic-gate * Else do it the long way. 34550Sstevel@tonic-gate */ 34560Sstevel@tonic-gate return (cond_wait_common(cvp, mp, NULL)); 34570Sstevel@tonic-gate } 34580Sstevel@tonic-gate 34596812Sraf #pragma weak _cond_wait = cond_wait 34600Sstevel@tonic-gate int 34616812Sraf cond_wait(cond_t *cvp, mutex_t *mp) 34620Sstevel@tonic-gate { 34630Sstevel@tonic-gate int error; 34640Sstevel@tonic-gate 34650Sstevel@tonic-gate _cancelon(); 34665891Sraf error = __cond_wait(cvp, mp); 34670Sstevel@tonic-gate if (error == EINTR) 34680Sstevel@tonic-gate _canceloff(); 34690Sstevel@tonic-gate else 34700Sstevel@tonic-gate _canceloff_nocancel(); 34710Sstevel@tonic-gate return (error); 34720Sstevel@tonic-gate } 34730Sstevel@tonic-gate 34745891Sraf /* 34755891Sraf * pthread_cond_wait() is a cancellation point. 34765891Sraf */ 34770Sstevel@tonic-gate int 34786812Sraf pthread_cond_wait(pthread_cond_t *_RESTRICT_KYWD cvp, 34796812Sraf pthread_mutex_t *_RESTRICT_KYWD mp) 34800Sstevel@tonic-gate { 34810Sstevel@tonic-gate int error; 34820Sstevel@tonic-gate 34836812Sraf error = cond_wait((cond_t *)cvp, (mutex_t *)mp); 34840Sstevel@tonic-gate return ((error == EINTR)? 0 : error); 34850Sstevel@tonic-gate } 34860Sstevel@tonic-gate 34870Sstevel@tonic-gate /* 34886812Sraf * cond_timedwait() is a cancellation point but __cond_timedwait() is not. 34890Sstevel@tonic-gate */ 34900Sstevel@tonic-gate int 34915891Sraf __cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 34920Sstevel@tonic-gate { 34930Sstevel@tonic-gate clockid_t clock_id = cvp->cond_clockid; 34940Sstevel@tonic-gate timespec_t reltime; 34950Sstevel@tonic-gate int error; 34960Sstevel@tonic-gate 34970Sstevel@tonic-gate if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES) 34980Sstevel@tonic-gate clock_id = CLOCK_REALTIME; 34990Sstevel@tonic-gate abstime_to_reltime(clock_id, abstime, &reltime); 35000Sstevel@tonic-gate error = cond_wait_common(cvp, mp, &reltime); 35010Sstevel@tonic-gate if (error == ETIME && clock_id == CLOCK_HIGHRES) { 35020Sstevel@tonic-gate /* 35030Sstevel@tonic-gate * Don't return ETIME if we didn't really get a timeout. 35040Sstevel@tonic-gate * This can happen if we return because someone resets 35050Sstevel@tonic-gate * the system clock. Just return zero in this case, 35060Sstevel@tonic-gate * giving a spurious wakeup but not a timeout. 
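 * The test below recomputes the absolute CLOCK_HIGHRES deadline
 * and compares it against gethrtime(); if the deadline has not
 * actually arrived, the ETIME is suppressed.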
35070Sstevel@tonic-gate */ 35080Sstevel@tonic-gate if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC + 35090Sstevel@tonic-gate abstime->tv_nsec > gethrtime()) 35100Sstevel@tonic-gate error = 0; 35110Sstevel@tonic-gate } 35120Sstevel@tonic-gate return (error); 35130Sstevel@tonic-gate } 35140Sstevel@tonic-gate 35150Sstevel@tonic-gate int 35166812Sraf cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime) 35170Sstevel@tonic-gate { 35180Sstevel@tonic-gate int error; 35190Sstevel@tonic-gate 35200Sstevel@tonic-gate _cancelon(); 35215891Sraf error = __cond_timedwait(cvp, mp, abstime); 35220Sstevel@tonic-gate if (error == EINTR) 35230Sstevel@tonic-gate _canceloff(); 35240Sstevel@tonic-gate else 35250Sstevel@tonic-gate _canceloff_nocancel(); 35260Sstevel@tonic-gate return (error); 35270Sstevel@tonic-gate } 35280Sstevel@tonic-gate 35295891Sraf /* 35305891Sraf * pthread_cond_timedwait() is a cancellation point. 35315891Sraf */ 35320Sstevel@tonic-gate int 35336812Sraf pthread_cond_timedwait(pthread_cond_t *_RESTRICT_KYWD cvp, 35346812Sraf pthread_mutex_t *_RESTRICT_KYWD mp, 35356812Sraf const struct timespec *_RESTRICT_KYWD abstime) 35360Sstevel@tonic-gate { 35370Sstevel@tonic-gate int error; 35380Sstevel@tonic-gate 35396812Sraf error = cond_timedwait((cond_t *)cvp, (mutex_t *)mp, abstime); 35400Sstevel@tonic-gate if (error == ETIME) 35410Sstevel@tonic-gate error = ETIMEDOUT; 35420Sstevel@tonic-gate else if (error == EINTR) 35430Sstevel@tonic-gate error = 0; 35440Sstevel@tonic-gate return (error); 35450Sstevel@tonic-gate } 35460Sstevel@tonic-gate 35470Sstevel@tonic-gate /* 35486812Sraf * cond_reltimedwait() is a cancellation point but __cond_reltimedwait() is not. 35490Sstevel@tonic-gate */ 35500Sstevel@tonic-gate int 35515891Sraf __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) 35520Sstevel@tonic-gate { 35530Sstevel@tonic-gate timespec_t tslocal = *reltime; 35540Sstevel@tonic-gate 35550Sstevel@tonic-gate return (cond_wait_common(cvp, mp, &tslocal)); 35560Sstevel@tonic-gate } 35570Sstevel@tonic-gate 35580Sstevel@tonic-gate int 35596812Sraf cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime) 35600Sstevel@tonic-gate { 35610Sstevel@tonic-gate int error; 35620Sstevel@tonic-gate 35630Sstevel@tonic-gate _cancelon(); 35645891Sraf error = __cond_reltimedwait(cvp, mp, reltime); 35650Sstevel@tonic-gate if (error == EINTR) 35660Sstevel@tonic-gate _canceloff(); 35670Sstevel@tonic-gate else 35680Sstevel@tonic-gate _canceloff_nocancel(); 35690Sstevel@tonic-gate return (error); 35700Sstevel@tonic-gate } 35710Sstevel@tonic-gate 35720Sstevel@tonic-gate int 35736812Sraf pthread_cond_reltimedwait_np(pthread_cond_t *_RESTRICT_KYWD cvp, 35746812Sraf pthread_mutex_t *_RESTRICT_KYWD mp, 35756812Sraf const struct timespec *_RESTRICT_KYWD reltime) 35760Sstevel@tonic-gate { 35770Sstevel@tonic-gate int error; 35780Sstevel@tonic-gate 35796812Sraf error = cond_reltimedwait((cond_t *)cvp, (mutex_t *)mp, reltime); 35800Sstevel@tonic-gate if (error == ETIME) 35810Sstevel@tonic-gate error = ETIMEDOUT; 35820Sstevel@tonic-gate else if (error == EINTR) 35830Sstevel@tonic-gate error = 0; 35840Sstevel@tonic-gate return (error); 35850Sstevel@tonic-gate } 35860Sstevel@tonic-gate 35876812Sraf #pragma weak pthread_cond_signal = cond_signal 35886812Sraf #pragma weak _cond_signal = cond_signal 35890Sstevel@tonic-gate int 35906812Sraf cond_signal(cond_t *cvp) 35910Sstevel@tonic-gate { 35920Sstevel@tonic-gate ulwp_t *self = curthread; 35930Sstevel@tonic-gate uberdata_t *udp = 
self->ul_uberdata; 35940Sstevel@tonic-gate tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 35950Sstevel@tonic-gate int error = 0; 35966247Sraf int more; 35976247Sraf lwpid_t lwpid; 35980Sstevel@tonic-gate queue_head_t *qp; 35990Sstevel@tonic-gate mutex_t *mp; 36000Sstevel@tonic-gate queue_head_t *mqp; 36010Sstevel@tonic-gate ulwp_t **ulwpp; 36020Sstevel@tonic-gate ulwp_t *ulwp; 36036247Sraf ulwp_t *prev; 36040Sstevel@tonic-gate 36050Sstevel@tonic-gate if (csp) 36060Sstevel@tonic-gate tdb_incr(csp->cond_signal); 36070Sstevel@tonic-gate 36080Sstevel@tonic-gate if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */ 36096812Sraf error = _lwp_cond_signal(cvp); 36100Sstevel@tonic-gate 36110Sstevel@tonic-gate if (!cvp->cond_waiters_user) /* no one sleeping at user-level */ 36120Sstevel@tonic-gate return (error); 36130Sstevel@tonic-gate 36140Sstevel@tonic-gate /* 36150Sstevel@tonic-gate * Move someone from the condvar sleep queue to the mutex sleep 36160Sstevel@tonic-gate * queue for the mutex that he will acquire on being waked up. 36170Sstevel@tonic-gate * We can do this only if we own the mutex he will acquire. 36180Sstevel@tonic-gate * If we do not own the mutex, or if his ul_cv_wake flag 36190Sstevel@tonic-gate * is set, just dequeue and unpark him. 36200Sstevel@tonic-gate */ 36210Sstevel@tonic-gate qp = queue_lock(cvp, CV); 36226247Sraf ulwpp = queue_slot(qp, &prev, &more); 36236247Sraf cvp->cond_waiters_user = more; 36246247Sraf if (ulwpp == NULL) { /* no one on the sleep queue */ 36250Sstevel@tonic-gate queue_unlock(qp); 36260Sstevel@tonic-gate return (error); 36270Sstevel@tonic-gate } 36286247Sraf ulwp = *ulwpp; 36290Sstevel@tonic-gate 36300Sstevel@tonic-gate /* 36310Sstevel@tonic-gate * Inform the thread that he was the recipient of a cond_signal(). 36320Sstevel@tonic-gate * This lets him deal with cond_signal() and, concurrently, 36330Sstevel@tonic-gate * one or more of a cancellation, a UNIX signal, or a timeout. 36340Sstevel@tonic-gate * These latter conditions must not consume a cond_signal(). 36350Sstevel@tonic-gate */ 36360Sstevel@tonic-gate ulwp->ul_signalled = 1; 36370Sstevel@tonic-gate 36380Sstevel@tonic-gate /* 36390Sstevel@tonic-gate * Dequeue the waiter but leave his ul_sleepq non-NULL 36400Sstevel@tonic-gate * while we move him to the mutex queue so that he can 36410Sstevel@tonic-gate * deal properly with spurious wakeups. 
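 * (cond_sleep_queue() treats ul_sleepq == NULL as proof of a
 * genuine wakeup; keeping it set while he is requeued makes a
 * concurrent spurious wakeup simply loop around and park again.)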
36420Sstevel@tonic-gate */ 36436247Sraf queue_unlink(qp, ulwpp, prev); 36440Sstevel@tonic-gate 36450Sstevel@tonic-gate mp = ulwp->ul_cvmutex; /* the mutex he will acquire */ 36460Sstevel@tonic-gate ulwp->ul_cvmutex = NULL; 36470Sstevel@tonic-gate ASSERT(mp != NULL); 36480Sstevel@tonic-gate 36490Sstevel@tonic-gate if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { 36506247Sraf /* just wake him up */ 36516247Sraf lwpid = ulwp->ul_lwpid; 36520Sstevel@tonic-gate no_preempt(self); 36530Sstevel@tonic-gate ulwp->ul_sleepq = NULL; 36540Sstevel@tonic-gate ulwp->ul_wchan = NULL; 36550Sstevel@tonic-gate queue_unlock(qp); 36560Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 36570Sstevel@tonic-gate preempt(self); 36580Sstevel@tonic-gate } else { 36596247Sraf /* move him to the mutex queue */ 36600Sstevel@tonic-gate mqp = queue_lock(mp, MX); 36616247Sraf enqueue(mqp, ulwp, 0); 36620Sstevel@tonic-gate mp->mutex_waiters = 1; 36630Sstevel@tonic-gate queue_unlock(mqp); 36640Sstevel@tonic-gate queue_unlock(qp); 36650Sstevel@tonic-gate } 36660Sstevel@tonic-gate 36670Sstevel@tonic-gate return (error); 36680Sstevel@tonic-gate } 36690Sstevel@tonic-gate 36704570Sraf /* 36714574Sraf * Utility function called by mutex_wakeup_all(), cond_broadcast(), 36724574Sraf * and rw_queue_release() to (re)allocate a big buffer to hold the 36734574Sraf * lwpids of all the threads to be set running after they are removed 36744574Sraf * from their sleep queues. Since we are holding a queue lock, we 36754574Sraf * cannot call any function that might acquire a lock. mmap(), munmap(), 36764574Sraf * lwp_unpark_all() are simple system calls and are safe in this regard. 36774570Sraf */ 36784570Sraf lwpid_t * 36794570Sraf alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr) 36804570Sraf { 36814570Sraf /* 36824570Sraf * Allocate NEWLWPS ids on the first overflow. 36834570Sraf * Double the allocation each time after that. 36844570Sraf */ 36854570Sraf int nlwpid = *nlwpid_ptr; 36864570Sraf int maxlwps = *maxlwps_ptr; 36874570Sraf int first_allocation; 36884570Sraf int newlwps; 36894570Sraf void *vaddr; 36904570Sraf 36914570Sraf ASSERT(nlwpid == maxlwps); 36924570Sraf 36934570Sraf first_allocation = (maxlwps == MAXLWPS); 36944570Sraf newlwps = first_allocation? NEWLWPS : 2 * maxlwps; 36956515Sraf vaddr = mmap(NULL, newlwps * sizeof (lwpid_t), 36964570Sraf PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0); 36974570Sraf 36984570Sraf if (vaddr == MAP_FAILED) { 36994570Sraf /* 37004570Sraf * Let's hope this never happens. 37014570Sraf * If it does, then we have a terrible 37024570Sraf * thundering herd on our hands. 
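 * Unparking everyone collected so far is the only safe fallback;
 * they will all recontend for the lock at once (the herd).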
37034570Sraf */ 37044570Sraf (void) __lwp_unpark_all(lwpid, nlwpid); 37054570Sraf *nlwpid_ptr = 0; 37064570Sraf } else { 37076515Sraf (void) memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t)); 37084570Sraf if (!first_allocation) 37096515Sraf (void) munmap((caddr_t)lwpid, 37104570Sraf maxlwps * sizeof (lwpid_t)); 37114570Sraf lwpid = vaddr; 37124570Sraf *maxlwps_ptr = newlwps; 37134570Sraf } 37144570Sraf 37154570Sraf return (lwpid); 37164570Sraf } 37170Sstevel@tonic-gate 37186812Sraf #pragma weak pthread_cond_broadcast = cond_broadcast 37196812Sraf #pragma weak _cond_broadcast = cond_broadcast 37200Sstevel@tonic-gate int 37216812Sraf cond_broadcast(cond_t *cvp) 37220Sstevel@tonic-gate { 37230Sstevel@tonic-gate ulwp_t *self = curthread; 37240Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 37250Sstevel@tonic-gate tdb_cond_stats_t *csp = COND_STATS(cvp, udp); 37260Sstevel@tonic-gate int error = 0; 37270Sstevel@tonic-gate queue_head_t *qp; 37286247Sraf queue_root_t *qrp; 37290Sstevel@tonic-gate mutex_t *mp; 37300Sstevel@tonic-gate mutex_t *mp_cache = NULL; 37314570Sraf queue_head_t *mqp = NULL; 37320Sstevel@tonic-gate ulwp_t *ulwp; 37334570Sraf int nlwpid = 0; 37344570Sraf int maxlwps = MAXLWPS; 37350Sstevel@tonic-gate lwpid_t buffer[MAXLWPS]; 37360Sstevel@tonic-gate lwpid_t *lwpid = buffer; 37370Sstevel@tonic-gate 37380Sstevel@tonic-gate if (csp) 37390Sstevel@tonic-gate tdb_incr(csp->cond_broadcast); 37400Sstevel@tonic-gate 37410Sstevel@tonic-gate if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */ 37426812Sraf error = _lwp_cond_broadcast(cvp); 37430Sstevel@tonic-gate 37440Sstevel@tonic-gate if (!cvp->cond_waiters_user) /* no one sleeping at user-level */ 37450Sstevel@tonic-gate return (error); 37460Sstevel@tonic-gate 37470Sstevel@tonic-gate /* 37480Sstevel@tonic-gate * Move everyone from the condvar sleep queue to the mutex sleep 37490Sstevel@tonic-gate * queue for the mutex that they will acquire on being waked up. 37500Sstevel@tonic-gate * We can do this only if we own the mutex they will acquire. 37510Sstevel@tonic-gate * If we do not own the mutex, or if their ul_cv_wake flag 37520Sstevel@tonic-gate * is set, just dequeue and unpark them. 37530Sstevel@tonic-gate * 37540Sstevel@tonic-gate * We keep track of lwpids that are to be unparked in lwpid[]. 37550Sstevel@tonic-gate * __lwp_unpark_all() is called to unpark all of them after 37560Sstevel@tonic-gate * they have been removed from the sleep queue and the sleep 37570Sstevel@tonic-gate * queue lock has been dropped. If we run out of space in our 37580Sstevel@tonic-gate * on-stack buffer, we need to allocate more but we can't call 37590Sstevel@tonic-gate * lmalloc() because we are holding a queue lock when the overflow 37600Sstevel@tonic-gate * occurs and lmalloc() acquires a lock. We can't use alloca() 37614570Sraf * either because the application may have allocated a small 37624570Sraf * stack and we don't want to overrun the stack. So we call 37634570Sraf * alloc_lwpids() to allocate a bigger buffer using the mmap() 37640Sstevel@tonic-gate * system call directly since that path acquires no locks. 
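 * The on-stack buffer[MAXLWPS] declared above covers the common
 * case; alloc_lwpids() takes over only when it overflows.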
37650Sstevel@tonic-gate */ 37660Sstevel@tonic-gate qp = queue_lock(cvp, CV); 37670Sstevel@tonic-gate cvp->cond_waiters_user = 0; 37686247Sraf for (;;) { 37696247Sraf if ((qrp = qp->qh_root) == NULL || 37706247Sraf (ulwp = qrp->qr_head) == NULL) 37716247Sraf break; 37726247Sraf ASSERT(ulwp->ul_wchan == cvp); 37736247Sraf queue_unlink(qp, &qrp->qr_head, NULL); 37740Sstevel@tonic-gate mp = ulwp->ul_cvmutex; /* his mutex */ 37750Sstevel@tonic-gate ulwp->ul_cvmutex = NULL; 37760Sstevel@tonic-gate ASSERT(mp != NULL); 37770Sstevel@tonic-gate if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) { 37786247Sraf /* just wake him up */ 37790Sstevel@tonic-gate ulwp->ul_sleepq = NULL; 37800Sstevel@tonic-gate ulwp->ul_wchan = NULL; 37814570Sraf if (nlwpid == maxlwps) 37824570Sraf lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 37830Sstevel@tonic-gate lwpid[nlwpid++] = ulwp->ul_lwpid; 37840Sstevel@tonic-gate } else { 37856247Sraf /* move him to the mutex queue */ 37860Sstevel@tonic-gate if (mp != mp_cache) { 37870Sstevel@tonic-gate mp_cache = mp; 37884570Sraf if (mqp != NULL) 37894570Sraf queue_unlock(mqp); 37904570Sraf mqp = queue_lock(mp, MX); 37910Sstevel@tonic-gate } 37926247Sraf enqueue(mqp, ulwp, 0); 37930Sstevel@tonic-gate mp->mutex_waiters = 1; 37940Sstevel@tonic-gate } 37950Sstevel@tonic-gate } 37964570Sraf if (mqp != NULL) 37974570Sraf queue_unlock(mqp); 37984570Sraf if (nlwpid == 0) { 37994570Sraf queue_unlock(qp); 38004570Sraf } else { 38014570Sraf no_preempt(self); 38024570Sraf queue_unlock(qp); 38030Sstevel@tonic-gate if (nlwpid == 1) 38040Sstevel@tonic-gate (void) __lwp_unpark(lwpid[0]); 38050Sstevel@tonic-gate else 38060Sstevel@tonic-gate (void) __lwp_unpark_all(lwpid, nlwpid); 38074570Sraf preempt(self); 38080Sstevel@tonic-gate } 38090Sstevel@tonic-gate if (lwpid != buffer) 38106515Sraf (void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t)); 38110Sstevel@tonic-gate return (error); 38120Sstevel@tonic-gate } 38130Sstevel@tonic-gate 38146812Sraf #pragma weak pthread_cond_destroy = cond_destroy 38150Sstevel@tonic-gate int 38166812Sraf cond_destroy(cond_t *cvp) 38170Sstevel@tonic-gate { 38180Sstevel@tonic-gate cvp->cond_magic = 0; 38190Sstevel@tonic-gate tdb_sync_obj_deregister(cvp); 38200Sstevel@tonic-gate return (0); 38210Sstevel@tonic-gate } 38220Sstevel@tonic-gate 38230Sstevel@tonic-gate #if defined(THREAD_DEBUG) 38240Sstevel@tonic-gate void 38250Sstevel@tonic-gate assert_no_libc_locks_held(void) 38260Sstevel@tonic-gate { 38270Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 38280Sstevel@tonic-gate } 38290Sstevel@tonic-gate 38300Sstevel@tonic-gate /* protected by link_lock */ 38310Sstevel@tonic-gate uint64_t spin_lock_spin; 38320Sstevel@tonic-gate uint64_t spin_lock_spin2; 38330Sstevel@tonic-gate uint64_t spin_lock_sleep; 38340Sstevel@tonic-gate uint64_t spin_lock_wakeup; 38350Sstevel@tonic-gate 38360Sstevel@tonic-gate /* 38370Sstevel@tonic-gate * Record spin lock statistics. 38380Sstevel@tonic-gate * Called by a thread exiting itself in thrp_exit(). 38390Sstevel@tonic-gate * Also called via atexit() from the thread calling 38400Sstevel@tonic-gate * exit() to do all the other threads as well. 
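 * The per-thread counters are accumulated into the global totals
 * below and then cleared, so each thread's counts are added
 * only once.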
38410Sstevel@tonic-gate */ 38420Sstevel@tonic-gate void 38430Sstevel@tonic-gate record_spin_locks(ulwp_t *ulwp) 38440Sstevel@tonic-gate { 38450Sstevel@tonic-gate spin_lock_spin += ulwp->ul_spin_lock_spin; 38460Sstevel@tonic-gate spin_lock_spin2 += ulwp->ul_spin_lock_spin2; 38470Sstevel@tonic-gate spin_lock_sleep += ulwp->ul_spin_lock_sleep; 38480Sstevel@tonic-gate spin_lock_wakeup += ulwp->ul_spin_lock_wakeup; 38490Sstevel@tonic-gate ulwp->ul_spin_lock_spin = 0; 38500Sstevel@tonic-gate ulwp->ul_spin_lock_spin2 = 0; 38510Sstevel@tonic-gate ulwp->ul_spin_lock_sleep = 0; 38520Sstevel@tonic-gate ulwp->ul_spin_lock_wakeup = 0; 38530Sstevel@tonic-gate } 38540Sstevel@tonic-gate 38550Sstevel@tonic-gate /* 38560Sstevel@tonic-gate * atexit function: dump the queue statistics to stderr. 38570Sstevel@tonic-gate */ 38580Sstevel@tonic-gate #include <stdio.h> 38590Sstevel@tonic-gate void 38600Sstevel@tonic-gate dump_queue_statistics(void) 38610Sstevel@tonic-gate { 38620Sstevel@tonic-gate uberdata_t *udp = curthread->ul_uberdata; 38630Sstevel@tonic-gate queue_head_t *qp; 38640Sstevel@tonic-gate int qn; 38650Sstevel@tonic-gate uint64_t spin_lock_total = 0; 38660Sstevel@tonic-gate 38670Sstevel@tonic-gate if (udp->queue_head == NULL || thread_queue_dump == 0) 38680Sstevel@tonic-gate return; 38690Sstevel@tonic-gate 38700Sstevel@tonic-gate if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 || 38716247Sraf fprintf(stderr, "queue# lockcount max qlen max hlen\n") < 0) 38720Sstevel@tonic-gate return; 38730Sstevel@tonic-gate for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) { 38740Sstevel@tonic-gate if (qp->qh_lockcount == 0) 38750Sstevel@tonic-gate continue; 38760Sstevel@tonic-gate spin_lock_total += qp->qh_lockcount; 38776247Sraf if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn, 38786247Sraf (u_longlong_t)qp->qh_lockcount, 38796247Sraf qp->qh_qmax, qp->qh_hmax) < 0) 38805629Sraf return; 38810Sstevel@tonic-gate } 38820Sstevel@tonic-gate 38830Sstevel@tonic-gate if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 || 38846247Sraf fprintf(stderr, "queue# lockcount max qlen max hlen\n") < 0) 38850Sstevel@tonic-gate return; 38860Sstevel@tonic-gate for (qn = 0; qn < QHASHSIZE; qn++, qp++) { 38870Sstevel@tonic-gate if (qp->qh_lockcount == 0) 38880Sstevel@tonic-gate continue; 38890Sstevel@tonic-gate spin_lock_total += qp->qh_lockcount; 38906247Sraf if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn, 38916247Sraf (u_longlong_t)qp->qh_lockcount, 38926247Sraf qp->qh_qmax, qp->qh_hmax) < 0) 38935629Sraf return; 38940Sstevel@tonic-gate } 38950Sstevel@tonic-gate 38960Sstevel@tonic-gate (void) fprintf(stderr, "\n spin_lock_total = %10llu\n", 38975629Sraf (u_longlong_t)spin_lock_total); 38980Sstevel@tonic-gate (void) fprintf(stderr, " spin_lock_spin = %10llu\n", 38995629Sraf (u_longlong_t)spin_lock_spin); 39000Sstevel@tonic-gate (void) fprintf(stderr, " spin_lock_spin2 = %10llu\n", 39015629Sraf (u_longlong_t)spin_lock_spin2); 39020Sstevel@tonic-gate (void) fprintf(stderr, " spin_lock_sleep = %10llu\n", 39035629Sraf (u_longlong_t)spin_lock_sleep); 39040Sstevel@tonic-gate (void) fprintf(stderr, " spin_lock_wakeup = %10llu\n", 39055629Sraf (u_longlong_t)spin_lock_wakeup); 39060Sstevel@tonic-gate } 39076247Sraf #endif