/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/rtpriocntl.h>
#include <sys/sdt.h>
#include <atomic.h>

#if defined(THREAD_DEBUG)
#define	INCR32(x)	(((x) != UINT32_MAX)? (x)++ : 0)
#define	INCR(x)		((x)++)
#define	DECR(x)		((x)--)
#define	MAXINCR(m, x)	((m < ++x)? (m = x) : 0)
#else
#define	INCR32(x)
#define	INCR(x)
#define	DECR(x)
#define	MAXINCR(m, x)
#endif

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);

/*
 * Lock statistics support functions.
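 *
 * A minimal usage sketch (illustrative only; the real call sites are
 * elsewhere in libc, and the tdb_mutex_stats_t pointer is NULL when
 * statistics are not being gathered for the mutex):
 *
 *	if (msp)
 *		record_begin_hold(msp);		<- at lock acquisition
 *	... critical section ...
 *	if (msp)
 *		(void) record_hold_time(msp);	<- at lock release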
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}

/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin count of 1000 is experimentally determined.
 * On sun4u machines with any number of processors it could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variable:
 *	_THREAD_ADAPTIVE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_queue_verify = 0;
static	int	ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * We try harder to acquire queue locks by spinning.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_queue_spin = 10000;

#define	ALL_ATTRIBUTES				\
	(LOCK_RECURSIVE | LOCK_ERRORCHECK |	\
	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT |	\
	LOCK_ROBUST)

/*
 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
 *	LOCK_RECURSIVE
 *	LOCK_ERRORCHECK
 *	LOCK_PRIO_INHERIT
 *	LOCK_PRIO_PROTECT
 *	LOCK_ROBUST
 */
#pragma weak _mutex_init = mutex_init
/* ARGSUSED2 */
int
mutex_init(mutex_t *mp, int type, void *arg)
{
	int basetype = (type & ~ALL_ATTRIBUTES);
	const pcclass_t *pccp;
	int error = 0;
	int ceil;

	if (basetype == USYNC_PROCESS_ROBUST) {
		/*
		 * USYNC_PROCESS_ROBUST is a deprecated historical type.
		 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
		 * retain the USYNC_PROCESS_ROBUST flag so we can return
		 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
		 * mutexes will ever draw ELOCKUNMAPPED).
		 */
		type |= (USYNC_PROCESS | LOCK_ROBUST);
		basetype = USYNC_PROCESS;
	}

	if (type & LOCK_PRIO_PROTECT)
		pccp = get_info_by_policy(SCHED_FIFO);
	if ((basetype != USYNC_THREAD && basetype != USYNC_PROCESS) ||
	    (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
	    == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
	    ((type & LOCK_PRIO_PROTECT) &&
	    ((ceil = *(int *)arg) < pccp->pcc_primin ||
	    ceil > pccp->pcc_primax))) {
		error = EINVAL;
	} else if (type & LOCK_ROBUST) {
		/*
		 * Callers of mutex_init() with the LOCK_ROBUST attribute
		 * are required to pass an initially all-zero mutex.
		 * Multiple calls to mutex_init() are allowed; all but
		 * the first return EBUSY.  A call to mutex_init() is
		 * allowed to make an inconsistent robust lock consistent
		 * (for historical usage, even though the proper interface
		 * for this is mutex_consistent()).  Note that we use
		 * atomic_or_16() to set the LOCK_INITED flag so as
		 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
		 */
		if (!(mp->mutex_flag & LOCK_INITED)) {
			mp->mutex_type = (uint8_t)type;
			atomic_or_16(&mp->mutex_flag, LOCK_INITED);
			mp->mutex_magic = MUTEX_MAGIC;
		} else if (type != mp->mutex_type ||
		    ((type & LOCK_PRIO_PROTECT) && mp->mutex_ceiling != ceil)) {
			error = EINVAL;
		} else if (mutex_consistent(mp) != 0) {
			error = EBUSY;
		}
		/* register a process robust mutex with the kernel */
		if (basetype == USYNC_PROCESS)
			register_lock(mp);
	} else {
		(void) memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		mp->mutex_magic = MUTEX_MAGIC;
	}

	if (error == 0 && (type & LOCK_PRIO_PROTECT)) {
		mp->mutex_ceiling = ceil;
	}

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their mutexes
	 * (and don't check the return code from mutex_init),
	 * we put it here, after initializing the mutex regardless.
	 */
	if (error == 0 &&
	    ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		error = EINVAL;

	return (error);
}

/*
 * Delete mp from list of ceiling mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
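 * (Callers use that return value to detect that the released lock was
 * the chain head, whose ceiling the thread's effective priority was
 * inherited from, and then recompute it, typically via
 * _ceil_prio_waive().)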
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	for (mcpp = &self->ul_mxchain;
	    (mcp = *mcpp) != NULL;
	    mcpp = &mcp->mxchain_next) {
		if (mcp->mxchain_mx == mp) {
			*mcpp = mcp->mxchain_next;
			lfree(mcp, sizeof (*mcp));
			return (mcpp == &self->ul_mxchain);
		}
	}
	return (0);
}

/*
 * Add mp to the list of ceiling mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}

/*
 * Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
 */
static void
set_rt_priority(ulwp_t *self, int prio)
{
	pcparms_t pcparm;

	pcparm.pc_cid = self->ul_rtclassid;
	((rtparms_t *)pcparm.pc_clparms)->rt_tqnsecs = RT_NOCHANGE;
	((rtparms_t *)pcparm.pc_clparms)->rt_pri = prio;
	(void) priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm);
}

/*
 * Inherit priority from ceiling.
 * This changes the effective priority, not the assigned priority.
 */
void
_ceil_prio_inherit(int prio)
{
	ulwp_t *self = curthread;

	self->ul_epri = prio;
	set_rt_priority(self, prio);
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp = self->ul_mxchain;
	int prio;

	if (mcp == NULL) {
		prio = self->ul_pri;
		self->ul_epri = 0;
	} else {
		prio = mcp->mxchain_mx->mutex_ceiling;
		self->ul_epri = prio;
	}
	set_rt_priority(self, prio);
}

/*
 * Clear the lock byte.  Retain the waiters byte and the spinners byte.
 * Return the old value of the lock word.
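 * The compare-and-swap loop below, rather than a plain store of zero
 * to the lock byte, is what preserves those other bytes: the waiters
 * and spinners counts share this 32-bit word and may be updated
 * concurrently by other threads, so we re-read and retry whenever the
 * word changes underneath us.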
 */
static uint32_t
clear_lockbyte(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		new = old & ~LOCKMASK;
	} while (atomic_cas_32(lockword, old, new) != old);

	return (old);
}

/*
 * Same as clear_lockbyte(), but operates on mutex_lockword64.
 * The mutex_ownerpid field is cleared along with the lock byte.
 */
static uint64_t
clear_lockbyte64(volatile uint64_t *lockword64)
{
	uint64_t old;
	uint64_t new;

	do {
		old = *lockword64;
		new = old & ~LOCKMASK64;
	} while (atomic_cas_64(lockword64, old, new) != old);

	return (old);
}

/*
 * Similar to set_lock_byte(), which only tries to set the lock byte.
 * Here, we attempt to set the lock byte AND the mutex_ownerpid, keeping
 * the remaining bytes constant.  This atomic operation is required for the
 * correctness of process-shared robust locks, otherwise there would be
 * a window of vulnerability in which the lock byte had been set but the
 * mutex_ownerpid had not yet been set.  If the process were to die in
 * this window of vulnerability (due to some other thread calling exit()
 * or the process receiving a fatal signal), the mutex would be left locked
 * but without a process-ID to determine which process was holding the lock.
 * The kernel would then be unable to mark the robust mutex as LOCK_OWNERDEAD
 * when the process died.  For all other cases of process-shared locks, this
 * operation is just a convenience, for the sake of common code.
 *
 * This operation requires process-shared robust locks to be properly
 * aligned on an 8-byte boundary, at least on sparc machines, lest the
 * operation incur an alignment fault.  This is automatic when locks
 * are declared properly using the mutex_t or pthread_mutex_t data types
 * and the application does not allocate dynamic memory on less than an
 * 8-byte boundary.  See the 'horrible hack' comments below for cases
 * dealing with such broken applications.
 */
static int
set_lock_byte64(volatile uint64_t *lockword64, pid_t ownerpid)
{
	uint64_t old;
	uint64_t new;

	old = *lockword64 & ~LOCKMASK64;
	new = old | ((uint64_t)(uint_t)ownerpid << PIDSHIFT) | LOCKBYTE64;
	if (atomic_cas_64(lockword64, old, new) == old)
		return (LOCKCLEAR);

	return (LOCKSET);
}

/*
 * Increment the spinners count in the mutex lock word.
 * Return 0 on success.  Return -1 if the count would overflow.
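 * A -1 return tells the caller (the adaptive-spin code below) that
 * enough threads are already spinning on this mutex; the caller then
 * skips the spin loop entirely and falls back to blocking.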
 */
static int
spinners_incr(volatile uint32_t *lockword, uint8_t max_spinners)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		if (((old & SPINNERMASK) >> SPINNERSHIFT) >= max_spinners)
			return (-1);
		new = old + (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (0);
}

/*
 * Decrement the spinners count in the mutex lock word.
 * Return the new value of the lock word.
 */
static uint32_t
spinners_decr(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		new = old = *lockword;
		if (new & SPINNERMASK)
			new -= (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (new);
}

/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 * No DTrace probes are provided for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
	ulwp_t *self = curthread;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Spin for a while, attempting to acquire the lock.
	 */
	INCR32(self->ul_spin_lock_spin);
	if (mutex_queuelock_adaptive(mp) == 0 ||
	    set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Try harder if we were previously at a no preemption level.
	 */
	if (self->ul_preempt > 1) {
		INCR32(self->ul_spin_lock_spin2);
		if (mutex_queuelock_adaptive(mp) == 0 ||
		    set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			return;
		}
	}
	/*
	 * Give up and block in the kernel for the mutex.
	 */
	INCR32(self->ul_spin_lock_sleep);
	(void) ___lwp_mutex_timedlock(mp, NULL);
	mp->mutex_owner = (uintptr_t)self;
}

void
spin_lock_clear(mutex_t *mp)
{
	ulwp_t *self = curthread;

	mp->mutex_owner = 0;
	if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
		(void) ___lwp_mutex_wakeup(mp, 0);
		INCR32(self->ul_spin_lock_wakeup);
	}
	preempt(self);
}

/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	void *data;
	int i;

	/*
	 * No locks are needed; we call here only when single-threaded.
	 */
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	if ((data = mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread queue_head table");
	udp->queue_head = qp = (queue_head_t *)data;
	for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
		qp->qh_type = (i < QHASHSIZE)? MX : CV;
		qp->qh_lock.mutex_flag = LOCK_INITED;
		qp->qh_lock.mutex_magic = MUTEX_MAGIC;
		qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
		qp->qh_hlen = 1;
		qp->qh_hmax = 1;
#endif
	}
}

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_root_t *qrp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	uint_t index;
	uint32_t cnt;
	char qtype;
	void *wchan;

	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		cnt++;
		ASSERT((qrp->qr_head != NULL && qrp->qr_tail != NULL) ||
		    (qrp->qr_head == NULL && qrp->qr_tail == NULL));
	}
	ASSERT(qp->qh_hlen == cnt && qp->qh_hmax >= cnt);
	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
	ASSERT(qp->qh_type == qtype);
	if (!thread_queue_verify)
		return;
	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		for (prev = NULL, ulwp = qrp->qr_head; ulwp != NULL;
		    prev = ulwp, ulwp = ulwp->ul_link) {
			cnt++;
			if (ulwp->ul_writer)
				ASSERT(prev == NULL || prev->ul_writer);
			ASSERT(ulwp->ul_qtype == qtype);
			ASSERT(ulwp->ul_wchan != NULL);
			ASSERT(ulwp->ul_sleepq == qp);
			wchan = ulwp->ul_wchan;
			ASSERT(qrp->qr_wchan == wchan);
			index = QUEUE_HASH(wchan, qtype);
			ASSERT(&udp->queue_head[index] == qp);
		}
		ASSERT(qrp->qr_tail == prev);
	}
	ASSERT(qp->qh_qlen == cnt);
}

#else	/* THREAD_DEBUG */

#define	QVERIFY(qp)

#endif	/* THREAD_DEBUG */

/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;
	queue_root_t *qrp;

	ASSERT(qtype == MX || qtype == CV);

	/*
	 * It is possible that we could be called while still single-threaded.
	 * If so, we call queue_alloc() to allocate the queue_head[] array.
	 */
	if ((qp = udp->queue_head) == NULL) {
		queue_alloc();
		qp = udp->queue_head;
	}
	qp += QUEUE_HASH(wchan, qtype);
	spin_lock_set(&qp->qh_lock);
	for (qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next)
		if (qrp->qr_wchan == wchan)
			break;
	if (qrp == NULL && qp->qh_def_root.qr_head == NULL) {
		/* the default queue root is available; use it */
		qrp = &qp->qh_def_root;
		qrp->qr_wchan = wchan;
		ASSERT(qrp->qr_next == NULL);
		ASSERT(qrp->qr_tail == NULL &&
		    qrp->qr_rtcount == 0 && qrp->qr_qlen == 0);
	}
	qp->qh_wchan = wchan;	/* valid until queue_unlock() is called */
	qp->qh_root = qrp;	/* valid until queue_unlock() is called */
	INCR32(qp->qh_lockcount);
	QVERIFY(qp);
	return (qp);
}

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
	QVERIFY(qp);
	spin_lock_clear(&qp->qh_lock);
}

/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.
 * We do this by making writers appear to have a half point higher
 * priority for purposes of priority comparisons below.
 */
#define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)

void
enqueue(queue_head_t *qp, ulwp_t *ulwp, int force_fifo)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *next;
	int pri = CMP_PRIO(ulwp);

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(ulwp->ul_sleepq != qp);

	if ((qrp = qp->qh_root) == NULL) {
		/* use the thread's queue root for the linkage */
		qrp = &ulwp->ul_queue_root;
		qrp->qr_next = qp->qh_hlist;
		qrp->qr_prev = NULL;
		qrp->qr_head = NULL;
		qrp->qr_tail = NULL;
		qrp->qr_wchan = qp->qh_wchan;
		qrp->qr_rtcount = 0;
		qrp->qr_qlen = 0;
		qrp->qr_qmax = 0;
		qp->qh_hlist->qr_prev = qrp;
		qp->qh_hlist = qrp;
		qp->qh_root = qrp;
		MAXINCR(qp->qh_hmax, qp->qh_hlen);
	}

	/*
	 * LIFO queue ordering is unfair and can lead to starvation,
	 * but it gives better performance for heavily contended locks.
	 * We use thread_queue_fifo (range is 0..8) to determine
	 * the frequency of FIFO vs LIFO queuing:
	 *	0 : every 256th time	(almost always LIFO)
	 *	1 : every 128th time
	 *	2 : every 64th time
	 *	3 : every 32nd time
	 *	4 : every 16th time	(the default value, mostly LIFO)
	 *	5 : every 8th time
	 *	6 : every 4th time
	 *	7 : every 2nd time
	 *	8 : every time		(never LIFO, always FIFO)
	 * Note that there is always some degree of FIFO ordering.
	 * This breaks live lock conditions that occur in applications
	 * that are written assuming (incorrectly) that threads acquire
	 * locks fairly, that is, in roughly round-robin order.
	 * In any event, the queue is maintained in kernel priority order.
	 *
	 * If force_fifo is non-zero, fifo queueing is forced.
	 * SUSV3 requires this for semaphores.
	 */
	if (qrp->qr_head == NULL) {
		/*
		 * The queue is empty.  LIFO/FIFO doesn't matter.
		 */
		ASSERT(qrp->qr_tail == NULL);
		ulwpp = &qrp->qr_head;
	} else if (force_fifo |
	    (((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0)) {
		/*
		 * Enqueue after the last thread whose priority is greater
		 * than or equal to the priority of the thread being queued.
		 * Attempt first to go directly onto the tail of the queue.
		 */
		if (pri <= CMP_PRIO(qrp->qr_tail))
			ulwpp = &qrp->qr_tail->ul_link;
		else {
			for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
			    ulwpp = &next->ul_link)
				if (pri > CMP_PRIO(next))
					break;
		}
	} else {
		/*
		 * Enqueue before the first thread whose priority is less
		 * than or equal to the priority of the thread being queued.
		 * Hopefully we can go directly onto the head of the queue.
		 */
		for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
		    ulwpp = &next->ul_link)
			if (pri >= CMP_PRIO(next))
				break;
	}
	if ((ulwp->ul_link = *ulwpp) == NULL)
		qrp->qr_tail = ulwp;
	*ulwpp = ulwp;

	ulwp->ul_sleepq = qp;
	ulwp->ul_wchan = qp->qh_wchan;
	ulwp->ul_qtype = qp->qh_type;
	if ((ulwp->ul_schedctl != NULL &&
	    ulwp->ul_schedctl->sc_cid == ulwp->ul_rtclassid) |
	    ulwp->ul_pilocks) {
		ulwp->ul_rtqueued = 1;
		qrp->qr_rtcount++;
	}
	MAXINCR(qrp->qr_qmax, qrp->qr_qlen);
	MAXINCR(qp->qh_qmax, qp->qh_qlen);
}

/*
 * Helper function for queue_slot() and queue_slot_rt().
 * Try to find a non-suspended thread on the queue.
 */
static ulwp_t **
queue_slot_runnable(ulwp_t **ulwpp, ulwp_t **prevp, int rt)
{
	ulwp_t *ulwp;
	ulwp_t **foundpp = NULL;
	int priority = -1;
	ulwp_t *prev;
	int tpri;

	for (prev = NULL;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_stop)	/* skip suspended threads */
			continue;
		tpri = rt? CMP_PRIO(ulwp) : 0;
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
			if (!rt)
				break;
		}
	}
	return (foundpp);
}

/*
 * For real-time, we search the entire queue because the dispatch
 * (kernel) priorities may have changed since enqueueing.
 */
static ulwp_t **
queue_slot_rt(ulwp_t **ulwpp_org, ulwp_t **prevp)
{
	ulwp_t **ulwpp = ulwpp_org;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t **foundpp = ulwpp;
	int priority = CMP_PRIO(ulwp);
	ulwp_t *prev;
	int tpri;

	for (prev = ulwp, ulwpp = &ulwp->ul_link;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		tpri = CMP_PRIO(ulwp);
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
		}
	}
	ulwp = *foundpp;

	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
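	 * (Waking a thread that is stopped via thr_suspend() does no
	 * good; it cannot run again until it is explicitly continued.)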
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(ulwpp_org, prevp, 1)) != NULL) {
		foundpp = ulwpp;
		ulwp = *foundpp;
	}
	ulwp->ul_rt = 1;
	return (foundpp);
}

ulwp_t **
queue_slot(queue_head_t *qp, ulwp_t **prevp, int *more)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int rt;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

	if ((qrp = qp->qh_root) == NULL || (ulwp = qrp->qr_head) == NULL) {
		*more = 0;
		return (NULL);		/* no lwps on the queue */
	}
	rt = (qrp->qr_rtcount != 0);
	*prevp = NULL;
	if (ulwp->ul_link == NULL) {	/* only one lwp on the queue */
		*more = 0;
		ulwp->ul_rt = rt;
		return (&qrp->qr_head);
	}
	*more = 1;

	if (rt)		/* real-time queue */
		return (queue_slot_rt(&qrp->qr_head, prevp));
	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(&qrp->qr_head, prevp, 0)) != NULL) {
		ulwp = *ulwpp;
		ulwp->ul_rt = 0;
		return (ulwpp);
	}
	/*
	 * The common case; just pick the first thread on the queue.
	 */
	ulwp->ul_rt = 0;
	return (&qrp->qr_head);
}

/*
 * Common code for unlinking an lwp from a user-level sleep queue.
 */
void
queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
{
	queue_root_t *qrp = qp->qh_root;
	queue_root_t *nqrp;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t *next;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(qp->qh_wchan != NULL && ulwp->ul_wchan == qp->qh_wchan);

	DECR(qp->qh_qlen);
	DECR(qrp->qr_qlen);
	if (ulwp->ul_rtqueued) {
		ulwp->ul_rtqueued = 0;
		qrp->qr_rtcount--;
	}
	next = ulwp->ul_link;
	*ulwpp = next;
	ulwp->ul_link = NULL;
	if (qrp->qr_tail == ulwp)
		qrp->qr_tail = prev;
	if (qrp == &ulwp->ul_queue_root) {
		/*
		 * We can't continue to use the unlinked thread's
		 * queue root for the linkage.
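		 * Once unlinked, the thread may exit or go to sleep
		 * elsewhere, taking its embedded ul_queue_root with
		 * it.  So we either transplant the root into the last
		 * remaining thread's ul_queue_root or, if the queue is
		 * now empty, delete the root from the hash list.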
		 */
		queue_root_t *qr_next = qrp->qr_next;
		queue_root_t *qr_prev = qrp->qr_prev;

		if (qrp->qr_tail) {
			/* switch to using the last thread's queue root */
			ASSERT(qrp->qr_qlen != 0);
			nqrp = &qrp->qr_tail->ul_queue_root;
			*nqrp = *qrp;
			if (qr_next)
				qr_next->qr_prev = nqrp;
			if (qr_prev)
				qr_prev->qr_next = nqrp;
			else
				qp->qh_hlist = nqrp;
			qp->qh_root = nqrp;
		} else {
			/* empty queue root; just delete from the hash list */
			ASSERT(qrp->qr_qlen == 0);
			if (qr_next)
				qr_next->qr_prev = qr_prev;
			if (qr_prev)
				qr_prev->qr_next = qr_next;
			else
				qp->qh_hlist = qr_next;
			qp->qh_root = NULL;
			DECR(qp->qh_hlen);
		}
	}
}

ulwp_t *
dequeue(queue_head_t *qp, int *more)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;

	if ((ulwpp = queue_slot(qp, &prev, more)) == NULL)
		return (NULL);
	ulwp = *ulwpp;
	queue_unlink(qp, ulwpp, prev);
	ulwp->ul_sleepq = NULL;
	ulwp->ul_wchan = NULL;
	return (ulwp);
}

/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp)
{
	ulwp_t **ulwpp;
	ulwp_t *prev;
	int more;

	if ((ulwpp = queue_slot(qp, &prev, &more)) == NULL)
		return (NULL);
	return (*ulwpp);
}

int
dequeue_self(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int found = 0;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

	/* find self on the sleep queue */
	if ((qrp = qp->qh_root) != NULL) {
		for (prev = NULL, ulwpp = &qrp->qr_head;
		    (ulwp = *ulwpp) != NULL;
		    prev = ulwp, ulwpp = &ulwp->ul_link) {
			if (ulwp == self) {
				queue_unlink(qp, ulwpp, prev);
				self->ul_cvmutex = NULL;
				self->ul_sleepq = NULL;
				self->ul_wchan = NULL;
				found = 1;
				break;
			}
		}
	}

	if (!found)
		thr_panic("dequeue_self(): curthread not found on queue");

	return ((qrp = qp->qh_root) != NULL && qrp->qr_head != NULL);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourself off of our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;

	/*
	 * Calling enter_critical()/exit_critical() here would lead
	 * to recursion.  Just manipulate self->ul_critical directly.
	 */
	self->ul_critical++;
	while (self->ul_sleepq != NULL) {
		qp = queue_lock(self->ul_wchan, self->ul_qtype);
		/*
		 * We may have been moved from a CV queue to a
		 * mutex queue while we were attempting queue_lock().
		 * If so, just loop around and try again.
		 * dequeue_self() clears self->ul_sleepq.
		 */
		if (qp == self->ul_sleepq)
			(void) dequeue_self(qp);
		queue_unlock(qp);
	}
	self->ul_writer = 0;
	self->ul_critical--;
}

/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
static int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep;
	int acquired;
	int error;

	self->ul_sp = stkptr();
	self->ul_wchan = mp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
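			 * If mutex_ownerpid is not our pid, the lock
			 * was acquired in the parent and still belongs
			 * to it, so we loop and block again.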
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}
	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;
	self->ul_wchan = NULL;
	self->ul_sp = 0;

	if (acquired) {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	int error;
	int acquired;

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_trylock(mp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}

	if (acquired) {
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else if (error != EBUSY) {
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	sc_shared_t *tmp;

	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
	    !self->ul_vfork &&			/* not a child of vfork() */
	    !self->ul_schedctl_called) {	/* haven't been called before */
		enter_critical(self);
		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
			self->ul_schedctl = scp = tmp;
		exit_critical(self);
	}
	/*
	 * Unless the call to setup_schedctl() is surrounded
	 * by enter_critical()/exit_critical(), the address
	 * we are returning could be invalid due to a forkall()
	 * having occurred in another thread.
	 */
	return (scp);
}

/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = schedctl_init
schedctl_t *
schedctl_init(void)
{
	volatile sc_shared_t *scp = setup_schedctl();
	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}

void
schedctl_exit(void)
{
}

/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
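 * (Returning the address of ul_schedctl rather than its current value
 * presumably lets the caller re-fetch the pointer on each use and so
 * pick up a pointer that setup_schedctl() establishes only later.)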
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *volatile *ptr;

	if (self->ul_vfork)
		return (NULL);
	if (*(ptr = &self->ul_schedctl) == NULL)
		(void) setup_schedctl();
	return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_preempt++ == 0) {
		enter_critical(self);
		if ((scp = self->ul_schedctl) != NULL ||
		    (scp = setup_schedctl()) != NULL) {
			/*
			 * Save the pre-existing preempt value.
			 */
			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
			scp->sc_preemptctl.sc_nopreempt = 1;
		}
	}
}

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	ASSERT(self->ul_preempt > 0);
	if (--self->ul_preempt == 0) {
		if ((scp = self->ul_schedctl) != NULL) {
			/*
			 * Restore the pre-existing preempt value.
			 */
			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
			if (scp->sc_preemptctl.sc_yield &&
			    scp->sc_preemptctl.sc_nopreempt == 0) {
				yield();
				if (scp->sc_preemptctl.sc_yield) {
					/*
					 * Shouldn't happen.  This is either
					 * a race condition or the thread
					 * just entered the real-time class.
					 */
					yield();
					scp->sc_preemptctl.sc_yield = 0;
				}
			}
		}
		exit_critical(self);
	}
}

/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
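 * The cost of guessing wrong is one superfluous __lwp_unpark(), which
 * is cheap compared to leaving the target lwp parked while we yield.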
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
	volatile sc_shared_t *scp = self->ul_schedctl;

	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
	    (self->ul_curplease && self->ul_critical == 1)) {
		(void) __lwp_unpark(lwpid);
		lwpid = 0;
	}
	return (lwpid);
}

/*
 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
static int
mutex_trylock_adaptive(mutex_t *mp, int tryhard)
{
	ulwp_t *self = curthread;
	int error = EBUSY;
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
	volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
	uint32_t new_lockword;
	int count = 0;
	int max_count;
	uint8_t max_spinners;

	ASSERT(!(mp->mutex_type & USYNC_PROCESS));

	if (MUTEX_OWNER(mp) == self)
		return (EBUSY);

	/* short-cut, not definitive (see below) */
	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		error = ENOTRECOVERABLE;
		goto done;
	}

	/*
	 * Make one attempt to acquire the lock before
	 * incurring the overhead of the spin loop.
	 */
	if (set_lock_byte(lockp) == 0) {
		*ownerp = (uintptr_t)self;
		error = 0;
		goto done;
	}
	if (!tryhard)
		goto done;
	if (ncpus == 0)
		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	if ((max_spinners = self->ul_max_spinners) >= ncpus)
		max_spinners = ncpus - 1;
	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
	if (max_count == 0)
		goto done;

	/*
	 * This spin loop is unfair to lwps that have already dropped into
	 * the kernel to sleep.  They will starve on a highly-contended mutex.
	 * This is just too bad.  The adaptive spin algorithm is intended
	 * to allow programs with highly-contended locks (that is, broken
	 * programs) to execute with reasonable speed despite their contention.
	 * Being fair would reduce the speed of such programs and well-written
	 * programs will not suffer in any case.
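	 *
	 * The loop below is bounded by two per-thread tunables, copied
	 * from the globals described at the top of this file:
	 * ul_adaptive_spin, the iteration count (settable with, e.g.,
	 * _THREAD_ADAPTIVE_SPIN=5000 in the environment), and
	 * ul_max_spinners, the limit on concurrent spinners (capped
	 * just above at ncpus - 1).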
	 */
	enter_critical(self);
	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
		exit_critical(self);
		goto done;
	}
	DTRACE_PROBE1(plockstat, mutex__spin, mp);
	for (count = 1; ; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
			break;
		}
		if (count == max_count)
			break;
		SMT_PAUSE();
		/*
		 * Stop spinning if the mutex owner is not running on
		 * a processor; it will not drop the lock any time soon
		 * and we would just be wasting time to keep spinning.
		 *
		 * Note that we are looking at another thread (ulwp_t)
		 * without ensuring that the other thread does not exit.
		 * The scheme relies on ulwp_t structures never being
		 * deallocated by the library (the library employs a free
		 * list of ulwp_t structs that are reused when new threads
		 * are created) and on schedctl shared memory never being
		 * deallocated once created via __schedctl().
		 *
		 * Thus, the worst that can happen when the spinning thread
		 * looks at the owner's schedctl data is that it is looking
		 * at some other thread's schedctl data.  This almost never
		 * happens and is benign when it does.
		 */
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}
	new_lockword = spinners_decr(&mp->mutex_lockword);
	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
		/*
		 * We haven't yet acquired the lock, the lock
		 * is free, and there are no other spinners.
		 * Make one final attempt to acquire the lock.
		 *
		 * This isn't strictly necessary since mutex_lock_queue()
		 * (the next action this thread will take if it doesn't
		 * acquire the lock here) makes one attempt to acquire
		 * the lock before putting the thread to sleep.
		 *
		 * If the next action for this thread (on failure here)
		 * were not to call mutex_lock_queue(), this would be
		 * necessary for correctness, to avoid ending up with an
		 * unheld mutex with waiters but no one to wake them up.
		 */
		if (set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
		}
		count++;
	}
	exit_critical(self);

done:
	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		/*
		 * We shouldn't own the mutex.
		 * Just clear the lock; everyone has already been waked up.
13714574Sraf */ 13724574Sraf mp->mutex_owner = 0; 13736057Sraf (void) clear_lockbyte(&mp->mutex_lockword); 13744574Sraf error = ENOTRECOVERABLE; 13754574Sraf } 13764574Sraf 13774574Sraf if (error) { 13785629Sraf if (count) { 13795629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 0, count); 13805629Sraf } 13814574Sraf if (error != EBUSY) { 13824574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 13834574Sraf } 13844574Sraf } else { 13855629Sraf if (count) { 13865629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 13875629Sraf } 13884574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 13894574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 13904574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 13914574Sraf error = EOWNERDEAD; 13924574Sraf } 13934574Sraf } 13944574Sraf 13954574Sraf return (error); 13960Sstevel@tonic-gate } 13970Sstevel@tonic-gate 13980Sstevel@tonic-gate /* 13990Sstevel@tonic-gate * Same as mutex_trylock_adaptive(), except specifically for queue locks. 14000Sstevel@tonic-gate * The owner field is not set here; the caller (spin_lock_set()) sets it. 14010Sstevel@tonic-gate */ 14024574Sraf static int 14030Sstevel@tonic-gate mutex_queuelock_adaptive(mutex_t *mp) 14040Sstevel@tonic-gate { 14050Sstevel@tonic-gate ulwp_t *ulwp; 14060Sstevel@tonic-gate volatile sc_shared_t *scp; 14070Sstevel@tonic-gate volatile uint8_t *lockp; 14080Sstevel@tonic-gate volatile uint64_t *ownerp; 14090Sstevel@tonic-gate int count = curthread->ul_queue_spin; 14100Sstevel@tonic-gate 14110Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 14120Sstevel@tonic-gate 14130Sstevel@tonic-gate if (count == 0) 14140Sstevel@tonic-gate return (EBUSY); 14150Sstevel@tonic-gate 14160Sstevel@tonic-gate lockp = (volatile uint8_t *)&mp->mutex_lockw; 14170Sstevel@tonic-gate ownerp = (volatile uint64_t *)&mp->mutex_owner; 14180Sstevel@tonic-gate while (--count >= 0) { 14190Sstevel@tonic-gate if (*lockp == 0 && set_lock_byte(lockp) == 0) 14200Sstevel@tonic-gate return (0); 14210Sstevel@tonic-gate SMT_PAUSE(); 14220Sstevel@tonic-gate if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL && 14230Sstevel@tonic-gate ((scp = ulwp->ul_schedctl) == NULL || 14240Sstevel@tonic-gate scp->sc_state != SC_ONPROC)) 14250Sstevel@tonic-gate break; 14260Sstevel@tonic-gate } 14270Sstevel@tonic-gate 14280Sstevel@tonic-gate return (EBUSY); 14290Sstevel@tonic-gate } 14300Sstevel@tonic-gate 14310Sstevel@tonic-gate /* 14320Sstevel@tonic-gate * Like mutex_trylock_adaptive(), but for process-shared mutexes. 14334613Sraf * Spin for a while (if 'tryhard' is true), trying to grab the lock. 14340Sstevel@tonic-gate * If this fails, return EBUSY and let the caller deal with it. 14350Sstevel@tonic-gate * If this succeeds, return 0 with mutex_owner set to curthread 14360Sstevel@tonic-gate * and mutex_ownerpid set to the current pid. 
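 *
 * The EBUSY contract here is what applications ultimately see from
 * pthread_mutex_trylock() (hedged example; work_with_lock_held()
 * and do_other_work() are hypothetical application routines):
 *
 *	int rc;
 *	if ((rc = pthread_mutex_trylock(&m)) == 0) {
 *		work_with_lock_held();
 *		(void) pthread_mutex_unlock(&m);
 *	} else if (rc == EBUSY) {
 *		do_other_work();
 *	}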
14370Sstevel@tonic-gate */ 14384574Sraf static int 14394613Sraf mutex_trylock_process(mutex_t *mp, int tryhard) 14400Sstevel@tonic-gate { 14410Sstevel@tonic-gate ulwp_t *self = curthread; 14425629Sraf uberdata_t *udp = self->ul_uberdata; 14434574Sraf int error = EBUSY; 14446057Sraf volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64; 14455629Sraf uint32_t new_lockword; 14465629Sraf int count = 0; 14475629Sraf int max_count; 14485629Sraf uint8_t max_spinners; 14494574Sraf 1450*7255Sraf #if defined(__sparc) && !defined(_LP64) 1451*7255Sraf /* horrible hack, necessary only on 32-bit sparc */ 1452*7255Sraf int fix_alignment_problem = 1453*7255Sraf (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 1454*7255Sraf self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)); 1455*7255Sraf #endif 1456*7255Sraf 14574574Sraf ASSERT(mp->mutex_type & USYNC_PROCESS); 14584574Sraf 14594574Sraf if (shared_mutex_held(mp)) 14600Sstevel@tonic-gate return (EBUSY); 14610Sstevel@tonic-gate 14624574Sraf /* short-cut, not definitive (see below) */ 14634574Sraf if (mp->mutex_flag & LOCK_NOTRECOVERABLE) { 14644574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 14655629Sraf error = ENOTRECOVERABLE; 14665629Sraf goto done; 14674574Sraf } 14684574Sraf 14695629Sraf /* 14705629Sraf * Make one attempt to acquire the lock before 14715629Sraf * incurring the overhead of the spin loop. 14725629Sraf */ 14735629Sraf enter_critical(self); 1474*7255Sraf #if defined(__sparc) && !defined(_LP64) 1475*7255Sraf /* horrible hack, necessary only on 32-bit sparc */ 1476*7255Sraf if (fix_alignment_problem) { 1477*7255Sraf if (set_lock_byte(&mp->mutex_lockw) == 0) { 1478*7255Sraf mp->mutex_ownerpid = udp->pid; 1479*7255Sraf mp->mutex_owner = (uintptr_t)self; 1480*7255Sraf exit_critical(self); 1481*7255Sraf error = 0; 1482*7255Sraf goto done; 1483*7255Sraf } 1484*7255Sraf } else 1485*7255Sraf #endif 14866057Sraf if (set_lock_byte64(lockp, udp->pid) == 0) { 14875629Sraf mp->mutex_owner = (uintptr_t)self; 14886057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 14895629Sraf exit_critical(self); 14905629Sraf error = 0; 14915629Sraf goto done; 14925629Sraf } 14935629Sraf exit_critical(self); 14945629Sraf if (!tryhard) 14955629Sraf goto done; 14964574Sraf if (ncpus == 0) 14974574Sraf ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN); 14985629Sraf if ((max_spinners = self->ul_max_spinners) >= ncpus) 14995629Sraf max_spinners = ncpus - 1; 15005629Sraf max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0; 15015629Sraf if (max_count == 0) 15025629Sraf goto done; 15035629Sraf 15040Sstevel@tonic-gate /* 15050Sstevel@tonic-gate * This is a process-shared mutex. 15060Sstevel@tonic-gate * We cannot know if the owner is running on a processor. 15070Sstevel@tonic-gate * We just spin and hope that it is on a processor. 
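 *
 * A process-shared mutex of the kind handled here is typically set
 * up by applications as follows (hedged sketch; error handling is
 * omitted, and MAP_ANON|MAP_SHARED presumes the sharing processes
 * are related by fork()):
 *
 *	pthread_mutexattr_t a;
 *	pthread_mutex_t *mp = mmap(NULL, sizeof (*mp),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
 *	(void) pthread_mutexattr_init(&a);
 *	(void) pthread_mutexattr_setpshared(&a, PTHREAD_PROCESS_SHARED);
 *	(void) pthread_mutex_init(mp, &a);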
15080Sstevel@tonic-gate */ 15094574Sraf enter_critical(self); 15105629Sraf if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) { 15115629Sraf exit_critical(self); 15125629Sraf goto done; 15135629Sraf } 15145629Sraf DTRACE_PROBE1(plockstat, mutex__spin, mp); 15155629Sraf for (count = 1; ; count++) { 1516*7255Sraf #if defined(__sparc) && !defined(_LP64) 1517*7255Sraf /* horrible hack, necessary only on 32-bit sparc */ 1518*7255Sraf if (fix_alignment_problem) { 1519*7255Sraf if ((*lockp & LOCKMASK64) == 0 && 1520*7255Sraf set_lock_byte(&mp->mutex_lockw) == 0) { 1521*7255Sraf mp->mutex_ownerpid = udp->pid; 1522*7255Sraf mp->mutex_owner = (uintptr_t)self; 1523*7255Sraf error = 0; 1524*7255Sraf break; 1525*7255Sraf } 1526*7255Sraf } else 1527*7255Sraf #endif 15286057Sraf if ((*lockp & LOCKMASK64) == 0 && 15296057Sraf set_lock_byte64(lockp, udp->pid) == 0) { 15304574Sraf mp->mutex_owner = (uintptr_t)self; 15316057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 15324574Sraf error = 0; 15334574Sraf break; 15344574Sraf } 15355629Sraf if (count == max_count) 15365629Sraf break; 15374574Sraf SMT_PAUSE(); 15384574Sraf } 15395629Sraf new_lockword = spinners_decr(&mp->mutex_lockword); 15405629Sraf if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) { 15415629Sraf /* 15425629Sraf * We haven't yet acquired the lock, the lock 15435629Sraf * is free, and there are no other spinners. 15445629Sraf * Make one final attempt to acquire the lock. 15455629Sraf * 15465629Sraf * This isn't strictly necessary since mutex_lock_kernel() 15475629Sraf * (the next action this thread will take if it doesn't 15485629Sraf * acquire the lock here) makes one attempt to acquire 15495629Sraf * the lock before putting the thread to sleep. 15505629Sraf * 15515629Sraf * If the next action for this thread (on failure here) 15525629Sraf * were not to call mutex_lock_kernel(), this would be 15535629Sraf * necessary for correctness, to avoid ending up with an 15545629Sraf * unheld mutex with waiters but no one to wake them up. 15555629Sraf */ 1556*7255Sraf #if defined(__sparc) && !defined(_LP64) 1557*7255Sraf /* horrible hack, necessary only on 32-bit sparc */ 1558*7255Sraf if (fix_alignment_problem) { 1559*7255Sraf if (set_lock_byte(&mp->mutex_lockw) == 0) { 1560*7255Sraf mp->mutex_ownerpid = udp->pid; 1561*7255Sraf mp->mutex_owner = (uintptr_t)self; 1562*7255Sraf error = 0; 1563*7255Sraf } 1564*7255Sraf } else 1565*7255Sraf #endif 15666057Sraf if (set_lock_byte64(lockp, udp->pid) == 0) { 15675629Sraf mp->mutex_owner = (uintptr_t)self; 15686057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 15695629Sraf error = 0; 15705629Sraf } 15715629Sraf count++; 15725629Sraf } 15734574Sraf exit_critical(self); 15744574Sraf 15755629Sraf done: 15764574Sraf if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { 15774574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 15784574Sraf /* 15796057Sraf * We shouldn't own the mutex. 15806057Sraf * Just clear the lock; everyone has already been waked up. 
15814574Sraf */ 15824574Sraf mp->mutex_owner = 0; 15836057Sraf /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */ 15846057Sraf (void) clear_lockbyte64(&mp->mutex_lockword64); 15854574Sraf error = ENOTRECOVERABLE; 15860Sstevel@tonic-gate } 15870Sstevel@tonic-gate 15884574Sraf if (error) { 15895629Sraf if (count) { 15905629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 0, count); 15915629Sraf } 15924574Sraf if (error != EBUSY) { 15934574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 15944574Sraf } 15954574Sraf } else { 15965629Sraf if (count) { 15975629Sraf DTRACE_PROBE2(plockstat, mutex__spun, 1, count); 15985629Sraf } 15994574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count); 16004574Sraf if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) { 16014574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 16024574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) 16034574Sraf error = EOWNERDEAD; 16044574Sraf else if (mp->mutex_type & USYNC_PROCESS_ROBUST) 16054574Sraf error = ELOCKUNMAPPED; 16064574Sraf else 16074574Sraf error = EOWNERDEAD; 16084574Sraf } 16094574Sraf } 16104574Sraf 16114574Sraf return (error); 16120Sstevel@tonic-gate } 16130Sstevel@tonic-gate 16140Sstevel@tonic-gate /* 16150Sstevel@tonic-gate * Mutex wakeup code for releasing a USYNC_THREAD mutex. 16160Sstevel@tonic-gate * Returns the lwpid of the thread that was dequeued, if any. 16170Sstevel@tonic-gate * The caller of mutex_wakeup() must call __lwp_unpark(lwpid) 16180Sstevel@tonic-gate * to wake up the specified lwp. 16190Sstevel@tonic-gate */ 16204574Sraf static lwpid_t 16210Sstevel@tonic-gate mutex_wakeup(mutex_t *mp) 16220Sstevel@tonic-gate { 16230Sstevel@tonic-gate lwpid_t lwpid = 0; 16246247Sraf int more; 16250Sstevel@tonic-gate queue_head_t *qp; 16260Sstevel@tonic-gate ulwp_t *ulwp; 16270Sstevel@tonic-gate 16280Sstevel@tonic-gate /* 16290Sstevel@tonic-gate * Dequeue a waiter from the sleep queue. Don't touch the mutex 16300Sstevel@tonic-gate * waiters bit if no one was found on the queue because the mutex 16310Sstevel@tonic-gate * might have been deallocated or reallocated for another purpose. 16320Sstevel@tonic-gate */ 16330Sstevel@tonic-gate qp = queue_lock(mp, MX); 16346247Sraf if ((ulwp = dequeue(qp, &more)) != NULL) { 16350Sstevel@tonic-gate lwpid = ulwp->ul_lwpid; 16366247Sraf mp->mutex_waiters = more; 16370Sstevel@tonic-gate } 16380Sstevel@tonic-gate queue_unlock(qp); 16390Sstevel@tonic-gate return (lwpid); 16400Sstevel@tonic-gate } 16410Sstevel@tonic-gate 16420Sstevel@tonic-gate /* 16434574Sraf * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex. 16444574Sraf */ 16454574Sraf static void 16464574Sraf mutex_wakeup_all(mutex_t *mp) 16474574Sraf { 16484574Sraf queue_head_t *qp; 16496247Sraf queue_root_t *qrp; 16504574Sraf int nlwpid = 0; 16514574Sraf int maxlwps = MAXLWPS; 16524574Sraf ulwp_t *ulwp; 16534574Sraf lwpid_t buffer[MAXLWPS]; 16544574Sraf lwpid_t *lwpid = buffer; 16554574Sraf 16564574Sraf /* 16574574Sraf * Walk the list of waiters and prepare to wake up all of them. 16584574Sraf * The waiters flag has already been cleared from the mutex. 16594574Sraf * 16604574Sraf * We keep track of lwpids that are to be unparked in lwpid[]. 16614574Sraf * __lwp_unpark_all() is called to unpark all of them after 16624574Sraf * they have been removed from the sleep queue and the sleep 16634574Sraf * queue lock has been dropped. 
If we run out of space in our 16644574Sraf * on-stack buffer, we need to allocate more but we can't call 16654574Sraf * lmalloc() because we are holding a queue lock when the overflow 16664574Sraf * occurs and lmalloc() acquires a lock. We can't use alloca() 16674574Sraf * either because the application may have allocated a small 16684574Sraf * stack and we don't want to overrun the stack. So we call 16694574Sraf * alloc_lwpids() to allocate a bigger buffer using the mmap() 16704574Sraf * system call directly since that path acquires no locks. 16714574Sraf */ 16724574Sraf qp = queue_lock(mp, MX); 16736247Sraf for (;;) { 16746247Sraf if ((qrp = qp->qh_root) == NULL || 16756247Sraf (ulwp = qrp->qr_head) == NULL) 16766247Sraf break; 16776247Sraf ASSERT(ulwp->ul_wchan == mp); 16786247Sraf queue_unlink(qp, &qrp->qr_head, NULL); 16796247Sraf ulwp->ul_sleepq = NULL; 16806247Sraf ulwp->ul_wchan = NULL; 16816247Sraf if (nlwpid == maxlwps) 16826247Sraf lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 16836247Sraf lwpid[nlwpid++] = ulwp->ul_lwpid; 16844574Sraf } 16854574Sraf 16864574Sraf if (nlwpid == 0) { 16874574Sraf queue_unlock(qp); 16884574Sraf } else { 16895629Sraf mp->mutex_waiters = 0; 16904574Sraf no_preempt(curthread); 16914574Sraf queue_unlock(qp); 16924574Sraf if (nlwpid == 1) 16934574Sraf (void) __lwp_unpark(lwpid[0]); 16944574Sraf else 16954574Sraf (void) __lwp_unpark_all(lwpid, nlwpid); 16964574Sraf preempt(curthread); 16974574Sraf } 16984574Sraf 16994574Sraf if (lwpid != buffer) 17006515Sraf (void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t)); 17014574Sraf } 17024574Sraf 17034574Sraf /* 17045629Sraf * Release a process-private mutex. 17055629Sraf * As an optimization, if there are waiters but there are also spinners 17065629Sraf * attempting to acquire the mutex, then don't bother waking up a waiter; 17075629Sraf * one of the spinners will acquire the mutex soon and it would be a waste 17085629Sraf * of resources to wake up some thread just to have it spin for a while 17095629Sraf * and then possibly go back to sleep. See mutex_trylock_adaptive(). 17100Sstevel@tonic-gate */ 17114574Sraf static lwpid_t 17124574Sraf mutex_unlock_queue(mutex_t *mp, int release_all) 17130Sstevel@tonic-gate { 17145629Sraf lwpid_t lwpid = 0; 17155629Sraf uint32_t old_lockword; 17165629Sraf 17176057Sraf DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 17185629Sraf mp->mutex_owner = 0; 17195629Sraf old_lockword = clear_lockbyte(&mp->mutex_lockword); 17205629Sraf if ((old_lockword & WAITERMASK) && 17215629Sraf (release_all || (old_lockword & SPINNERMASK) == 0)) { 17225629Sraf ulwp_t *self = curthread; 17230Sstevel@tonic-gate no_preempt(self); /* ensure a prompt wakeup */ 17245629Sraf if (release_all) 17255629Sraf mutex_wakeup_all(mp); 17265629Sraf else 17275629Sraf lwpid = mutex_wakeup(mp); 17285629Sraf if (lwpid == 0) 17295629Sraf preempt(self); 17304574Sraf } 17310Sstevel@tonic-gate return (lwpid); 17320Sstevel@tonic-gate } 17330Sstevel@tonic-gate 17340Sstevel@tonic-gate /* 17350Sstevel@tonic-gate * Like mutex_unlock_queue(), but for process-shared mutexes. 
17360Sstevel@tonic-gate */ 17374574Sraf static void 17384574Sraf mutex_unlock_process(mutex_t *mp, int release_all) 17390Sstevel@tonic-gate { 1740*7255Sraf ulwp_t *self = curthread; 17416057Sraf uint64_t old_lockword64; 17426057Sraf 17436057Sraf DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 17440Sstevel@tonic-gate mp->mutex_owner = 0; 1745*7255Sraf #if defined(__sparc) && !defined(_LP64) 1746*7255Sraf /* horrible hack, necessary only on 32-bit sparc */ 1747*7255Sraf if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 1748*7255Sraf self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)) { 1749*7255Sraf uint32_t old_lockword; 1750*7255Sraf mp->mutex_ownerpid = 0; 1751*7255Sraf old_lockword = clear_lockbyte(&mp->mutex_lockword); 1752*7255Sraf if ((old_lockword & WAITERMASK) && 1753*7255Sraf (release_all || (old_lockword & SPINNERMASK) == 0)) { 1754*7255Sraf no_preempt(self); /* ensure a prompt wakeup */ 1755*7255Sraf (void) ___lwp_mutex_wakeup(mp, release_all); 1756*7255Sraf preempt(self); 1757*7255Sraf } 1758*7255Sraf return; 1759*7255Sraf } 1760*7255Sraf #endif 17616057Sraf /* mp->mutex_ownerpid is cleared by clear_lockbyte64() */ 17626057Sraf old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64); 17636057Sraf if ((old_lockword64 & WAITERMASK64) && 17646057Sraf (release_all || (old_lockword64 & SPINNERMASK64) == 0)) { 17655629Sraf no_preempt(self); /* ensure a prompt wakeup */ 17665629Sraf (void) ___lwp_mutex_wakeup(mp, release_all); 17675629Sraf preempt(self); 17680Sstevel@tonic-gate } 17690Sstevel@tonic-gate } 17700Sstevel@tonic-gate 17710Sstevel@tonic-gate void 17720Sstevel@tonic-gate stall(void) 17730Sstevel@tonic-gate { 17740Sstevel@tonic-gate for (;;) 17750Sstevel@tonic-gate (void) mutex_lock_kernel(&stall_mutex, NULL, NULL); 17760Sstevel@tonic-gate } 17770Sstevel@tonic-gate 17780Sstevel@tonic-gate /* 17790Sstevel@tonic-gate * Acquire a USYNC_THREAD mutex via user-level sleep queues. 17800Sstevel@tonic-gate * We failed set_lock_byte(&mp->mutex_lockw) before coming here. 17814574Sraf * If successful, returns with mutex_owner set correctly. 17820Sstevel@tonic-gate */ 17830Sstevel@tonic-gate int 17840Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp, 17850Sstevel@tonic-gate timespec_t *tsp) 17860Sstevel@tonic-gate { 17870Sstevel@tonic-gate uberdata_t *udp = curthread->ul_uberdata; 17880Sstevel@tonic-gate queue_head_t *qp; 17890Sstevel@tonic-gate hrtime_t begin_sleep; 17900Sstevel@tonic-gate int error = 0; 17910Sstevel@tonic-gate 17920Sstevel@tonic-gate self->ul_sp = stkptr(); 17930Sstevel@tonic-gate if (__td_event_report(self, TD_SLEEP, udp)) { 17940Sstevel@tonic-gate self->ul_wchan = mp; 17950Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_SLEEP; 17960Sstevel@tonic-gate self->ul_td_evbuf.eventdata = mp; 17970Sstevel@tonic-gate tdb_event(TD_SLEEP, udp); 17980Sstevel@tonic-gate } 17990Sstevel@tonic-gate if (msp) { 18000Sstevel@tonic-gate tdb_incr(msp->mutex_sleep); 18010Sstevel@tonic-gate begin_sleep = gethrtime(); 18020Sstevel@tonic-gate } 18030Sstevel@tonic-gate 18040Sstevel@tonic-gate DTRACE_PROBE1(plockstat, mutex__block, mp); 18050Sstevel@tonic-gate 18060Sstevel@tonic-gate /* 18070Sstevel@tonic-gate * Put ourself on the sleep queue, and while we are 18080Sstevel@tonic-gate * unable to grab the lock, go park in the kernel. 18090Sstevel@tonic-gate * Take ourself off the sleep queue after we acquire the lock. 18100Sstevel@tonic-gate * The waiter bit can be set/cleared only while holding the queue lock. 
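 *
 * Stripped of timeouts, signals, and statistics, the loop below is
 * the classic "enqueue, re-test, park" protocol (a hedged sketch of
 * the shape only; the real loop follows and handles the cases this
 * sketch ignores):
 *
 *	enqueue(qp, self, 0);
 *	mp->mutex_waiters = 1;
 *	while (set_lock_byte(&mp->mutex_lockw) != 0) {
 *		queue_unlock(qp);
 *		(void) __lwp_park(NULL, 0);
 *		qp = queue_lock(mp, MX);
 *		if (self->ul_sleepq == NULL)
 *			enqueue(qp, self, 0);
 *	}
 *	mp->mutex_owner = (uintptr_t)self;
 *	mp->mutex_waiters = dequeue_self(qp);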
18110Sstevel@tonic-gate */ 18120Sstevel@tonic-gate qp = queue_lock(mp, MX); 18136247Sraf enqueue(qp, self, 0); 18140Sstevel@tonic-gate mp->mutex_waiters = 1; 18150Sstevel@tonic-gate for (;;) { 18160Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 18170Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 18186247Sraf mp->mutex_waiters = dequeue_self(qp); 18190Sstevel@tonic-gate break; 18200Sstevel@tonic-gate } 18210Sstevel@tonic-gate set_parking_flag(self, 1); 18220Sstevel@tonic-gate queue_unlock(qp); 18230Sstevel@tonic-gate /* 18240Sstevel@tonic-gate * __lwp_park() will return the residual time in tsp 18250Sstevel@tonic-gate * if we are unparked before the timeout expires. 18260Sstevel@tonic-gate */ 18275629Sraf error = __lwp_park(tsp, 0); 18280Sstevel@tonic-gate set_parking_flag(self, 0); 18290Sstevel@tonic-gate /* 18300Sstevel@tonic-gate * We could have taken a signal or suspended ourself. 18310Sstevel@tonic-gate * If we did, then we removed ourself from the queue. 18320Sstevel@tonic-gate * Someone else may have removed us from the queue 18330Sstevel@tonic-gate * as a consequence of mutex_unlock(). We may have 18340Sstevel@tonic-gate * gotten a timeout from __lwp_park(). Or we may still 18350Sstevel@tonic-gate * be on the queue and this is just a spurious wakeup. 18360Sstevel@tonic-gate */ 18370Sstevel@tonic-gate qp = queue_lock(mp, MX); 18380Sstevel@tonic-gate if (self->ul_sleepq == NULL) { 18395629Sraf if (error) { 18406247Sraf mp->mutex_waiters = queue_waiter(qp)? 1 : 0; 18415629Sraf if (error != EINTR) 18425629Sraf break; 18435629Sraf error = 0; 18445629Sraf } 18450Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 18460Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 18470Sstevel@tonic-gate break; 18480Sstevel@tonic-gate } 18496247Sraf enqueue(qp, self, 0); 18500Sstevel@tonic-gate mp->mutex_waiters = 1; 18510Sstevel@tonic-gate } 18520Sstevel@tonic-gate ASSERT(self->ul_sleepq == qp && 18530Sstevel@tonic-gate self->ul_qtype == MX && 18540Sstevel@tonic-gate self->ul_wchan == mp); 18550Sstevel@tonic-gate if (error) { 18565629Sraf if (error != EINTR) { 18576247Sraf mp->mutex_waiters = dequeue_self(qp); 18585629Sraf break; 18595629Sraf } 18605629Sraf error = 0; 18610Sstevel@tonic-gate } 18620Sstevel@tonic-gate } 18630Sstevel@tonic-gate ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 18640Sstevel@tonic-gate self->ul_wchan == NULL); 18650Sstevel@tonic-gate self->ul_sp = 0; 18660Sstevel@tonic-gate queue_unlock(qp); 18674574Sraf 18680Sstevel@tonic-gate if (msp) 18690Sstevel@tonic-gate msp->mutex_sleep_time += gethrtime() - begin_sleep; 18700Sstevel@tonic-gate 18710Sstevel@tonic-gate ASSERT(error == 0 || error == EINVAL || error == ETIME); 18724574Sraf 18734574Sraf if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { 18744574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 18754574Sraf /* 18766057Sraf * We shouldn't own the mutex. 18776057Sraf * Just clear the lock; everyone has already been waked up. 
18784574Sraf */ 18794574Sraf mp->mutex_owner = 0; 18806057Sraf (void) clear_lockbyte(&mp->mutex_lockword); 18814574Sraf error = ENOTRECOVERABLE; 18824574Sraf } 18834574Sraf 18844574Sraf if (error) { 18854574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); 18864574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 18874574Sraf } else { 18884574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 18894574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 18904574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 18914574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 18924574Sraf error = EOWNERDEAD; 18934574Sraf } 18944574Sraf } 18954574Sraf 18960Sstevel@tonic-gate return (error); 18970Sstevel@tonic-gate } 18980Sstevel@tonic-gate 18994574Sraf static int 19004574Sraf mutex_recursion(mutex_t *mp, int mtype, int try) 19014574Sraf { 19026812Sraf ASSERT(mutex_held(mp)); 19034574Sraf ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)); 19044574Sraf ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 19054574Sraf 19064574Sraf if (mtype & LOCK_RECURSIVE) { 19074574Sraf if (mp->mutex_rcount == RECURSION_MAX) { 19084574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN); 19094574Sraf return (EAGAIN); 19104574Sraf } 19114574Sraf mp->mutex_rcount++; 19124574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0); 19134574Sraf return (0); 19144574Sraf } 19154574Sraf if (try == MUTEX_LOCK) { 19164574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 19174574Sraf return (EDEADLK); 19184574Sraf } 19194574Sraf return (EBUSY); 19204574Sraf } 19214574Sraf 19224574Sraf /* 19234574Sraf * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so 19244574Sraf * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary. 19254574Sraf * We use tdb_hash_lock here and in the synch object tracking code in 19264574Sraf * the tdb_agent.c file. There is no conflict between these two usages. 19274574Sraf */ 19284574Sraf void 19294574Sraf register_lock(mutex_t *mp) 19304574Sraf { 19314574Sraf uberdata_t *udp = curthread->ul_uberdata; 19324574Sraf uint_t hash = LOCK_HASH(mp); 19334574Sraf robust_t *rlp; 19344574Sraf robust_t **rlpp; 19354574Sraf robust_t **table; 19364574Sraf 19374574Sraf if ((table = udp->robustlocks) == NULL) { 19384574Sraf lmutex_lock(&udp->tdb_hash_lock); 19394574Sraf if ((table = udp->robustlocks) == NULL) { 19404574Sraf table = lmalloc(LOCKHASHSZ * sizeof (robust_t *)); 19416812Sraf membar_producer(); 19424574Sraf udp->robustlocks = table; 19434574Sraf } 19444574Sraf lmutex_unlock(&udp->tdb_hash_lock); 19454574Sraf } 19466812Sraf membar_consumer(); 19474574Sraf 19484574Sraf /* 19494574Sraf * First search the registered table with no locks held. 19504574Sraf * This is safe because the table never shrinks 19514574Sraf * and we can only get a false negative. 19524574Sraf */ 19534574Sraf for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) { 19544574Sraf if (rlp->robust_lock == mp) /* already registered */ 19554574Sraf return; 19564574Sraf } 19574574Sraf 19584574Sraf /* 19594574Sraf * The lock was not found. 19604574Sraf * Repeat the operation with tdb_hash_lock held. 
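 *
 * In isolation, this is the usual unlocked-probe, locked-recheck
 * insertion (hedged sketch; head, node_t, key, list_lock, and
 * new_node() are placeholders, not names from this file):
 *
 *	node_t *e;
 *	for (e = head; e != NULL; e = e->next)
 *		if (e->key == key)
 *			return;
 *	lmutex_lock(&list_lock);
 *	for (e = head; e != NULL; e = e->next)
 *		if (e->key == key)
 *			break;
 *	if (e == NULL) {
 *		e = new_node(key);
 *		e->next = head;
 *		membar_producer();
 *		head = e;
 *	}
 *	lmutex_unlock(&list_lock);
 *
 * The membar_producer() publishes the fully-built node before the
 * unlocked readers can see it, pairing with membar_consumer() on
 * the read side.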
19614574Sraf */ 19624574Sraf lmutex_lock(&udp->tdb_hash_lock); 19634574Sraf 19644574Sraf for (rlpp = &table[hash]; 19654574Sraf (rlp = *rlpp) != NULL; 19664574Sraf rlpp = &rlp->robust_next) { 19674574Sraf if (rlp->robust_lock == mp) { /* already registered */ 19684574Sraf lmutex_unlock(&udp->tdb_hash_lock); 19694574Sraf return; 19704574Sraf } 19714574Sraf } 19724574Sraf 19734574Sraf /* 19744574Sraf * The lock has never been registered. 19754574Sraf * Register it now and add it to the table. 19764574Sraf */ 19774574Sraf (void) ___lwp_mutex_register(mp); 19784574Sraf rlp = lmalloc(sizeof (*rlp)); 19794574Sraf rlp->robust_lock = mp; 19806812Sraf membar_producer(); 19814574Sraf *rlpp = rlp; 19824574Sraf 19834574Sraf lmutex_unlock(&udp->tdb_hash_lock); 19844574Sraf } 19854574Sraf 19864574Sraf /* 19874574Sraf * This is called in the child of fork()/forkall() to start over 19884574Sraf * with a clean slate. (Each process must register its own locks.) 19894574Sraf * No locks are needed because all other threads are suspended or gone. 19904574Sraf */ 19914574Sraf void 19924574Sraf unregister_locks(void) 19934574Sraf { 19944574Sraf uberdata_t *udp = curthread->ul_uberdata; 19954574Sraf uint_t hash; 19964574Sraf robust_t **table; 19974574Sraf robust_t *rlp; 19984574Sraf robust_t *next; 19994574Sraf 20004574Sraf if ((table = udp->robustlocks) != NULL) { 20014574Sraf for (hash = 0; hash < LOCKHASHSZ; hash++) { 20024574Sraf rlp = table[hash]; 20034574Sraf while (rlp != NULL) { 20044574Sraf next = rlp->robust_next; 20054574Sraf lfree(rlp, sizeof (*rlp)); 20064574Sraf rlp = next; 20074574Sraf } 20084574Sraf } 20094574Sraf lfree(table, LOCKHASHSZ * sizeof (robust_t *)); 20104574Sraf udp->robustlocks = NULL; 20114574Sraf } 20124574Sraf } 20134574Sraf 20140Sstevel@tonic-gate /* 20150Sstevel@tonic-gate * Returns with mutex_owner set correctly. 20160Sstevel@tonic-gate */ 20176247Sraf int 20180Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try) 20190Sstevel@tonic-gate { 20200Sstevel@tonic-gate ulwp_t *self = curthread; 20210Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 20220Sstevel@tonic-gate int mtype = mp->mutex_type; 20230Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 20240Sstevel@tonic-gate int error = 0; 20256247Sraf int noceil = try & MUTEX_NOCEIL; 20264574Sraf uint8_t ceil; 20274574Sraf int myprio; 20280Sstevel@tonic-gate 20296247Sraf try &= ~MUTEX_NOCEIL; 20300Sstevel@tonic-gate ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 20310Sstevel@tonic-gate 20320Sstevel@tonic-gate if (!self->ul_schedctl_called) 20330Sstevel@tonic-gate (void) setup_schedctl(); 20340Sstevel@tonic-gate 20350Sstevel@tonic-gate if (msp && try == MUTEX_TRY) 20360Sstevel@tonic-gate tdb_incr(msp->mutex_try); 20370Sstevel@tonic-gate 20386812Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_held(mp)) 20394574Sraf return (mutex_recursion(mp, mtype, try)); 20400Sstevel@tonic-gate 20410Sstevel@tonic-gate if (self->ul_error_detection && try == MUTEX_LOCK && 20426812Sraf tsp == NULL && mutex_held(mp)) 20430Sstevel@tonic-gate lock_error(mp, "mutex_lock", NULL, NULL); 20440Sstevel@tonic-gate 20456247Sraf if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) { 20466247Sraf update_sched(self); 20476247Sraf if (self->ul_cid != self->ul_rtclassid) { 20486247Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM); 20496247Sraf return (EPERM); 20506247Sraf } 20514574Sraf ceil = mp->mutex_ceiling; 20526247Sraf myprio = self->ul_epri? 
self->ul_epri : self->ul_pri; 20534574Sraf if (myprio > ceil) { 20544574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL); 20554574Sraf return (EINVAL); 20564574Sraf } 20574574Sraf if ((error = _ceil_mylist_add(mp)) != 0) { 20584574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 20594574Sraf return (error); 20600Sstevel@tonic-gate } 20614574Sraf if (myprio < ceil) 20624574Sraf _ceil_prio_inherit(ceil); 20634574Sraf } 20644574Sraf 20654574Sraf if ((mtype & (USYNC_PROCESS | LOCK_ROBUST)) 20664574Sraf == (USYNC_PROCESS | LOCK_ROBUST)) 20674574Sraf register_lock(mp); 20684574Sraf 20694574Sraf if (mtype & LOCK_PRIO_INHERIT) { 20704574Sraf /* go straight to the kernel */ 20714574Sraf if (try == MUTEX_TRY) 20724574Sraf error = mutex_trylock_kernel(mp); 20734574Sraf else /* MUTEX_LOCK */ 20744574Sraf error = mutex_lock_kernel(mp, tsp, msp); 20754574Sraf /* 20764574Sraf * The kernel never sets or clears the lock byte 20774574Sraf * for LOCK_PRIO_INHERIT mutexes. 20784574Sraf * Set it here for consistency. 20794574Sraf */ 20804574Sraf switch (error) { 20814574Sraf case 0: 20826247Sraf self->ul_pilocks++; 20834574Sraf mp->mutex_lockw = LOCKSET; 20844574Sraf break; 20854574Sraf case EOWNERDEAD: 20864574Sraf case ELOCKUNMAPPED: 20876247Sraf self->ul_pilocks++; 20884574Sraf mp->mutex_lockw = LOCKSET; 20894574Sraf /* FALLTHROUGH */ 20904574Sraf case ENOTRECOVERABLE: 20914574Sraf ASSERT(mtype & LOCK_ROBUST); 20924574Sraf break; 20934574Sraf case EDEADLK: 20944574Sraf if (try == MUTEX_LOCK) 20954574Sraf stall(); 20964574Sraf error = EBUSY; 20974574Sraf break; 20980Sstevel@tonic-gate } 20990Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 21004613Sraf error = mutex_trylock_process(mp, try == MUTEX_LOCK); 21014574Sraf if (error == EBUSY && try == MUTEX_LOCK) 21020Sstevel@tonic-gate error = mutex_lock_kernel(mp, tsp, msp); 21035629Sraf } else { /* USYNC_THREAD */ 21044613Sraf error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK); 21054574Sraf if (error == EBUSY && try == MUTEX_LOCK) 21064574Sraf error = mutex_lock_queue(self, msp, mp, tsp); 21070Sstevel@tonic-gate } 21080Sstevel@tonic-gate 21090Sstevel@tonic-gate switch (error) { 21104574Sraf case 0: 21110Sstevel@tonic-gate case EOWNERDEAD: 21120Sstevel@tonic-gate case ELOCKUNMAPPED: 21134574Sraf if (mtype & LOCK_ROBUST) 21144574Sraf remember_lock(mp); 21150Sstevel@tonic-gate if (msp) 21160Sstevel@tonic-gate record_begin_hold(msp); 21170Sstevel@tonic-gate break; 21180Sstevel@tonic-gate default: 21196247Sraf if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) { 21204574Sraf (void) _ceil_mylist_del(mp); 21214574Sraf if (myprio < ceil) 21224574Sraf _ceil_prio_waive(); 21234574Sraf } 21240Sstevel@tonic-gate if (try == MUTEX_TRY) { 21250Sstevel@tonic-gate if (msp) 21260Sstevel@tonic-gate tdb_incr(msp->mutex_try_fail); 21270Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 21280Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 21290Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 21300Sstevel@tonic-gate } 21310Sstevel@tonic-gate } 21320Sstevel@tonic-gate break; 21330Sstevel@tonic-gate } 21340Sstevel@tonic-gate 21350Sstevel@tonic-gate return (error); 21360Sstevel@tonic-gate } 21370Sstevel@tonic-gate 21380Sstevel@tonic-gate int 21390Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try) 21400Sstevel@tonic-gate { 21410Sstevel@tonic-gate ulwp_t *self = curthread; 21420Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 21430Sstevel@tonic-gate 21440Sstevel@tonic-gate /* 
21450Sstevel@tonic-gate * We know that USYNC_PROCESS is set in mtype and that 21460Sstevel@tonic-gate * zero, one, or both of the flags LOCK_RECURSIVE and 21470Sstevel@tonic-gate * LOCK_ERRORCHECK are set, and that no other flags are set. 21480Sstevel@tonic-gate */ 21494574Sraf ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0); 21500Sstevel@tonic-gate enter_critical(self); 2151*7255Sraf #if defined(__sparc) && !defined(_LP64) 2152*7255Sraf /* horrible hack, necessary only on 32-bit sparc */ 2153*7255Sraf if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 2154*7255Sraf self->ul_misaligned) { 2155*7255Sraf if (set_lock_byte(&mp->mutex_lockw) == 0) { 2156*7255Sraf mp->mutex_ownerpid = udp->pid; 2157*7255Sraf mp->mutex_owner = (uintptr_t)self; 2158*7255Sraf exit_critical(self); 2159*7255Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 2160*7255Sraf return (0); 2161*7255Sraf } 2162*7255Sraf } else 2163*7255Sraf #endif 21646057Sraf if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) { 21650Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 21666057Sraf /* mp->mutex_ownerpid was set by set_lock_byte64() */ 21670Sstevel@tonic-gate exit_critical(self); 21680Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 21690Sstevel@tonic-gate return (0); 21700Sstevel@tonic-gate } 21710Sstevel@tonic-gate exit_critical(self); 21720Sstevel@tonic-gate 21734574Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp)) 21744574Sraf return (mutex_recursion(mp, mtype, try)); 21754574Sraf 21764613Sraf if (try == MUTEX_LOCK) { 21774613Sraf if (mutex_trylock_process(mp, 1) == 0) 21784613Sraf return (0); 21790Sstevel@tonic-gate return (mutex_lock_kernel(mp, tsp, NULL)); 21804613Sraf } 21810Sstevel@tonic-gate 21820Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 21830Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 21840Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 21850Sstevel@tonic-gate } 21860Sstevel@tonic-gate return (EBUSY); 21870Sstevel@tonic-gate } 21880Sstevel@tonic-gate 21890Sstevel@tonic-gate static int 21900Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp) 21910Sstevel@tonic-gate { 21920Sstevel@tonic-gate ulwp_t *self = curthread; 21936247Sraf int mtype = mp->mutex_type; 21940Sstevel@tonic-gate uberflags_t *gflags; 21950Sstevel@tonic-gate 2196*7255Sraf if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) && 2197*7255Sraf self->ul_error_detection && self->ul_misaligned == 0) 2198*7255Sraf lock_error(mp, "mutex_lock", NULL, "mutex is misaligned"); 2199*7255Sraf 22000Sstevel@tonic-gate /* 22010Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 22020Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 22030Sstevel@tonic-gate * no error detection, no lock statistics, 22040Sstevel@tonic-gate * and the process has only a single thread. 22050Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 22060Sstevel@tonic-gate */ 22076247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 22086247Sraf self->ul_uberdata->uberflags.uf_all) == 0) { 22090Sstevel@tonic-gate /* 22100Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 
22110Sstevel@tonic-gate 		 */
22120Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
22130Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
22140Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
22150Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
22160Sstevel@tonic-gate 			return (0);
22170Sstevel@tonic-gate 		}
22184574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
22194574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_LOCK));
22200Sstevel@tonic-gate 		/*
22210Sstevel@tonic-gate 		 * We have reached a deadlock, probably because the
22220Sstevel@tonic-gate 		 * process is executing non-async-signal-safe code in
22230Sstevel@tonic-gate 		 * a signal handler and is attempting to acquire a lock
22240Sstevel@tonic-gate 		 * that it already owns.  This is not surprising, given
22250Sstevel@tonic-gate 		 * bad programming practices over the years that have
22260Sstevel@tonic-gate 		 * resulted in applications calling printf() and such
22270Sstevel@tonic-gate 		 * in their signal handlers.  Unless the user has told
22280Sstevel@tonic-gate 		 * us that the signal handlers are safe by setting:
22290Sstevel@tonic-gate 		 *	export _THREAD_ASYNC_SAFE=1
22300Sstevel@tonic-gate 		 * we return EDEADLK rather than actually deadlocking.
22310Sstevel@tonic-gate 		 */
22320Sstevel@tonic-gate 		if (tsp == NULL &&
22330Sstevel@tonic-gate 		    MUTEX_OWNER(mp) == self && !self->ul_async_safe) {
22340Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
22350Sstevel@tonic-gate 			return (EDEADLK);
22360Sstevel@tonic-gate 		}
22370Sstevel@tonic-gate 	}
22380Sstevel@tonic-gate 
22390Sstevel@tonic-gate 	/*
22400Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
22410Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
22420Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
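 *
 * A side note on the EDEADLK case above: with an error-checking
 * mutex the EDEADLK return is mandatory rather than dependent on
 * _THREAD_ASYNC_SAFE (hedged example; error handling elided):
 *
 *	pthread_mutexattr_t a;
 *	pthread_mutex_t m;
 *	(void) pthread_mutexattr_init(&a);
 *	(void) pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
 *	(void) pthread_mutex_init(&m, &a);
 *	(void) pthread_mutex_lock(&m);
 *	assert(pthread_mutex_lock(&m) == EDEADLK);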
22430Sstevel@tonic-gate */ 22440Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 22450Sstevel@tonic-gate (gflags->uf_trs_ted | 22460Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 22470Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 22480Sstevel@tonic-gate return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK)); 22490Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 22500Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 22510Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 22520Sstevel@tonic-gate return (0); 22530Sstevel@tonic-gate } 22544574Sraf if (mtype && MUTEX_OWNER(mp) == self) 22554574Sraf return (mutex_recursion(mp, mtype, MUTEX_LOCK)); 22564613Sraf if (mutex_trylock_adaptive(mp, 1) != 0) 22574574Sraf return (mutex_lock_queue(self, NULL, mp, tsp)); 22584574Sraf return (0); 22590Sstevel@tonic-gate } 22600Sstevel@tonic-gate 22610Sstevel@tonic-gate /* else do it the long way */ 22620Sstevel@tonic-gate return (mutex_lock_internal(mp, tsp, MUTEX_LOCK)); 22630Sstevel@tonic-gate } 22640Sstevel@tonic-gate 22656812Sraf #pragma weak pthread_mutex_lock = mutex_lock 22666812Sraf #pragma weak _mutex_lock = mutex_lock 22670Sstevel@tonic-gate int 22686812Sraf mutex_lock(mutex_t *mp) 22690Sstevel@tonic-gate { 22700Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 22710Sstevel@tonic-gate return (mutex_lock_impl(mp, NULL)); 22720Sstevel@tonic-gate } 22730Sstevel@tonic-gate 22740Sstevel@tonic-gate int 22756812Sraf pthread_mutex_timedlock(pthread_mutex_t *_RESTRICT_KYWD mp, 22766812Sraf const struct timespec *_RESTRICT_KYWD abstime) 22770Sstevel@tonic-gate { 22780Sstevel@tonic-gate timespec_t tslocal; 22790Sstevel@tonic-gate int error; 22800Sstevel@tonic-gate 22810Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 22820Sstevel@tonic-gate abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal); 22836812Sraf error = mutex_lock_impl((mutex_t *)mp, &tslocal); 22840Sstevel@tonic-gate if (error == ETIME) 22850Sstevel@tonic-gate error = ETIMEDOUT; 22860Sstevel@tonic-gate return (error); 22870Sstevel@tonic-gate } 22880Sstevel@tonic-gate 22890Sstevel@tonic-gate int 22906812Sraf pthread_mutex_reltimedlock_np(pthread_mutex_t *_RESTRICT_KYWD mp, 22916812Sraf const struct timespec *_RESTRICT_KYWD reltime) 22920Sstevel@tonic-gate { 22930Sstevel@tonic-gate timespec_t tslocal; 22940Sstevel@tonic-gate int error; 22950Sstevel@tonic-gate 22960Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 22970Sstevel@tonic-gate tslocal = *reltime; 22986812Sraf error = mutex_lock_impl((mutex_t *)mp, &tslocal); 22990Sstevel@tonic-gate if (error == ETIME) 23000Sstevel@tonic-gate error = ETIMEDOUT; 23010Sstevel@tonic-gate return (error); 23020Sstevel@tonic-gate } 23030Sstevel@tonic-gate 23046812Sraf #pragma weak pthread_mutex_trylock = mutex_trylock 23050Sstevel@tonic-gate int 23066812Sraf mutex_trylock(mutex_t *mp) 23070Sstevel@tonic-gate { 23080Sstevel@tonic-gate ulwp_t *self = curthread; 23090Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 23106247Sraf int mtype = mp->mutex_type; 23110Sstevel@tonic-gate uberflags_t *gflags; 23120Sstevel@tonic-gate 23130Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 23146247Sraf 23150Sstevel@tonic-gate /* 23160Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 23170Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 23180Sstevel@tonic-gate * no error 
detection, no lock statistics, 23190Sstevel@tonic-gate * and the process has only a single thread. 23200Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 23210Sstevel@tonic-gate */ 23226247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 23230Sstevel@tonic-gate udp->uberflags.uf_all) == 0) { 23240Sstevel@tonic-gate /* 23250Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 23260Sstevel@tonic-gate */ 23270Sstevel@tonic-gate if (mp->mutex_lockw == 0) { 23280Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 23290Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 23300Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 23310Sstevel@tonic-gate return (0); 23320Sstevel@tonic-gate } 23334574Sraf if (mtype && MUTEX_OWNER(mp) == self) 23344574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 23350Sstevel@tonic-gate return (EBUSY); 23360Sstevel@tonic-gate } 23370Sstevel@tonic-gate 23380Sstevel@tonic-gate /* 23390Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 23400Sstevel@tonic-gate * no error detection, and no lock statistics. 23410Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 23420Sstevel@tonic-gate */ 23430Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 23440Sstevel@tonic-gate (gflags->uf_trs_ted | 23450Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 23460Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 23470Sstevel@tonic-gate return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY)); 23480Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 23490Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 23500Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 23510Sstevel@tonic-gate return (0); 23520Sstevel@tonic-gate } 23534574Sraf if (mtype && MUTEX_OWNER(mp) == self) 23544574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 23554613Sraf if (__td_event_report(self, TD_LOCK_TRY, udp)) { 23564613Sraf self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 23574613Sraf tdb_event(TD_LOCK_TRY, udp); 23580Sstevel@tonic-gate } 23594613Sraf return (EBUSY); 23600Sstevel@tonic-gate } 23610Sstevel@tonic-gate 23620Sstevel@tonic-gate /* else do it the long way */ 23630Sstevel@tonic-gate return (mutex_lock_internal(mp, NULL, MUTEX_TRY)); 23640Sstevel@tonic-gate } 23650Sstevel@tonic-gate 23660Sstevel@tonic-gate int 23674574Sraf mutex_unlock_internal(mutex_t *mp, int retain_robust_flags) 23680Sstevel@tonic-gate { 23690Sstevel@tonic-gate ulwp_t *self = curthread; 23700Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 23710Sstevel@tonic-gate int mtype = mp->mutex_type; 23720Sstevel@tonic-gate tdb_mutex_stats_t *msp; 23734574Sraf int error = 0; 23744574Sraf int release_all; 23750Sstevel@tonic-gate lwpid_t lwpid; 23760Sstevel@tonic-gate 23776812Sraf if ((mtype & LOCK_ERRORCHECK) && !mutex_held(mp)) 23780Sstevel@tonic-gate return (EPERM); 23790Sstevel@tonic-gate 23806812Sraf if (self->ul_error_detection && !mutex_held(mp)) 23810Sstevel@tonic-gate lock_error(mp, "mutex_unlock", NULL, NULL); 23820Sstevel@tonic-gate 23830Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 23840Sstevel@tonic-gate mp->mutex_rcount--; 23850Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 23860Sstevel@tonic-gate return (0); 23870Sstevel@tonic-gate } 23880Sstevel@tonic-gate 23890Sstevel@tonic-gate if ((msp = MUTEX_STATS(mp, udp)) != NULL) 23900Sstevel@tonic-gate (void) 
record_hold_time(msp); 23910Sstevel@tonic-gate 23924574Sraf if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) && 23934574Sraf (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 23944574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 23954574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 23964574Sraf mp->mutex_flag |= LOCK_NOTRECOVERABLE; 23974574Sraf } 23984574Sraf release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); 23994574Sraf 24004574Sraf if (mtype & LOCK_PRIO_INHERIT) { 24010Sstevel@tonic-gate no_preempt(self); 24020Sstevel@tonic-gate mp->mutex_owner = 0; 24036057Sraf /* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */ 24040Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 24054574Sraf mp->mutex_lockw = LOCKCLEAR; 24066247Sraf self->ul_pilocks--; 24074574Sraf error = ___lwp_mutex_unlock(mp); 24080Sstevel@tonic-gate preempt(self); 24090Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 24105629Sraf mutex_unlock_process(mp, release_all); 24110Sstevel@tonic-gate } else { /* USYNC_THREAD */ 24124574Sraf if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) { 24130Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 24140Sstevel@tonic-gate preempt(self); 24150Sstevel@tonic-gate } 24160Sstevel@tonic-gate } 24170Sstevel@tonic-gate 24184574Sraf if (mtype & LOCK_ROBUST) 24194574Sraf forget_lock(mp); 24204574Sraf 24214574Sraf if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) 24224574Sraf _ceil_prio_waive(); 24234574Sraf 24240Sstevel@tonic-gate return (error); 24250Sstevel@tonic-gate } 24260Sstevel@tonic-gate 24276812Sraf #pragma weak pthread_mutex_unlock = mutex_unlock 24286812Sraf #pragma weak _mutex_unlock = mutex_unlock 24290Sstevel@tonic-gate int 24306812Sraf mutex_unlock(mutex_t *mp) 24310Sstevel@tonic-gate { 24320Sstevel@tonic-gate ulwp_t *self = curthread; 24336247Sraf int mtype = mp->mutex_type; 24340Sstevel@tonic-gate uberflags_t *gflags; 24350Sstevel@tonic-gate lwpid_t lwpid; 24360Sstevel@tonic-gate short el; 24370Sstevel@tonic-gate 24380Sstevel@tonic-gate /* 24390Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 24400Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 24410Sstevel@tonic-gate * no error detection, no lock statistics, 24420Sstevel@tonic-gate * and the process has only a single thread. 24430Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 24440Sstevel@tonic-gate */ 24456247Sraf if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 24466247Sraf self->ul_uberdata->uberflags.uf_all) == 0) { 24470Sstevel@tonic-gate if (mtype) { 24480Sstevel@tonic-gate /* 24490Sstevel@tonic-gate * At this point we know that one or both of the 24500Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 24510Sstevel@tonic-gate */ 24520Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 24530Sstevel@tonic-gate return (EPERM); 24540Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 24550Sstevel@tonic-gate mp->mutex_rcount--; 24560Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 24570Sstevel@tonic-gate return (0); 24580Sstevel@tonic-gate } 24590Sstevel@tonic-gate } 24600Sstevel@tonic-gate /* 24610Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 24620Sstevel@tonic-gate * Also, there can be no waiters. 
24630Sstevel@tonic-gate */ 24640Sstevel@tonic-gate mp->mutex_owner = 0; 24650Sstevel@tonic-gate mp->mutex_lockword = 0; 24660Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 24670Sstevel@tonic-gate return (0); 24680Sstevel@tonic-gate } 24690Sstevel@tonic-gate 24700Sstevel@tonic-gate /* 24710Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 24720Sstevel@tonic-gate * no error detection, and no lock statistics. 24730Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 24740Sstevel@tonic-gate */ 24750Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL) { 24760Sstevel@tonic-gate if (((el = gflags->uf_trs_ted) | mtype) == 0) { 24770Sstevel@tonic-gate fast_unlock: 24785629Sraf if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 24790Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 24800Sstevel@tonic-gate preempt(self); 24810Sstevel@tonic-gate } 24820Sstevel@tonic-gate return (0); 24830Sstevel@tonic-gate } 24840Sstevel@tonic-gate if (el) /* error detection or lock statistics */ 24850Sstevel@tonic-gate goto slow_unlock; 24860Sstevel@tonic-gate if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 24870Sstevel@tonic-gate /* 24880Sstevel@tonic-gate * At this point we know that one or both of the 24890Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 24900Sstevel@tonic-gate */ 24910Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 24920Sstevel@tonic-gate return (EPERM); 24930Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 24940Sstevel@tonic-gate mp->mutex_rcount--; 24950Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 24960Sstevel@tonic-gate return (0); 24970Sstevel@tonic-gate } 24980Sstevel@tonic-gate goto fast_unlock; 24990Sstevel@tonic-gate } 25000Sstevel@tonic-gate if ((mtype & 25010Sstevel@tonic-gate ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 25020Sstevel@tonic-gate /* 25030Sstevel@tonic-gate * At this point we know that zero, one, or both of the 25040Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and 25050Sstevel@tonic-gate * that the USYNC_PROCESS flag is set. 25060Sstevel@tonic-gate */ 25070Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp)) 25080Sstevel@tonic-gate return (EPERM); 25090Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 25100Sstevel@tonic-gate mp->mutex_rcount--; 25110Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 25120Sstevel@tonic-gate return (0); 25130Sstevel@tonic-gate } 25145629Sraf mutex_unlock_process(mp, 0); 25150Sstevel@tonic-gate return (0); 25160Sstevel@tonic-gate } 25170Sstevel@tonic-gate } 25180Sstevel@tonic-gate 25190Sstevel@tonic-gate /* else do it the long way */ 25200Sstevel@tonic-gate slow_unlock: 25214574Sraf return (mutex_unlock_internal(mp, 0)); 25220Sstevel@tonic-gate } 25230Sstevel@tonic-gate 25240Sstevel@tonic-gate /* 25250Sstevel@tonic-gate * Internally to the library, almost all mutex lock/unlock actions 25260Sstevel@tonic-gate * go through these lmutex_ functions, to protect critical regions. 25276812Sraf * We replicate a bit of code from mutex_lock() and mutex_unlock() 25280Sstevel@tonic-gate * to make these functions faster since we know that the mutex type 25290Sstevel@tonic-gate * of all internal locks is USYNC_THREAD. We also know that internal 25300Sstevel@tonic-gate * locking can never fail, so we panic if it does. 
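 *
 * The "can never fail" rule is enforced in the usual way: a nonzero
 * return from an internal lock operation is treated as fatal.
 * Schematically (hedged sketch; checked_lock() is a hypothetical
 * name, not a function in this library):
 *
 *	void
 *	checked_lock(mutex_t *mp)
 *	{
 *		if (mutex_lock(mp) != 0)
 *			thr_panic("internal mutex_lock() failed");
 *	}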
25310Sstevel@tonic-gate */ 25320Sstevel@tonic-gate void 25330Sstevel@tonic-gate lmutex_lock(mutex_t *mp) 25340Sstevel@tonic-gate { 25350Sstevel@tonic-gate ulwp_t *self = curthread; 25360Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 25370Sstevel@tonic-gate 25380Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 25390Sstevel@tonic-gate 25400Sstevel@tonic-gate enter_critical(self); 25410Sstevel@tonic-gate /* 25420Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 25430Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 25440Sstevel@tonic-gate */ 25450Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 25460Sstevel@tonic-gate /* 25470Sstevel@tonic-gate * Only one thread exists; the mutex must be free. 25480Sstevel@tonic-gate */ 25490Sstevel@tonic-gate ASSERT(mp->mutex_lockw == 0); 25500Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 25510Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 25520Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 25530Sstevel@tonic-gate } else { 25540Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 25550Sstevel@tonic-gate 25560Sstevel@tonic-gate if (!self->ul_schedctl_called) 25570Sstevel@tonic-gate (void) setup_schedctl(); 25580Sstevel@tonic-gate 25590Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 25600Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 25610Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 25624613Sraf } else if (mutex_trylock_adaptive(mp, 1) != 0) { 25630Sstevel@tonic-gate (void) mutex_lock_queue(self, msp, mp, NULL); 25640Sstevel@tonic-gate } 25650Sstevel@tonic-gate 25660Sstevel@tonic-gate if (msp) 25670Sstevel@tonic-gate record_begin_hold(msp); 25680Sstevel@tonic-gate } 25690Sstevel@tonic-gate } 25700Sstevel@tonic-gate 25710Sstevel@tonic-gate void 25720Sstevel@tonic-gate lmutex_unlock(mutex_t *mp) 25730Sstevel@tonic-gate { 25740Sstevel@tonic-gate ulwp_t *self = curthread; 25750Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 25760Sstevel@tonic-gate 25770Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 25780Sstevel@tonic-gate 25790Sstevel@tonic-gate /* 25800Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 25810Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 25820Sstevel@tonic-gate */ 25830Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 25840Sstevel@tonic-gate /* 25850Sstevel@tonic-gate * Only one thread exists so there can be no waiters. 25860Sstevel@tonic-gate */ 25870Sstevel@tonic-gate mp->mutex_owner = 0; 25880Sstevel@tonic-gate mp->mutex_lockword = 0; 25890Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 25900Sstevel@tonic-gate } else { 25910Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 25920Sstevel@tonic-gate lwpid_t lwpid; 25930Sstevel@tonic-gate 25940Sstevel@tonic-gate if (msp) 25950Sstevel@tonic-gate (void) record_hold_time(msp); 25964574Sraf if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 25970Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 25980Sstevel@tonic-gate preempt(self); 25990Sstevel@tonic-gate } 26000Sstevel@tonic-gate } 26010Sstevel@tonic-gate exit_critical(self); 26020Sstevel@tonic-gate } 26030Sstevel@tonic-gate 26042248Sraf /* 26052248Sraf * For specialized code in libc, like the asynchronous i/o code, 26062248Sraf * the following sig_*() locking primitives are used in order 26072248Sraf * to make the code asynchronous signal safe. 
Signals are
26082248Sraf  * deferred while locks acquired by these functions are held.
26092248Sraf  */
26102248Sraf void
26112248Sraf sig_mutex_lock(mutex_t *mp)
26122248Sraf {
26132248Sraf 	sigoff(curthread);
26146515Sraf 	(void) mutex_lock(mp);
26152248Sraf }
26162248Sraf 
26172248Sraf void
26182248Sraf sig_mutex_unlock(mutex_t *mp)
26192248Sraf {
26206515Sraf 	(void) mutex_unlock(mp);
26212248Sraf 	sigon(curthread);
26222248Sraf }
26232248Sraf 
26242248Sraf int
26252248Sraf sig_mutex_trylock(mutex_t *mp)
26262248Sraf {
26272248Sraf 	int error;
26282248Sraf 
26292248Sraf 	sigoff(curthread);
26306515Sraf 	if ((error = mutex_trylock(mp)) != 0)
26312248Sraf 		sigon(curthread);
26322248Sraf 	return (error);
26332248Sraf }
26342248Sraf 
26352248Sraf /*
26362248Sraf  * sig_cond_wait() is a cancellation point.
26372248Sraf  */
26382248Sraf int
26392248Sraf sig_cond_wait(cond_t *cv, mutex_t *mp)
26402248Sraf {
26412248Sraf 	int error;
26422248Sraf 
26432248Sraf 	ASSERT(curthread->ul_sigdefer != 0);
26446515Sraf 	pthread_testcancel();
26455891Sraf 	error = __cond_wait(cv, mp);
26462248Sraf 	if (error == EINTR && curthread->ul_cursig) {
26472248Sraf 		sig_mutex_unlock(mp);
26482248Sraf 		/* take the deferred signal here */
26492248Sraf 		sig_mutex_lock(mp);
26502248Sraf 	}
26516515Sraf 	pthread_testcancel();
26522248Sraf 	return (error);
26532248Sraf }
26542248Sraf 
26552248Sraf /*
26562248Sraf  * sig_cond_reltimedwait() is a cancellation point.
26572248Sraf  */
26582248Sraf int
26592248Sraf sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
26602248Sraf {
26612248Sraf 	int error;
26622248Sraf 
26632248Sraf 	ASSERT(curthread->ul_sigdefer != 0);
26646515Sraf 	pthread_testcancel();
26655891Sraf 	error = __cond_reltimedwait(cv, mp, ts);
26662248Sraf 	if (error == EINTR && curthread->ul_cursig) {
26672248Sraf 		sig_mutex_unlock(mp);
26682248Sraf 		/* take the deferred signal here */
26692248Sraf 		sig_mutex_lock(mp);
26702248Sraf 	}
26716515Sraf 	pthread_testcancel();
26722248Sraf 	return (error);
26732248Sraf }
26742248Sraf 
26755891Sraf /*
26765891Sraf  * For specialized code in libc, like the stdio code,
26775891Sraf  * the following cancel_safe_*() locking primitives are used in
26785891Sraf  * order to make the code cancellation-safe.  Cancellation is
26795891Sraf  * deferred while locks acquired by these functions are held.
26805891Sraf  */
26815891Sraf void
26825891Sraf cancel_safe_mutex_lock(mutex_t *mp)
26835891Sraf {
26846515Sraf 	(void) mutex_lock(mp);
26855891Sraf 	curthread->ul_libc_locks++;
26865891Sraf }
26875891Sraf 
26885891Sraf int
26895891Sraf cancel_safe_mutex_trylock(mutex_t *mp)
26905891Sraf {
26915891Sraf 	int error;
26925891Sraf 
26936515Sraf 	if ((error = mutex_trylock(mp)) == 0)
26945891Sraf 		curthread->ul_libc_locks++;
26955891Sraf 	return (error);
26965891Sraf }
26975891Sraf 
26985891Sraf void
26995891Sraf cancel_safe_mutex_unlock(mutex_t *mp)
27005891Sraf {
27015891Sraf 	ulwp_t *self = curthread;
27025891Sraf 
27035891Sraf 	ASSERT(self->ul_libc_locks != 0);
27045891Sraf 
27056515Sraf 	(void) mutex_unlock(mp);
27065891Sraf 
27075891Sraf 	/*
27085891Sraf 	 * Decrement the count of locks held by cancel_safe_mutex_lock().
27095891Sraf 	 * If we are then in a position to terminate cleanly, and if
27105891Sraf 	 * there is a pending cancellation, cancellation is not disabled,
27115891Sraf 	 * and we received EINTR from a recent system call, then perform
27125891Sraf 	 * the cancellation action now.
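	 *
	 * Application code that wants the same property around its
	 * own locks defers cancellation explicitly (hedged example;
	 * work_on_shared_data() is a hypothetical application routine):
	 *
	 *	int old;
	 *	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
	 *	(void) pthread_mutex_lock(&m);
	 *	work_on_shared_data();
	 *	(void) pthread_mutex_unlock(&m);
	 *	(void) pthread_setcancelstate(old, NULL);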
static int
shared_mutex_held(mutex_t *mparg)
{
        /*
         * The 'volatile' is necessary to make sure the compiler doesn't
         * reorder the tests of the various components of the mutex.
         * They must be tested in this order:
         *	mutex_lockw
         *	mutex_owner
         *	mutex_ownerpid
         * This relies on the fact that wherever mutex_lockw is cleared,
         * mutex_owner and mutex_ownerpid are cleared first; that
         * wherever mutex_lockw is set, mutex_owner and mutex_ownerpid
         * are set afterward; and that mutex_lockw is set or cleared
         * with a memory barrier.
         */
        volatile mutex_t *mp = (volatile mutex_t *)mparg;
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;

        return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid);
}

#pragma weak _mutex_held = mutex_held
int
mutex_held(mutex_t *mparg)
{
        volatile mutex_t *mp = (volatile mutex_t *)mparg;

        if (mparg->mutex_type & USYNC_PROCESS)
                return (shared_mutex_held(mparg));
        return (MUTEX_OWNED(mp, curthread));
}

#pragma weak pthread_mutex_destroy = mutex_destroy
#pragma weak _mutex_destroy = mutex_destroy
int
mutex_destroy(mutex_t *mp)
{
        if (mp->mutex_type & USYNC_PROCESS)
                forget_lock(mp);
        (void) memset(mp, 0, sizeof (*mp));
        tdb_sync_obj_deregister(mp);
        return (0);
}

#pragma weak pthread_mutex_consistent_np = mutex_consistent
int
mutex_consistent(mutex_t *mp)
{
        /*
         * Do this only for an inconsistent, initialized robust lock
         * that we hold.  For all other cases, return EINVAL.
         */
        if (mutex_held(mp) &&
            (mp->mutex_type & LOCK_ROBUST) &&
            (mp->mutex_flag & LOCK_INITED) &&
            (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
                mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
                mp->mutex_rcount = 0;
                return (0);
        }
        return (EINVAL);
}
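/*
 * Editor's illustration (not part of the original source): how an
 * application recovers a robust mutex through the interface above.
 * The lock name and the repair routine are hypothetical.
 *
 *	pthread_mutex_t *mp = &shared_region->lock;	(hypothetical)
 *	int error = pthread_mutex_lock(mp);
 *	if (error == EOWNERDEAD) {
 *		repair_shared_state(shared_region);	(hypothetical)
 *		(void) pthread_mutex_consistent_np(mp);
 *	}
 *	... critical section ...
 *	(void) pthread_mutex_unlock(mp);
 *
 * If the new owner unlocks without calling
 * pthread_mutex_consistent_np(), the lock becomes permanently
 * LOCK_NOTRECOVERABLE and subsequent lock attempts fail with
 * ENOTRECOVERABLE.
 */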
/*
 * Spin locks are separate from ordinary mutexes,
 * but we use the same data structure for them.
 */

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
        mutex_t *mp = (mutex_t *)lock;

        (void) memset(mp, 0, sizeof (*mp));
        if (pshared == PTHREAD_PROCESS_SHARED)
                mp->mutex_type = USYNC_PROCESS;
        else
                mp->mutex_type = USYNC_THREAD;
        mp->mutex_flag = LOCK_INITED;
        mp->mutex_magic = MUTEX_MAGIC;

        /*
         * This should be at the beginning of the function,
         * but for the sake of old broken applications that
         * do not have proper alignment for their mutexes
         * (and don't check the return code from pthread_spin_init),
         * we put it here, after initializing the mutex regardless.
         */
        if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
            curthread->ul_misaligned == 0)
                return (EINVAL);

        return (0);
}

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{
        (void) memset(lock, 0, sizeof (*lock));
        return (0);
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{
        mutex_t *mp = (mutex_t *)lock;
        ulwp_t *self = curthread;
        int error = 0;

        no_preempt(self);
        if (set_lock_byte(&mp->mutex_lockw) != 0)
                error = EBUSY;
        else {
                mp->mutex_owner = (uintptr_t)self;
                if (mp->mutex_type == USYNC_PROCESS)
                        mp->mutex_ownerpid = self->ul_uberdata->pid;
                DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
        }
        preempt(self);
        return (error);
}
int
pthread_spin_lock(pthread_spinlock_t *lock)
{
        mutex_t *mp = (mutex_t *)lock;
        ulwp_t *self = curthread;
        volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
        int count = 0;

        ASSERT(!self->ul_critical || self->ul_bindflags);

        DTRACE_PROBE1(plockstat, mutex__spin, mp);

        /*
         * We don't care whether the owner is running on a processor.
         * We just spin because that's what this interface requires.
         */
        for (;;) {
                if (*lockp == 0) {	/* lock byte appears to be clear */
                        no_preempt(self);
                        if (set_lock_byte(lockp) == 0)
                                break;
                        preempt(self);
                }
                if (count < INT_MAX)
                        count++;
                SMT_PAUSE();
        }
        mp->mutex_owner = (uintptr_t)self;
        if (mp->mutex_type == USYNC_PROCESS)
                mp->mutex_ownerpid = self->ul_uberdata->pid;
        preempt(self);
        if (count) {
                DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
        }
        DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
        return (0);
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{
        mutex_t *mp = (mutex_t *)lock;
        ulwp_t *self = curthread;

        no_preempt(self);
        mp->mutex_owner = 0;
        mp->mutex_ownerpid = 0;
        DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
        (void) atomic_swap_32(&mp->mutex_lockword, 0);
        preempt(self);
        return (0);
}
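/*
 * Editor's illustration (not part of the original source): typical
 * application-level use of the spin lock interfaces defined above.
 * The lock and counter names are hypothetical.
 *
 *	static pthread_spinlock_t count_lock;		(hypothetical)
 *	static uint64_t count;				(hypothetical)
 *
 *	(void) pthread_spin_init(&count_lock, PTHREAD_PROCESS_PRIVATE);
 *
 *	(void) pthread_spin_lock(&count_lock);
 *	count++;		keep the critical section very short
 *	(void) pthread_spin_unlock(&count_lock);
 *
 * A spin lock never blocks in the kernel; a waiter burns CPU until
 * the lock byte clears, so this interface is appropriate only for
 * short, lightly contended critical sections.
 */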
#define	INITIAL_LOCKS	8	/* initial size of ul_heldlocks.array */

/*
 * Find/allocate an entry for 'lock' in our array of held locks.
 */
static mutex_t **
find_lock_entry(mutex_t *lock)
{
        ulwp_t *self = curthread;
        mutex_t **remembered = NULL;
        mutex_t **lockptr;
        uint_t nlocks;

        if ((nlocks = self->ul_heldlockcnt) != 0)
                lockptr = self->ul_heldlocks.array;
        else {
                nlocks = 1;
                lockptr = &self->ul_heldlocks.single;
        }

        for (; nlocks; nlocks--, lockptr++) {
                if (*lockptr == lock)
                        return (lockptr);
                if (*lockptr == NULL && remembered == NULL)
                        remembered = lockptr;
        }
        if (remembered != NULL) {
                *remembered = lock;
                return (remembered);
        }

        /*
         * No entry available.  Allocate more space, converting
         * the single entry into an array of entries if necessary.
         */
        if ((nlocks = self->ul_heldlockcnt) == 0) {
                /*
                 * Initial allocation of the array.
                 * Convert the single entry into an array.
                 */
                self->ul_heldlockcnt = nlocks = INITIAL_LOCKS;
                lockptr = lmalloc(nlocks * sizeof (mutex_t *));
                /*
                 * The single entry becomes the first entry in the array.
                 */
                *lockptr = self->ul_heldlocks.single;
                self->ul_heldlocks.array = lockptr;
                /*
                 * Return the next available entry in the array.
                 */
                *++lockptr = lock;
                return (lockptr);
        }
        /*
         * Reallocate the array, double the size each time.
         */
        lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *));
        (void) memcpy(lockptr, self->ul_heldlocks.array,
            nlocks * sizeof (mutex_t *));
        lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
        self->ul_heldlocks.array = lockptr;
        self->ul_heldlockcnt *= 2;
        /*
         * Return the next available entry in the newly allocated array.
         */
        *(lockptr += nlocks) = lock;
        return (lockptr);
}

/*
 * Insert 'lock' into our list of held locks.
 * Currently only used for LOCK_ROBUST mutexes.
 */
void
remember_lock(mutex_t *lock)
{
        (void) find_lock_entry(lock);
}

/*
 * Remove 'lock' from our list of held locks.
 * Currently only used for LOCK_ROBUST mutexes.
 */
void
forget_lock(mutex_t *lock)
{
        *find_lock_entry(lock) = NULL;
}

/*
 * Free the array of held locks.
 */
void
heldlock_free(ulwp_t *ulwp)
{
        uint_t nlocks;

        if ((nlocks = ulwp->ul_heldlockcnt) != 0)
                lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
        ulwp->ul_heldlockcnt = 0;
        ulwp->ul_heldlocks.array = NULL;
}
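/*
 * Editor's note (not part of the original source): a sketch of how
 * the held-locks array grows.  A thread's first robust lock lands in
 * ul_heldlocks.single; the first overflow converts that single entry
 * into an INITIAL_LOCKS (8) entry array, and each later overflow
 * doubles the array: 8 -> 16 -> 32 entries, and so on.
 *
 *	mutex_t *mp = ...;		a LOCK_ROBUST mutex (hypothetical)
 *
 *	remember_lock(mp);	on acquisition: record it for heldlock_exit()
 *	...
 *	forget_lock(mp);	on release/destruction: clear the entry
 *
 * Entries cleared by forget_lock() are reused by the next
 * remember_lock() before any new space is allocated.
 */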
/*
 * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD.
 * Called from _thrp_exit() to deal with abandoned locks.
 */
void
heldlock_exit(void)
{
        ulwp_t *self = curthread;
        mutex_t **lockptr;
        uint_t nlocks;
        mutex_t *mp;

        if ((nlocks = self->ul_heldlockcnt) != 0)
                lockptr = self->ul_heldlocks.array;
        else {
                nlocks = 1;
                lockptr = &self->ul_heldlocks.single;
        }

        for (; nlocks; nlocks--, lockptr++) {
                /*
                 * The kernel takes care of transitioning held
                 * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD.
                 * We avoid that case here.
                 */
                if ((mp = *lockptr) != NULL &&
                    mutex_held(mp) &&
                    (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
                    LOCK_ROBUST) {
                        mp->mutex_rcount = 0;
                        if (!(mp->mutex_flag & LOCK_UNMAPPED))
                                mp->mutex_flag |= LOCK_OWNERDEAD;
                        (void) mutex_unlock_internal(mp, 1);
                }
        }

        heldlock_free(self);
}

#pragma weak _cond_init = cond_init
/* ARGSUSED2 */
int
cond_init(cond_t *cvp, int type, void *arg)
{
        if (type != USYNC_THREAD && type != USYNC_PROCESS)
                return (EINVAL);
        (void) memset(cvp, 0, sizeof (*cvp));
        cvp->cond_type = (uint16_t)type;
        cvp->cond_magic = COND_MAGIC;

        /*
         * This should be at the beginning of the function,
         * but for the sake of old broken applications that
         * do not have proper alignment for their condvars
         * (and don't check the return code from cond_init),
         * we put it here, after initializing the condvar regardless.
         */
        if (((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1)) &&
            curthread->ul_misaligned == 0)
                return (EINVAL);

        return (0);
}
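/*
 * Editor's illustration (not part of the original source): initializing
 * a condvar with a matching mutex.  The names are hypothetical.
 *
 *	static cond_t work_cv = DEFAULTCV;		(hypothetical)
 *	static mutex_t work_lock = DEFAULTMUTEX;	(hypothetical)
 *
 * or, for synchronization between processes through shared memory:
 *
 *	(void) cond_init(&shared->cv, USYNC_PROCESS, NULL);
 *	(void) mutex_init(&shared->lock, USYNC_PROCESS, NULL);
 *
 * The condvar and its mutex should use the same USYNC_* type;
 * cond_wait_common() below reports a mismatch when error
 * detection is enabled.
 */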
/*
 * cond_sleep_queue(): utility function for cond_wait_queue().
 *
 * Go to sleep on a condvar sleep queue, expect to be waked up
 * by someone calling cond_signal() or cond_broadcast() or due
 * to receiving a UNIX signal or being cancelled, or just simply
 * due to a spurious wakeup (like someone calling forkall()).
 *
 * The associated mutex is *not* reacquired before returning.
 * That must be done by the caller of cond_sleep_queue().
 */
static int
cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
        ulwp_t *self = curthread;
        queue_head_t *qp;
        queue_head_t *mqp;
        lwpid_t lwpid;
        int signalled;
        int error;
        int cv_wake;
        int release_all;

        /*
         * Put ourself on the CV sleep queue, unlock the mutex, then
         * park ourself and unpark a candidate lwp to grab the mutex.
         * We must go onto the CV sleep queue before dropping the
         * mutex in order to guarantee atomicity of the operation.
         */
        self->ul_sp = stkptr();
        qp = queue_lock(cvp, CV);
        enqueue(qp, self, 0);
        cvp->cond_waiters_user = 1;
        self->ul_cvmutex = mp;
        self->ul_cv_wake = cv_wake = (tsp != NULL);
        self->ul_signalled = 0;
        if (mp->mutex_flag & LOCK_OWNERDEAD) {
                mp->mutex_flag &= ~LOCK_OWNERDEAD;
                mp->mutex_flag |= LOCK_NOTRECOVERABLE;
        }
        release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
        lwpid = mutex_unlock_queue(mp, release_all);
        for (;;) {
                set_parking_flag(self, 1);
                queue_unlock(qp);
                if (lwpid != 0) {
                        lwpid = preempt_unpark(self, lwpid);
                        preempt(self);
                }
                /*
                 * We may have a deferred signal present,
                 * in which case we should return EINTR.
                 * Also, we may have received a SIGCANCEL; if so
                 * and we are cancelable we should return EINTR.
                 * We force an immediate EINTR return from
                 * __lwp_park() by turning our parking flag off.
                 */
                if (self->ul_cursig != 0 ||
                    (self->ul_cancelable && self->ul_cancel_pending))
                        set_parking_flag(self, 0);
                /*
                 * __lwp_park() will return the residual time in tsp
                 * if we are unparked before the timeout expires.
                 */
                error = __lwp_park(tsp, lwpid);
                set_parking_flag(self, 0);
                lwpid = 0;	/* unpark the other lwp only once */
                /*
                 * We were waked up by cond_signal(), cond_broadcast(),
                 * by an interrupt or timeout (EINTR or ETIME),
                 * or we may just have gotten a spurious wakeup.
                 */
                qp = queue_lock(cvp, CV);
                if (!cv_wake)
                        mqp = queue_lock(mp, MX);
                if (self->ul_sleepq == NULL)
                        break;
                /*
                 * We are on either the condvar sleep queue or the
                 * mutex sleep queue.  Break out of the sleep if we
                 * were interrupted or we timed out (EINTR or ETIME).
                 * Else this is a spurious wakeup; continue the loop.
                 */
                if (!cv_wake && self->ul_sleepq == mqp) { /* mutex queue */
                        if (error) {
                                mp->mutex_waiters = dequeue_self(mqp);
                                break;
                        }
                        tsp = NULL;	/* no more timeout */
                } else if (self->ul_sleepq == qp) {	/* condvar queue */
                        if (error) {
                                cvp->cond_waiters_user = dequeue_self(qp);
                                break;
                        }
                        /*
                         * Else a spurious wakeup on the condvar queue.
                         * __lwp_park() has already adjusted the timeout.
                         */
                } else {
                        thr_panic("cond_sleep_queue(): thread not on queue");
                }
                if (!cv_wake)
                        queue_unlock(mqp);
        }

        self->ul_sp = 0;
        self->ul_cv_wake = 0;
        ASSERT(self->ul_cvmutex == NULL);
        ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
            self->ul_wchan == NULL);

        signalled = self->ul_signalled;
        self->ul_signalled = 0;
        queue_unlock(qp);
        if (!cv_wake)
                queue_unlock(mqp);

        /*
         * If we were concurrently cond_signal()d and any of:
         * received a UNIX signal, were cancelled, or got a timeout,
         * then perform another cond_signal() to avoid consuming it.
         */
        if (error && signalled)
                (void) cond_signal(cvp);

        return (error);
}
static void
cond_wait_check_alignment(cond_t *cvp, mutex_t *mp)
{
        if ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1))
                lock_error(mp, "cond_wait", cvp, "mutex is misaligned");
        if ((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1))
                lock_error(mp, "cond_wait", cvp, "condvar is misaligned");
}

int
cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
        ulwp_t *self = curthread;
        int error;
        int merror;

        if (self->ul_error_detection && self->ul_misaligned == 0)
                cond_wait_check_alignment(cvp, mp);

        /*
         * The old thread library was programmed to defer signals
         * while in cond_wait() so that the associated mutex would
         * be guaranteed to be held when the application signal
         * handler was invoked.
         *
         * We do not behave this way by default; the state of the
         * associated mutex in the signal handler is undefined.
         *
         * To accommodate applications that depend on the old
         * behavior, the _THREAD_COND_WAIT_DEFER environment
         * variable can be set to 1 and we will behave in the
         * old way with respect to cond_wait().
         */
        if (self->ul_cond_wait_defer)
                sigoff(self);

        error = cond_sleep_queue(cvp, mp, tsp);

        /*
         * Reacquire the mutex.
         */
        if ((merror = mutex_lock_impl(mp, NULL)) != 0)
                error = merror;

        /*
         * Take any deferred signal now, after we have reacquired the mutex.
         */
        if (self->ul_cond_wait_defer)
                sigon(self);

        return (error);
}
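/*
 * Editor's illustration (not part of the original source): because
 * spurious wakeups are possible (see cond_sleep_queue() above), a
 * caller must always re-test its predicate in a loop.  The names
 * are hypothetical.
 *
 *	(void) pthread_mutex_lock(&work_lock);
 *	while (work_queue_empty())		(hypothetical predicate)
 *		(void) pthread_cond_wait(&work_cv, &work_lock);
 *	... consume work with work_lock held ...
 *	(void) pthread_mutex_unlock(&work_lock);
 *
 * The while-loop, not an if-test, is what makes spurious wakeups
 * (and stolen wakeups after forkall()) harmless.
 */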
/*
 * cond_sleep_kernel(): utility function for cond_wait_kernel().
 * See the comment ahead of cond_sleep_queue(), above.
 */
static int
cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
        int mtype = mp->mutex_type;
        ulwp_t *self = curthread;
        int error;

        if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
                _ceil_prio_waive();

        self->ul_sp = stkptr();
        self->ul_wchan = cvp;
        mp->mutex_owner = 0;
        /* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */
        if (mtype & LOCK_PRIO_INHERIT) {
                mp->mutex_lockw = LOCKCLEAR;
                self->ul_pilocks--;
        }
        /*
         * ___lwp_cond_wait() returns immediately with EINTR if
         * set_parking_flag(self,0) is called on this lwp before it
         * goes to sleep in the kernel.  sigacthandler() calls this
         * when a deferred signal is noted.  This assures that we don't
         * get stuck in ___lwp_cond_wait() with all signals blocked
         * due to taking a deferred signal before going to sleep.
         */
        set_parking_flag(self, 1);
        if (self->ul_cursig != 0 ||
            (self->ul_cancelable && self->ul_cancel_pending))
                set_parking_flag(self, 0);
        error = ___lwp_cond_wait(cvp, mp, tsp, 1);
        set_parking_flag(self, 0);
        self->ul_sp = 0;
        self->ul_wchan = NULL;
        return (error);
}

int
cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
        ulwp_t *self = curthread;
        int error;
        int merror;

        if (self->ul_error_detection && self->ul_misaligned == 0)
                cond_wait_check_alignment(cvp, mp);

        /*
         * See the large comment in cond_wait_queue(), above.
         */
        if (self->ul_cond_wait_defer)
                sigoff(self);

        error = cond_sleep_kernel(cvp, mp, tsp);

        /*
         * Override the return code from ___lwp_cond_wait()
         * with any non-zero return code from mutex_lock().
         * This addresses robust lock failures in particular;
         * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
         * errors in order to take corrective action.
         */
        if ((merror = mutex_lock_impl(mp, NULL)) != 0)
                error = merror;

        /*
         * Take any deferred signal now, after we have reacquired the mutex.
         */
        if (self->ul_cond_wait_defer)
                sigon(self);

        return (error);
}
/*
 * Common code for cond_wait() and cond_timedwait()
 */
int
cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
        int mtype = mp->mutex_type;
        hrtime_t begin_sleep = 0;
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
        tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
        uint8_t rcount;
        int error = 0;

        /*
         * The SUSv3 POSIX spec for pthread_cond_timedwait() states:
         *	Except in the case of [ETIMEDOUT], all these error checks
         *	shall act as if they were performed immediately at the
         *	beginning of processing for the function and shall cause
         *	an error return, in effect, prior to modifying the state
         *	of the mutex specified by mutex or the condition variable
         *	specified by cond.
         * Therefore, we must return EINVAL now if the timeout is invalid.
         */
        if (tsp != NULL &&
            (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
                return (EINVAL);

        if (__td_event_report(self, TD_SLEEP, udp)) {
                self->ul_sp = stkptr();
                self->ul_wchan = cvp;
                self->ul_td_evbuf.eventnum = TD_SLEEP;
                self->ul_td_evbuf.eventdata = cvp;
                tdb_event(TD_SLEEP, udp);
                self->ul_sp = 0;
        }
        if (csp) {
                if (tsp)
                        tdb_incr(csp->cond_timedwait);
                else
                        tdb_incr(csp->cond_wait);
        }
        if (msp)
                begin_sleep = record_hold_time(msp);
        else if (csp)
                begin_sleep = gethrtime();

        if (self->ul_error_detection) {
                if (!mutex_held(mp))
                        lock_error(mp, "cond_wait", cvp, NULL);
                if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
                        lock_error(mp, "recursive mutex in cond_wait",
                            cvp, NULL);
                if (cvp->cond_type & USYNC_PROCESS) {
                        if (!(mtype & USYNC_PROCESS))
                                lock_error(mp, "cond_wait", cvp,
                                    "condvar process-shared, "
                                    "mutex process-private");
                } else {
                        if (mtype & USYNC_PROCESS)
                                lock_error(mp, "cond_wait", cvp,
                                    "condvar process-private, "
                                    "mutex process-shared");
                }
        }

        /*
         * We deal with recursive mutexes by completely
         * dropping the lock and restoring the recursion
         * count after waking up.  This is arguably wrong,
         * but it obeys the principle of least astonishment.
         */
        rcount = mp->mutex_rcount;
        mp->mutex_rcount = 0;
        if ((mtype &
            (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) |
            (cvp->cond_type & USYNC_PROCESS))
                error = cond_wait_kernel(cvp, mp, tsp);
        else
                error = cond_wait_queue(cvp, mp, tsp);
        mp->mutex_rcount = rcount;

        if (csp) {
                hrtime_t lapse = gethrtime() - begin_sleep;
                if (tsp == NULL)
                        csp->cond_wait_sleep_time += lapse;
                else {
                        csp->cond_timedwait_sleep_time += lapse;
                        if (error == ETIME)
                                tdb_incr(csp->cond_timedwait_timeout);
                }
        }
        return (error);
}
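/*
 * Editor's note (not part of the original source): the EINVAL check
 * in cond_wait_common() means a relative timeout must satisfy
 * tv_sec >= 0 and 0 <= tv_nsec < NANOSEC.  A caller building one
 * from a millisecond count might normalize it like this (sketch):
 *
 *	timespec_t ts;
 *	ts.tv_sec = ms / 1000;
 *	ts.tv_nsec = (ms % 1000) * 1000000;	0 <= tv_nsec < NANOSEC
 *
 * Passing, say, tv_nsec == 1000000000 instead of carrying into
 * tv_sec draws an immediate EINVAL, before any state is modified.
 */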
/*
 * cond_wait() is a cancellation point but __cond_wait() is not.
 * Internally, libc calls the non-cancellation version.
 * Other libraries need to use pthread_setcancelstate(), as appropriate,
 * since __cond_wait() is not exported from libc.
 */
int
__cond_wait(cond_t *cvp, mutex_t *mp)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        uberflags_t *gflags;

        /*
         * Optimize the common case of USYNC_THREAD plus
         * no error detection, no lock statistics, and no event tracing.
         */
        if ((gflags = self->ul_schedctl_called) != NULL &&
            (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
            self->ul_td_events_enable |
            udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
                return (cond_wait_queue(cvp, mp, NULL));

        /*
         * Else do it the long way.
         */
        return (cond_wait_common(cvp, mp, NULL));
}

#pragma weak _cond_wait = cond_wait
int
cond_wait(cond_t *cvp, mutex_t *mp)
{
        int error;

        _cancelon();
        error = __cond_wait(cvp, mp);
        if (error == EINTR)
                _canceloff();
        else
                _canceloff_nocancel();
        return (error);
}

/*
 * pthread_cond_wait() is a cancellation point.
 */
int
pthread_cond_wait(pthread_cond_t *_RESTRICT_KYWD cvp,
    pthread_mutex_t *_RESTRICT_KYWD mp)
{
        int error;

        error = cond_wait((cond_t *)cvp, (mutex_t *)mp);
        return ((error == EINTR)? 0 : error);
}
/*
 * cond_timedwait() is a cancellation point but __cond_timedwait() is not.
 */
int
__cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
        clockid_t clock_id = cvp->cond_clockid;
        timespec_t reltime;
        int error;

        if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
                clock_id = CLOCK_REALTIME;
        abstime_to_reltime(clock_id, abstime, &reltime);
        error = cond_wait_common(cvp, mp, &reltime);
        if (error == ETIME && clock_id == CLOCK_HIGHRES) {
                /*
                 * Don't return ETIME if we didn't really get a timeout.
                 * This can happen if we return because someone resets
                 * the system clock.  Just return zero in this case,
                 * giving a spurious wakeup but not a timeout.
                 */
                if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
                    abstime->tv_nsec > gethrtime())
                        error = 0;
        }
        return (error);
}

int
cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
        int error;

        _cancelon();
        error = __cond_timedwait(cvp, mp, abstime);
        if (error == EINTR)
                _canceloff();
        else
                _canceloff_nocancel();
        return (error);
}

/*
 * pthread_cond_timedwait() is a cancellation point.
 */
int
pthread_cond_timedwait(pthread_cond_t *_RESTRICT_KYWD cvp,
    pthread_mutex_t *_RESTRICT_KYWD mp,
    const struct timespec *_RESTRICT_KYWD abstime)
{
        int error;

        error = cond_timedwait((cond_t *)cvp, (mutex_t *)mp, abstime);
        if (error == ETIME)
                error = ETIMEDOUT;
        else if (error == EINTR)
                error = 0;
        return (error);
}

/*
 * cond_reltimedwait() is a cancellation point
 * but __cond_reltimedwait() is not.
 */
int
__cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
{
        timespec_t tslocal = *reltime;

        return (cond_wait_common(cvp, mp, &tslocal));
}

int
cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
{
        int error;

        _cancelon();
        error = __cond_reltimedwait(cvp, mp, reltime);
        if (error == EINTR)
                _canceloff();
        else
                _canceloff_nocancel();
        return (error);
}

int
pthread_cond_reltimedwait_np(pthread_cond_t *_RESTRICT_KYWD cvp,
    pthread_mutex_t *_RESTRICT_KYWD mp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
        int error;

        error = cond_reltimedwait((cond_t *)cvp, (mutex_t *)mp, reltime);
        if (error == ETIME)
                error = ETIMEDOUT;
        else if (error == EINTR)
                error = 0;
        return (error);
}
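/*
 * Editor's illustration (not part of the original source): the
 * absolute-versus-relative distinction above, from the caller's
 * side.  The condvar/mutex names are hypothetical.
 *
 *	timespec_t abstime;
 *	(void) clock_gettime(CLOCK_REALTIME, &abstime);
 *	abstime.tv_sec += 5;		deadline: five seconds from now
 *
 *	(void) pthread_mutex_lock(&work_lock);
 *	while (work_queue_empty()) {	(hypothetical predicate)
 *		if (pthread_cond_timedwait(&work_cv, &work_lock,
 *		    &abstime) == ETIMEDOUT)
 *			break;		deadline passed; give up
 *	}
 *	(void) pthread_mutex_unlock(&work_lock);
 *
 * pthread_cond_reltimedwait_np() uses the same loop with a relative
 * interval instead of an absolute deadline, so re-waiting after a
 * wakeup requires recomputing the remaining time.
 */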
#pragma weak pthread_cond_signal = cond_signal
#pragma weak _cond_signal = cond_signal
int
cond_signal(cond_t *cvp)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
        int error = 0;
        int more;
        lwpid_t lwpid;
        queue_head_t *qp;
        mutex_t *mp;
        queue_head_t *mqp;
        ulwp_t **ulwpp;
        ulwp_t *ulwp;
        ulwp_t *prev;

        if (csp)
                tdb_incr(csp->cond_signal);

        if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */
                error = _lwp_cond_signal(cvp);

        if (!cvp->cond_waiters_user) /* no one sleeping at user-level */
                return (error);

        /*
         * Move someone from the condvar sleep queue to the mutex sleep
         * queue for the mutex that he will acquire on being waked up.
         * We can do this only if we own the mutex he will acquire.
         * If we do not own the mutex, or if his ul_cv_wake flag
         * is set, just dequeue and unpark him.
         */
        qp = queue_lock(cvp, CV);
        ulwpp = queue_slot(qp, &prev, &more);
        cvp->cond_waiters_user = more;
        if (ulwpp == NULL) {	/* no one on the sleep queue */
                queue_unlock(qp);
                return (error);
        }
        ulwp = *ulwpp;

        /*
         * Inform the thread that he was the recipient of a cond_signal().
         * This lets him deal with cond_signal() and, concurrently,
         * one or more of a cancellation, a UNIX signal, or a timeout.
         * These latter conditions must not consume a cond_signal().
         */
        ulwp->ul_signalled = 1;

        /*
         * Dequeue the waiter but leave his ul_sleepq non-NULL
         * while we move him to the mutex queue so that he can
         * deal properly with spurious wakeups.
         */
        queue_unlink(qp, ulwpp, prev);

        mp = ulwp->ul_cvmutex;		/* the mutex he will acquire */
        ulwp->ul_cvmutex = NULL;
        ASSERT(mp != NULL);

        if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
                /* just wake him up */
                lwpid = ulwp->ul_lwpid;
                no_preempt(self);
                ulwp->ul_sleepq = NULL;
                ulwp->ul_wchan = NULL;
                queue_unlock(qp);
                (void) __lwp_unpark(lwpid);
                preempt(self);
        } else {
                /* move him to the mutex queue */
                mqp = queue_lock(mp, MX);
                enqueue(mqp, ulwp, 0);
                mp->mutex_waiters = 1;
                queue_unlock(mqp);
                queue_unlock(qp);
        }

        return (error);
}
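/*
 * Editor's illustration (not part of the original source): the
 * producer side that pairs with the waiter loops sketched earlier.
 * Signalling with the mutex held is what lets cond_signal() above
 * move the waiter directly to the mutex sleep queue instead of
 * waking him only to have him block again.  Names are hypothetical.
 *
 *	(void) pthread_mutex_lock(&work_lock);
 *	enqueue_work(item);			(hypothetical)
 *	(void) pthread_cond_signal(&work_cv);
 *	(void) pthread_mutex_unlock(&work_lock);
 */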
/*
 * Utility function called by mutex_wakeup_all(), cond_broadcast(),
 * and rw_queue_release() to (re)allocate a big buffer to hold the
 * lwpids of all the threads to be set running after they are removed
 * from their sleep queues.  Since we are holding a queue lock, we
 * cannot call any function that might acquire a lock.  mmap(), munmap(),
 * lwp_unpark_all() are simple system calls and are safe in this regard.
 */
lwpid_t *
alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr)
{
        /*
         * Allocate NEWLWPS ids on the first overflow.
         * Double the allocation each time after that.
         */
        int nlwpid = *nlwpid_ptr;
        int maxlwps = *maxlwps_ptr;
        int first_allocation;
        int newlwps;
        void *vaddr;

        ASSERT(nlwpid == maxlwps);

        first_allocation = (maxlwps == MAXLWPS);
        newlwps = first_allocation? NEWLWPS : 2 * maxlwps;
        vaddr = mmap(NULL, newlwps * sizeof (lwpid_t),
            PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);

        if (vaddr == MAP_FAILED) {
                /*
                 * Let's hope this never happens.
                 * If it does, then we have a terrible
                 * thundering herd on our hands.
                 */
                (void) __lwp_unpark_all(lwpid, nlwpid);
                *nlwpid_ptr = 0;
        } else {
                (void) memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t));
                if (!first_allocation)
                        (void) munmap((caddr_t)lwpid,
                            maxlwps * sizeof (lwpid_t));
                lwpid = vaddr;
                *maxlwps_ptr = newlwps;
        }

        return (lwpid);
}
#pragma weak pthread_cond_broadcast = cond_broadcast
#pragma weak _cond_broadcast = cond_broadcast
int
cond_broadcast(cond_t *cvp)
{
        ulwp_t *self = curthread;
        uberdata_t *udp = self->ul_uberdata;
        tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
        int error = 0;
        queue_head_t *qp;
        queue_root_t *qrp;
        mutex_t *mp;
        mutex_t *mp_cache = NULL;
        queue_head_t *mqp = NULL;
        ulwp_t *ulwp;
        int nlwpid = 0;
        int maxlwps = MAXLWPS;
        lwpid_t buffer[MAXLWPS];
        lwpid_t *lwpid = buffer;

        if (csp)
                tdb_incr(csp->cond_broadcast);

        if (cvp->cond_waiters_kernel) /* someone sleeping in the kernel? */
                error = _lwp_cond_broadcast(cvp);

        if (!cvp->cond_waiters_user) /* no one sleeping at user-level */
                return (error);

        /*
         * Move everyone from the condvar sleep queue to the mutex sleep
         * queue for the mutex that they will acquire on being waked up.
         * We can do this only if we own the mutex they will acquire.
         * If we do not own the mutex, or if their ul_cv_wake flag
         * is set, just dequeue and unpark them.
         *
         * We keep track of lwpids that are to be unparked in lwpid[].
         * __lwp_unpark_all() is called to unpark all of them after
         * they have been removed from the sleep queue and the sleep
         * queue lock has been dropped.  If we run out of space in our
         * on-stack buffer, we need to allocate more but we can't call
         * lmalloc() because we are holding a queue lock when the overflow
         * occurs and lmalloc() acquires a lock.  We can't use alloca()
         * either because the application may have allocated a small
         * stack and we don't want to overrun the stack.  So we call
         * alloc_lwpids() to allocate a bigger buffer using the mmap()
         * system call directly since that path acquires no locks.
         */
        qp = queue_lock(cvp, CV);
        cvp->cond_waiters_user = 0;
        for (;;) {
                if ((qrp = qp->qh_root) == NULL ||
                    (ulwp = qrp->qr_head) == NULL)
                        break;
                ASSERT(ulwp->ul_wchan == cvp);
                queue_unlink(qp, &qrp->qr_head, NULL);
                mp = ulwp->ul_cvmutex;		/* his mutex */
                ulwp->ul_cvmutex = NULL;
                ASSERT(mp != NULL);
                if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
                        /* just wake him up */
                        ulwp->ul_sleepq = NULL;
                        ulwp->ul_wchan = NULL;
                        if (nlwpid == maxlwps)
                                lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
                        lwpid[nlwpid++] = ulwp->ul_lwpid;
                } else {
                        /* move him to the mutex queue */
                        if (mp != mp_cache) {
                                mp_cache = mp;
                                if (mqp != NULL)
                                        queue_unlock(mqp);
                                mqp = queue_lock(mp, MX);
                        }
                        enqueue(mqp, ulwp, 0);
                        mp->mutex_waiters = 1;
                }
        }
        if (mqp != NULL)
                queue_unlock(mqp);
        if (nlwpid == 0) {
                queue_unlock(qp);
        } else {
                no_preempt(self);
                queue_unlock(qp);
                if (nlwpid == 1)
                        (void) __lwp_unpark(lwpid[0]);
                else
                        (void) __lwp_unpark_all(lwpid, nlwpid);
                preempt(self);
        }
        if (lwpid != buffer)
                (void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
        return (error);
}
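/*
 * Editor's illustration (not part of the original source): broadcast
 * is for state changes that may satisfy many waiters at once, e.g. a
 * shutdown flag.  Names are hypothetical.
 *
 *	(void) pthread_mutex_lock(&work_lock);
 *	shutting_down = 1;			(hypothetical flag)
 *	(void) pthread_cond_broadcast(&work_cv);
 *	(void) pthread_mutex_unlock(&work_lock);
 *
 * Every waiter re-tests its predicate on wakeup, so waking all of
 * them is correct even when only some can make progress; the rest
 * simply go back to sleep.
 */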
#pragma weak pthread_cond_destroy = cond_destroy
int
cond_destroy(cond_t *cvp)
{
        cvp->cond_magic = 0;
        tdb_sync_obj_deregister(cvp);
        return (0);
}

#if defined(THREAD_DEBUG)
void
assert_no_libc_locks_held(void)
{
        ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
}

/* protected by link_lock */
uint64_t spin_lock_spin;
uint64_t spin_lock_spin2;
uint64_t spin_lock_sleep;
uint64_t spin_lock_wakeup;

/*
 * Record spin lock statistics.
 * Called by a thread exiting itself in thrp_exit().
 * Also called via atexit() from the thread calling
 * exit() to do all the other threads as well.
 */
void
record_spin_locks(ulwp_t *ulwp)
{
        spin_lock_spin += ulwp->ul_spin_lock_spin;
        spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
        spin_lock_sleep += ulwp->ul_spin_lock_sleep;
        spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
        ulwp->ul_spin_lock_spin = 0;
        ulwp->ul_spin_lock_spin2 = 0;
        ulwp->ul_spin_lock_sleep = 0;
        ulwp->ul_spin_lock_wakeup = 0;
}

/*
 * atexit function: dump the queue statistics to stderr.
 */
#include <stdio.h>
void
dump_queue_statistics(void)
{
        uberdata_t *udp = curthread->ul_uberdata;
        queue_head_t *qp;
        int qn;
        uint64_t spin_lock_total = 0;

        if (udp->queue_head == NULL || thread_queue_dump == 0)
                return;

        if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
            fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
                return;
        for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
                if (qp->qh_lockcount == 0)
                        continue;
                spin_lock_total += qp->qh_lockcount;
                if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
                    (u_longlong_t)qp->qh_lockcount,
                    qp->qh_qmax, qp->qh_hmax) < 0)
                        return;
        }

        if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
            fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
                return;
        for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
                if (qp->qh_lockcount == 0)
                        continue;
                spin_lock_total += qp->qh_lockcount;
                if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
                    (u_longlong_t)qp->qh_lockcount,
                    qp->qh_qmax, qp->qh_hmax) < 0)
                        return;
        }

        (void) fprintf(stderr, "\n  spin_lock_total  = %10llu\n",
            (u_longlong_t)spin_lock_total);
        (void) fprintf(stderr, "  spin_lock_spin   = %10llu\n",
            (u_longlong_t)spin_lock_spin);
        (void) fprintf(stderr, "  spin_lock_spin2  = %10llu\n",
            (u_longlong_t)spin_lock_spin2);
        (void) fprintf(stderr, "  spin_lock_sleep  = %10llu\n",
            (u_longlong_t)spin_lock_sleep);
        (void) fprintf(stderr, "  spin_lock_wakeup = %10llu\n",
            (u_longlong_t)spin_lock_wakeup);
}
#endif
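/*
 * Editor's note (not part of the original source): dump_queue_statistics()
 * and the other THREAD_DEBUG code above are compiled only into the debug
 * build of libc.  My understanding (an assumption, based on the
 * _THREAD_ADAPTIVE_SPIN and _THREAD_QUEUE_SPIN variables handled earlier
 * in this file) is that the gate is an environment variable parsed at
 * libc startup, e.g.:
 *
 *	$ _THREAD_QUEUE_DUMP=1 ./myapp		(hypothetical usage)
 *
 * which sets thread_queue_dump and causes the per-queue statistics to
 * be written to stderr at exit().
 */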