/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/sdt.h>

#include "lint.h"
#include "thr_uberdata.h"

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);
static int mutex_unlock_internal(mutex_t *, int);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);

/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}

/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin counts of 1000 and 500 are experimentally determined.
 * On sun4u machines with any number of processors they could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variables:
 *	_THREAD_ADAPTIVE_SPIN=count
 *	_THREAD_RELEASE_SPIN=count
 * can be used to override and set the counts in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_release_spin = 500;
int	thread_queue_verify = 0;
static	int	ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 * There is no release spin concept for queue locks.
 */
int	thread_queue_spin = 1000;

/*
 * Use the otherwise-unused 'mutex_ownerpid' field of a USYNC_THREAD
 * mutex to be a count of adaptive spins in progress.
 */
#define	mutex_spinners	mutex_ownerpid

#define	ALL_ATTRIBUTES				\
	(LOCK_RECURSIVE | LOCK_ERRORCHECK |	\
	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT |	\
	LOCK_ROBUST)

/*
 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
 *	LOCK_RECURSIVE
 *	LOCK_ERRORCHECK
 *	LOCK_PRIO_INHERIT
 *	LOCK_PRIO_PROTECT
 *	LOCK_ROBUST
 */
#pragma weak _private_mutex_init = __mutex_init
#pragma weak mutex_init = __mutex_init
#pragma weak _mutex_init = __mutex_init
/* ARGSUSED2 */
int
__mutex_init(mutex_t *mp, int type, void *arg)
{
	int basetype = (type & ~ALL_ATTRIBUTES);
	int error = 0;

	if (basetype == USYNC_PROCESS_ROBUST) {
		/*
		 * USYNC_PROCESS_ROBUST is a deprecated historical type.
		 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
		 * retain the USYNC_PROCESS_ROBUST flag so we can return
		 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
		 * mutexes will ever draw ELOCKUNMAPPED).
		 */
		type |= (USYNC_PROCESS | LOCK_ROBUST);
		basetype = USYNC_PROCESS;
	}

	if (!(basetype == USYNC_THREAD || basetype == USYNC_PROCESS) ||
	    (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
	    == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) {
		error = EINVAL;
	} else if (type & LOCK_ROBUST) {
		/*
		 * Callers of mutex_init() with the LOCK_ROBUST attribute
		 * are required to pass an initially all-zero mutex.
		 * Multiple calls to mutex_init() are allowed; all but
		 * the first return EBUSY.  A call to mutex_init() is
		 * allowed to make an inconsistent robust lock consistent
		 * (for historical usage, even though the proper interface
		 * for this is mutex_consistent()).  Note that we use
		 * atomic_or_16() to set the LOCK_INITED flag so as
		 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
		 */
		extern void _atomic_or_16(volatile uint16_t *, uint16_t);
		if (!(mp->mutex_flag & LOCK_INITED)) {
			mp->mutex_type = (uint8_t)type;
			_atomic_or_16(&mp->mutex_flag, LOCK_INITED);
			mp->mutex_magic = MUTEX_MAGIC;
		} else if (type != mp->mutex_type ||
		    ((type & LOCK_PRIO_PROTECT) &&
		    mp->mutex_ceiling != (*(int *)arg))) {
			error = EINVAL;
		} else if (__mutex_consistent(mp) != 0) {
			error = EBUSY;
		}
		/* register a process robust mutex with the kernel */
		if (basetype == USYNC_PROCESS)
			register_lock(mp);
	} else {
		(void) _memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		mp->mutex_magic = MUTEX_MAGIC;
	}

	if (error == 0 && (type & LOCK_PRIO_PROTECT))
		mp->mutex_ceiling = (uint8_t)(*(int *)arg);

	return (error);
}

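/*
 * Illustrative usage sketch (not part of libc): how an application might
 * create and recover a robust, process-shared mutex through the public
 * mutex_init()/mutex_lock()/mutex_consistent() interfaces, which reach
 * __mutex_init() above.  Compiled out; the function name is hypothetical
 * and error handling is abbreviated.
 */
#if 0
#include <synch.h>
#include <errno.h>

static void
robust_example(mutex_t *mp)	/* mp: an all-zero mutex_t in shared memory */
{
	int error;

	/* LOCK_ROBUST requires an initially all-zero mutex (see above) */
	(void) mutex_init(mp, USYNC_PROCESS | LOCK_ROBUST, NULL);

	error = mutex_lock(mp);
	if (error == EOWNERDEAD) {
		/* previous owner died; repair the protected data, then... */
		(void) mutex_consistent(mp);
		error = 0;
	}
	if (error == 0) {
		/* ... critical section ... */
		(void) mutex_unlock(mp);
	}
}
#endif
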
/*
 * Delete mp from list of ceil mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	mcpp = &self->ul_mxchain;
	while ((*mcpp)->mxchain_mx != mp)
		mcpp = &(*mcpp)->mxchain_next;
	mcp = *mcpp;
	*mcpp = mcp->mxchain_next;
	lfree(mcp, sizeof (*mcp));
	return (mcpp == &self->ul_mxchain);
}

/*
 * Add mp to head of list of ceil mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}

/*
 * Inherit priority from ceiling.  The inheritance impacts the effective
 * priority, not the assigned priority.  See _thread_setschedparam_main().
 */
void
_ceil_prio_inherit(int ceil)
{
	ulwp_t *self = curthread;
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	param.sched_priority = ceil;
	if (_thread_setschedparam_main(self->ul_lwpid,
	    self->ul_policy, &param, PRIO_INHERIT)) {
		/*
		 * Panic since unclear what error code to return.
		 * If we do return the error codes returned by above
		 * called routine, update the man page...
		 */
		thr_panic("_thread_setschedparam_main() fails");
	}
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	if (self->ul_mxchain == NULL) {
		/*
		 * No ceil locks held.  Zero the epri, revert back to ul_pri.
		 * Since thread's hash lock is not held, one cannot just
		 * read ul_pri here...do it in the called routine...
		 */
		param.sched_priority = self->ul_pri;	/* ignored */
		if (_thread_setschedparam_main(self->ul_lwpid,
		    self->ul_policy, &param, PRIO_DISINHERIT))
			thr_panic("_thread_setschedparam_main() fails");
	} else {
		/*
		 * Set priority to that of the mutex at the head
		 * of the ceilmutex chain.
		 */
		param.sched_priority =
		    self->ul_mxchain->mxchain_mx->mutex_ceiling;
		if (_thread_setschedparam_main(self->ul_lwpid,
		    self->ul_policy, &param, PRIO_INHERIT))
			thr_panic("_thread_setschedparam_main() fails");
	}
}

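/*
 * Illustrative usage sketch (not part of libc): creating a priority-ceiling
 * mutex through the public mutex_init() interface.  With LOCK_PRIO_PROTECT,
 * the 'arg' parameter points to an int holding the ceiling, which lands in
 * mutex_ceiling above and drives _ceil_prio_inherit()/_ceil_prio_waive().
 * Compiled out; the ceiling value chosen here is arbitrary.
 */
#if 0
#include <synch.h>

static void
ceiling_example(void)
{
	static mutex_t m;	/* statically allocated, so initially zero */
	int ceiling = 30;	/* effective priority while the lock is held */

	(void) mutex_init(&m, USYNC_THREAD | LOCK_PRIO_PROTECT, &ceiling);

	(void) mutex_lock(&m);		/* priority raised to the ceiling */
	/* ... critical section ... */
	(void) mutex_unlock(&m);	/* ceiling priority is waived */
}
#endif
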
/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
	ulwp_t *self = curthread;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Spin for a while, attempting to acquire the lock.
	 */
	if (self->ul_spin_lock_spin != UINT_MAX)
		self->ul_spin_lock_spin++;
	if (mutex_queuelock_adaptive(mp) == 0 ||
	    set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Try harder if we were previously at a no preemption level.
	 */
	if (self->ul_preempt > 1) {
		if (self->ul_spin_lock_spin2 != UINT_MAX)
			self->ul_spin_lock_spin2++;
		if (mutex_queuelock_adaptive(mp) == 0 ||
		    set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			return;
		}
	}
	/*
	 * Give up and block in the kernel for the mutex.
	 */
	if (self->ul_spin_lock_sleep != UINT_MAX)
		self->ul_spin_lock_sleep++;
	(void) ___lwp_mutex_timedlock(mp, NULL);
	mp->mutex_owner = (uintptr_t)self;
}

void
spin_lock_clear(mutex_t *mp)
{
	ulwp_t *self = curthread;

	mp->mutex_owner = 0;
	if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
		(void) ___lwp_mutex_wakeup(mp, 0);
		if (self->ul_spin_lock_wakeup != UINT_MAX)
			self->ul_spin_lock_wakeup++;
	}
	preempt(self);
}

/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	mutex_t *mp;
	void *data;
	int i;

	/*
	 * No locks are needed; we call here only when single-threaded.
	 */
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	if ((data = _private_mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread queue_head table");
	udp->queue_head = (queue_head_t *)data;
	for (i = 0; i < 2 * QHASHSIZE; i++) {
		mp = &udp->queue_head[i].qh_lock;
		mp->mutex_flag = LOCK_INITED;
		mp->mutex_magic = MUTEX_MAGIC;
	}
}

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t *prev;
	uint_t index;
	uint32_t cnt = 0;
	char qtype;
	void *wchan;

	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
	ASSERT((qp->qh_head != NULL && qp->qh_tail != NULL) ||
	    (qp->qh_head == NULL && qp->qh_tail == NULL));
	if (!thread_queue_verify)
		return;
	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
	for (prev = NULL, ulwp = qp->qh_head; ulwp != NULL;
	    prev = ulwp, ulwp = ulwp->ul_link, cnt++) {
		ASSERT(ulwp->ul_qtype == qtype);
		ASSERT(ulwp->ul_wchan != NULL);
		ASSERT(ulwp->ul_sleepq == qp);
		wchan = ulwp->ul_wchan;
		index = QUEUE_HASH(wchan, qtype);
		ASSERT(&udp->queue_head[index] == qp);
	}
	ASSERT(qp->qh_tail == prev);
	ASSERT(qp->qh_qlen == cnt);
}

#else	/* THREAD_DEBUG */

#define	QVERIFY(qp)

#endif	/* THREAD_DEBUG */

/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;

	ASSERT(qtype == MX || qtype == CV);

	/*
	 * It is possible that we could be called while still single-threaded.
	 * If so, we call queue_alloc() to allocate the queue_head[] array.
	 */
	if ((qp = udp->queue_head) == NULL) {
		queue_alloc();
		qp = udp->queue_head;
	}
	qp += QUEUE_HASH(wchan, qtype);
	spin_lock_set(&qp->qh_lock);
	/*
	 * At one acquisition per nanosecond, qh_lockcount will wrap after
	 * 512 years.  Were we to care about this, we could peg the value
	 * at UINT64_MAX.
	 */
	qp->qh_lockcount++;
	QVERIFY(qp);
	return (qp);
}

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
	QVERIFY(qp);
	spin_lock_clear(&qp->qh_lock);
}

/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.  We do this by making writers appear to have a half
 * point higher priority for purposes of priority comparisons below.
 */
#define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)

void
enqueue(queue_head_t *qp, ulwp_t *ulwp, void *wchan, int qtype)
{
	ulwp_t **ulwpp;
	ulwp_t *next;
	int pri = CMP_PRIO(ulwp);
	int force_fifo = (qtype & FIFOQ);
	int do_fifo;

	qtype &= ~FIFOQ;
	ASSERT(qtype == MX || qtype == CV);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(ulwp->ul_sleepq != qp);

	/*
	 * LIFO queue ordering is unfair and can lead to starvation,
	 * but it gives better performance for heavily contended locks.
	 * We use thread_queue_fifo (range is 0..8) to determine
	 * the frequency of FIFO vs LIFO queuing:
	 *	0 : every 256th time	(almost always LIFO)
	 *	1 : every 128th time
	 *	2 : every 64th time
	 *	3 : every 32nd time
	 *	4 : every 16th time	(the default value, mostly LIFO)
	 *	5 : every 8th time
	 *	6 : every 4th time
	 *	7 : every 2nd time
	 *	8 : every time		(never LIFO, always FIFO)
	 * Note that there is always some degree of FIFO ordering.
	 * This breaks live lock conditions that occur in applications
	 * that are written assuming (incorrectly) that threads acquire
	 * locks fairly, that is, in roughly round-robin order.
	 * In any event, the queue is maintained in priority order.
	 *
	 * If we are given the FIFOQ flag in qtype, fifo queueing is forced.
	 * SUSV3 requires this for semaphores.
	 */
	do_fifo = (force_fifo ||
	    ((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0);

	if (qp->qh_head == NULL) {
		/*
		 * The queue is empty.  LIFO/FIFO doesn't matter.
		 */
		ASSERT(qp->qh_tail == NULL);
		ulwpp = &qp->qh_head;
	} else if (do_fifo) {
		/*
		 * Enqueue after the last thread whose priority is greater
		 * than or equal to the priority of the thread being queued.
		 * Attempt first to go directly onto the tail of the queue.
		 */
		if (pri <= CMP_PRIO(qp->qh_tail))
			ulwpp = &qp->qh_tail->ul_link;
		else {
			for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL;
			    ulwpp = &next->ul_link)
				if (pri > CMP_PRIO(next))
					break;
		}
	} else {
		/*
		 * Enqueue before the first thread whose priority is less
		 * than or equal to the priority of the thread being queued.
		 * Hopefully we can go directly onto the head of the queue.
		 */
		for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL;
		    ulwpp = &next->ul_link)
			if (pri >= CMP_PRIO(next))
				break;
	}
	if ((ulwp->ul_link = *ulwpp) == NULL)
		qp->qh_tail = ulwp;
	*ulwpp = ulwp;

	ulwp->ul_sleepq = qp;
	ulwp->ul_wchan = wchan;
	ulwp->ul_qtype = qtype;
	if (qp->qh_qmax < ++qp->qh_qlen)
		qp->qh_qmax = qp->qh_qlen;
}

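/*
 * A concrete reading of the do_fifo test in enqueue() above: with the
 * default ul_queue_fifo value of 4, the expression
 *	((++qp->qh_qcnt << 4) & 0xff) == 0
 * is true exactly when the low 4 bits of qh_qcnt (after the increment)
 * are zero, that is, on every 16th enqueue.  A value of 8 shifts qh_qcnt
 * entirely out of the 0xff mask, so the test is true on every enqueue
 * (always FIFO), while a value of 0 leaves all 8 low bits in place, so
 * it is true only on every 256th enqueue, matching the table above.
 */
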
/*
 * Return a pointer to the queue slot of the
 * highest priority thread on the queue.
 * On return, prevp, if not NULL, will contain a pointer
 * to the thread's predecessor on the queue
 */
static ulwp_t **
queue_slot(queue_head_t *qp, void *wchan, int *more, ulwp_t **prevp)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev = NULL;
	ulwp_t **suspp = NULL;
	ulwp_t *susprev;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

	/*
	 * Find a waiter on the sleep queue.
	 */
	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan) {
			if (!ulwp->ul_stop)
				break;
			/*
			 * Try not to return a suspended thread.
			 * This mimics the old libthread's behavior.
			 */
			if (suspp == NULL) {
				suspp = ulwpp;
				susprev = prev;
			}
		}
	}

	if (ulwp == NULL && suspp != NULL) {
		ulwp = *(ulwpp = suspp);
		prev = susprev;
		suspp = NULL;
	}
	if (ulwp == NULL) {
		if (more != NULL)
			*more = 0;
		return (NULL);
	}

	if (prevp != NULL)
		*prevp = prev;
	if (more == NULL)
		return (ulwpp);

	/*
	 * Scan the remainder of the queue for another waiter.
	 */
	if (suspp != NULL) {
		*more = 1;
		return (ulwpp);
	}
	for (ulwp = ulwp->ul_link; ulwp != NULL; ulwp = ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan) {
			*more = 1;
			return (ulwpp);
		}
	}

	*more = 0;
	return (ulwpp);
}

ulwp_t *
queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
{
	ulwp_t *ulwp;

	ulwp = *ulwpp;
	*ulwpp = ulwp->ul_link;
	ulwp->ul_link = NULL;
	if (qp->qh_tail == ulwp)
		qp->qh_tail = prev;
	qp->qh_qlen--;
	ulwp->ul_sleepq = NULL;
	ulwp->ul_wchan = NULL;

	return (ulwp);
}

ulwp_t *
dequeue(queue_head_t *qp, void *wchan, int *more)
{
	ulwp_t **ulwpp;
	ulwp_t *prev;

	if ((ulwpp = queue_slot(qp, wchan, more, &prev)) == NULL)
		return (NULL);
	return (queue_unlink(qp, ulwpp, prev));
}

/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp, void *wchan)
{
	ulwp_t **ulwpp;

	if ((ulwpp = queue_slot(qp, wchan, NULL, NULL)) == NULL)
		return (NULL);
	return (*ulwpp);
}

uint8_t
dequeue_self(queue_head_t *qp, void *wchan)
{
	ulwp_t *self = curthread;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev = NULL;
	int found = 0;
	int more = 0;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

	/* find self on the sleep queue */
	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp == self) {
			/* dequeue ourself */
			ASSERT(self->ul_wchan == wchan);
			(void) queue_unlink(qp, ulwpp, prev);
			self->ul_cvmutex = NULL;
			self->ul_cv_wake = 0;
			found = 1;
			break;
		}
		if (ulwp->ul_wchan == wchan)
			more = 1;
	}

	if (!found)
		thr_panic("dequeue_self(): curthread not found on queue");

	if (more)
		return (1);

	/* scan the remainder of the queue for another waiter */
	for (ulwp = *ulwpp; ulwp != NULL; ulwp = ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan)
			return (1);
	}

	return (0);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourself off of our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;

	/*
	 * Calling enter_critical()/exit_critical() here would lead
	 * to recursion.  Just manipulate self->ul_critical directly.
	 */
	self->ul_critical++;
	while (self->ul_sleepq != NULL) {
		qp = queue_lock(self->ul_wchan, self->ul_qtype);
		/*
		 * We may have been moved from a CV queue to a
		 * mutex queue while we were attempting queue_lock().
		 * If so, just loop around and try again.
		 * dequeue_self() clears self->ul_sleepq.
		 */
		if (qp == self->ul_sleepq) {
			(void) dequeue_self(qp, self->ul_wchan);
			self->ul_writer = 0;
		}
		queue_unlock(qp);
	}
	self->ul_critical--;
}

/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
static int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep;
	int acquired;
	int error;

	self->ul_sp = stkptr();
	self->ul_wchan = mp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}
	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;
	self->ul_wchan = NULL;
	self->ul_sp = 0;

	if (acquired) {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

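/*
 * The plockstat probes fired above (and throughout this file) feed the
 * DTrace plockstat provider, so contention on these paths can be observed
 * from outside the process, for example with an invocation along the
 * lines of
 *	plockstat -A -e 10 -p <pid>
 * (illustrative only; see plockstat(1M) for the exact options).
 */
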
/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	int error;
	int acquired;

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_trylock(mp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}

	if (acquired) {
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else if (error != EBUSY) {
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	sc_shared_t *tmp;

	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
	    !self->ul_vfork &&			/* not a child of vfork() */
	    !self->ul_schedctl_called) {	/* haven't been called before */
		enter_critical(self);
		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
			self->ul_schedctl = scp = tmp;
		exit_critical(self);
	}
	/*
	 * Unless the call to setup_schedctl() is surrounded
	 * by enter_critical()/exit_critical(), the address
	 * we are returning could be invalid due to a forkall()
	 * having occurred in another thread.
	 */
	return (scp);
}

/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = _schedctl_init
#pragma weak _schedctl_lookup = _schedctl_init
#pragma weak schedctl_init = _schedctl_init
schedctl_t *
_schedctl_init(void)
{
	volatile sc_shared_t *scp = setup_schedctl();
	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}

#pragma weak schedctl_exit = _schedctl_exit
void
_schedctl_exit(void)
{
}

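/*
 * Illustrative usage sketch (not part of libc): how an application thread
 * can use the schedctl interfaces exported above to bracket a short
 * critical section with a preemption-control hint.  Compiled out; the
 * function name is hypothetical and error handling is minimal.
 */
#if 0
#include <schedctl.h>

static void
short_critical_section(void)
{
	schedctl_t *sc = schedctl_init();	/* per-LWP data; NULL on failure */

	if (sc != NULL)
		schedctl_start(sc);	/* hint: don't preempt me right now */
	/* ... a few instructions of critical work ... */
	if (sc != NULL)
		schedctl_stop(sc);	/* drop the hint, yield if requested */
}
#endif
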
/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *volatile *ptr;

	if (self->ul_vfork)
		return (NULL);
	if (*(ptr = &self->ul_schedctl) == NULL)
		(void) setup_schedctl();
	return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_preempt++ == 0) {
		enter_critical(self);
		if ((scp = self->ul_schedctl) != NULL ||
		    (scp = setup_schedctl()) != NULL) {
			/*
			 * Save the pre-existing preempt value.
			 */
			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
			scp->sc_preemptctl.sc_nopreempt = 1;
		}
	}
}

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	ASSERT(self->ul_preempt > 0);
	if (--self->ul_preempt == 0) {
		if ((scp = self->ul_schedctl) != NULL) {
			/*
			 * Restore the pre-existing preempt value.
			 */
			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
			if (scp->sc_preemptctl.sc_yield &&
			    scp->sc_preemptctl.sc_nopreempt == 0) {
				lwp_yield();
				if (scp->sc_preemptctl.sc_yield) {
					/*
					 * Shouldn't happen.  This is either
					 * a race condition or the thread
					 * just entered the real-time class.
					 */
					lwp_yield();
					scp->sc_preemptctl.sc_yield = 0;
				}
			}
		}
		exit_critical(self);
	}
}

/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
	volatile sc_shared_t *scp = self->ul_schedctl;

	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
	    (self->ul_curplease && self->ul_critical == 1)) {
		(void) __lwp_unpark(lwpid);
		lwpid = 0;
	}
	return (lwpid);
}

/*
 * Spin for a while, trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
static int
mutex_trylock_adaptive(mutex_t *mp)
{
	ulwp_t *self = curthread;
	int error = EBUSY;
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	int count;
	int max;

	ASSERT(!(mp->mutex_type & USYNC_PROCESS));

	if (MUTEX_OWNER(mp) == self)
		return (EBUSY);

	/* short-cut, not definitive (see below) */
	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		DTRACE_PROBE2(plockstat, mutex__error, mp, ENOTRECOVERABLE);
		return (ENOTRECOVERABLE);
	}

	if ((max = self->ul_adaptive_spin) == 0 ||
	    mp->mutex_spinners >= self->ul_max_spinners)
		max = 1;	/* try at least once */

	DTRACE_PROBE1(plockstat, mutex__spin, mp);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;
	/*
	 * This spin loop is unfair to lwps that have already dropped into
	 * the kernel to sleep.  They will starve on a highly-contended mutex.
	 * This is just too bad.  The adaptive spin algorithm is intended
	 * to allow programs with highly-contended locks (that is, broken
	 * programs) to execute with reasonable speed despite their contention.
	 * Being fair would reduce the speed of such programs and well-written
	 * programs will not suffer in any case.
	 */
	enter_critical(self);		/* protects ul_schedctl */
	atomic_inc_32(&mp->mutex_spinners);
	for (count = 1; count <= max; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
			break;
		}
		SMT_PAUSE();
		/*
		 * Stop spinning if the mutex owner is not running on
		 * a processor; it will not drop the lock any time soon
		 * and we would just be wasting time to keep spinning.
		 *
		 * Note that we are looking at another thread (ulwp_t)
		 * without ensuring that the other thread does not exit.
		 * The scheme relies on ulwp_t structures never being
		 * deallocated by the library (the library employs a free
		 * list of ulwp_t structs that are reused when new threads
		 * are created) and on schedctl shared memory never being
		 * deallocated once created via __schedctl().
		 *
		 * Thus, the worst that can happen when the spinning thread
		 * looks at the owner's schedctl data is that it is looking
		 * at some other thread's schedctl data.  This almost never
		 * happens and is benign when it does.
		 */
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}
	atomic_dec_32(&mp->mutex_spinners);
	exit_critical(self);

	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		/*
		 * We shouldn't own the mutex; clear the lock.
		 */
		mp->mutex_owner = 0;
		if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK)
			mutex_wakeup_all(mp);
		error = ENOTRECOVERABLE;
	}

	if (error) {
		DTRACE_PROBE2(plockstat, mutex__spun, 0, count);
		if (error != EBUSY) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
		}
	} else {
		DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
		if (mp->mutex_flag & LOCK_OWNERDEAD) {
			ASSERT(mp->mutex_type & LOCK_ROBUST);
			error = EOWNERDEAD;
		}
	}

	return (error);
}

/*
 * Same as mutex_trylock_adaptive(), except specifically for queue locks.
 * The owner field is not set here; the caller (spin_lock_set()) sets it.
 */
static int
mutex_queuelock_adaptive(mutex_t *mp)
{
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	int count = curthread->ul_queue_spin;

	ASSERT(mp->mutex_type == USYNC_THREAD);

	if (count == 0)
		return (EBUSY);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;
	while (--count >= 0) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0)
			return (0);
		SMT_PAUSE();
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}

	return (EBUSY);
}

/*
 * Like mutex_trylock_adaptive(), but for process-shared mutexes.
 * Spin for a while, trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread
 * and mutex_ownerpid set to the current pid.
 */
static int
mutex_trylock_process(mutex_t *mp)
{
	ulwp_t *self = curthread;
	int error = EBUSY;
	volatile uint8_t *lockp;
	int count;
	int max;

	ASSERT(mp->mutex_type & USYNC_PROCESS);

	if (shared_mutex_held(mp))
		return (EBUSY);

	/* short-cut, not definitive (see below) */
	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		DTRACE_PROBE2(plockstat, mutex__error, mp, ENOTRECOVERABLE);
		return (ENOTRECOVERABLE);
	}

	if (ncpus == 0)
		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	max = (ncpus > 1)? self->ul_adaptive_spin : 1;
	if (max == 0)
		max = 1;	/* try at least once */

	DTRACE_PROBE1(plockstat, mutex__spin, mp);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	/*
	 * This is a process-shared mutex.
	 * We cannot know if the owner is running on a processor.
	 * We just spin and hope that it is on a processor.
	 */
	enter_critical(self);
	for (count = 1; count <= max; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			mp->mutex_ownerpid = self->ul_uberdata->pid;
			error = 0;
			break;
		}
		SMT_PAUSE();
	}
	exit_critical(self);

	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		/*
		 * We shouldn't own the mutex; clear the lock.
		 */
		mp->mutex_owner = 0;
		mp->mutex_ownerpid = 0;
		if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
			no_preempt(self);
			(void) ___lwp_mutex_wakeup(mp, 1);
			preempt(self);
		}
		error = ENOTRECOVERABLE;
	}

	if (error) {
		DTRACE_PROBE2(plockstat, mutex__spun, 0, count);
		if (error != EBUSY) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
		}
	} else {
		DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
		if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) {
			ASSERT(mp->mutex_type & LOCK_ROBUST);
			if (mp->mutex_flag & LOCK_OWNERDEAD)
				error = EOWNERDEAD;
			else if (mp->mutex_type & USYNC_PROCESS_ROBUST)
				error = ELOCKUNMAPPED;
			else
				error = EOWNERDEAD;
		}
	}

	return (error);
}

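/*
 * Illustrative usage sketch (not part of libc): the kind of application
 * setup that produces the USYNC_PROCESS mutexes handled above -- a mutex_t
 * placed in anonymous shared memory and initialized once before fork(),
 * after which parent and child serialize through it.  Compiled out;
 * names and error handling are abbreviated.
 */
#if 0
#include <sys/mman.h>
#include <synch.h>
#include <unistd.h>

static mutex_t *
shared_mutex_example(void)
{
	mutex_t *mp;

	/* MAP_SHARED|MAP_ANON memory stays shared across fork() */
	mp = mmap(NULL, sizeof (mutex_t), PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_ANON, -1, 0);
	if (mp == MAP_FAILED)
		return (NULL);
	(void) mutex_init(mp, USYNC_PROCESS, NULL);

	if (fork() == 0) {
		(void) mutex_lock(mp);	/* contends with the parent */
		/* ... child's critical section ... */
		(void) mutex_unlock(mp);
		_exit(0);
	}
	return (mp);
}
#endif
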
1261*4574Sraf */ 1262*4574Sraf static void 1263*4574Sraf mutex_wakeup_all(mutex_t *mp) 1264*4574Sraf { 1265*4574Sraf queue_head_t *qp; 1266*4574Sraf int nlwpid = 0; 1267*4574Sraf int maxlwps = MAXLWPS; 1268*4574Sraf ulwp_t **ulwpp; 1269*4574Sraf ulwp_t *ulwp; 1270*4574Sraf ulwp_t *prev = NULL; 1271*4574Sraf lwpid_t buffer[MAXLWPS]; 1272*4574Sraf lwpid_t *lwpid = buffer; 1273*4574Sraf 1274*4574Sraf /* 1275*4574Sraf * Walk the list of waiters and prepare to wake up all of them. 1276*4574Sraf * The waiters flag has already been cleared from the mutex. 1277*4574Sraf * 1278*4574Sraf * We keep track of lwpids that are to be unparked in lwpid[]. 1279*4574Sraf * __lwp_unpark_all() is called to unpark all of them after 1280*4574Sraf * they have been removed from the sleep queue and the sleep 1281*4574Sraf * queue lock has been dropped. If we run out of space in our 1282*4574Sraf * on-stack buffer, we need to allocate more but we can't call 1283*4574Sraf * lmalloc() because we are holding a queue lock when the overflow 1284*4574Sraf * occurs and lmalloc() acquires a lock. We can't use alloca() 1285*4574Sraf * either because the application may have allocated a small 1286*4574Sraf * stack and we don't want to overrun the stack. So we call 1287*4574Sraf * alloc_lwpids() to allocate a bigger buffer using the mmap() 1288*4574Sraf * system call directly since that path acquires no locks. 1289*4574Sraf */ 1290*4574Sraf qp = queue_lock(mp, MX); 1291*4574Sraf ulwpp = &qp->qh_head; 1292*4574Sraf while ((ulwp = *ulwpp) != NULL) { 1293*4574Sraf if (ulwp->ul_wchan != mp) { 1294*4574Sraf prev = ulwp; 1295*4574Sraf ulwpp = &ulwp->ul_link; 1296*4574Sraf } else { 1297*4574Sraf if (nlwpid == maxlwps) 1298*4574Sraf lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps); 1299*4574Sraf (void) queue_unlink(qp, ulwpp, prev); 1300*4574Sraf lwpid[nlwpid++] = ulwp->ul_lwpid; 1301*4574Sraf } 1302*4574Sraf } 1303*4574Sraf mp->mutex_waiters = 0; 1304*4574Sraf 1305*4574Sraf if (nlwpid == 0) { 1306*4574Sraf queue_unlock(qp); 1307*4574Sraf } else { 1308*4574Sraf no_preempt(curthread); 1309*4574Sraf queue_unlock(qp); 1310*4574Sraf if (nlwpid == 1) 1311*4574Sraf (void) __lwp_unpark(lwpid[0]); 1312*4574Sraf else 1313*4574Sraf (void) __lwp_unpark_all(lwpid, nlwpid); 1314*4574Sraf preempt(curthread); 1315*4574Sraf } 1316*4574Sraf 1317*4574Sraf if (lwpid != buffer) 1318*4574Sraf (void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t)); 1319*4574Sraf } 1320*4574Sraf 1321*4574Sraf /* 13220Sstevel@tonic-gate * Spin for a while, testing to see if the lock has been grabbed. 13230Sstevel@tonic-gate * If this fails, call mutex_wakeup() to release a waiter. 13240Sstevel@tonic-gate */ 1325*4574Sraf static lwpid_t 1326*4574Sraf mutex_unlock_queue(mutex_t *mp, int release_all) 13270Sstevel@tonic-gate { 13280Sstevel@tonic-gate ulwp_t *self = curthread; 13290Sstevel@tonic-gate uint32_t *lockw = &mp->mutex_lockword; 13300Sstevel@tonic-gate lwpid_t lwpid; 13310Sstevel@tonic-gate volatile uint8_t *lockp; 13320Sstevel@tonic-gate volatile uint32_t *spinp; 13330Sstevel@tonic-gate int count; 13340Sstevel@tonic-gate 13350Sstevel@tonic-gate /* 13360Sstevel@tonic-gate * We use the swap primitive to clear the lock, but we must 13370Sstevel@tonic-gate * atomically retain the waiters bit for the remainder of this 13380Sstevel@tonic-gate * code to work. We first check to see if the waiters bit is 13390Sstevel@tonic-gate * set and if so clear the lock by swapping in a word containing 13400Sstevel@tonic-gate * only the waiters bit. 
This could produce a false positive test 13410Sstevel@tonic-gate * for whether there are waiters that need to be waked up, but 13420Sstevel@tonic-gate * this just causes an extra call to mutex_wakeup() to do nothing. 13430Sstevel@tonic-gate * The opposite case is more delicate: If there are no waiters, 13440Sstevel@tonic-gate * we swap in a zero lock byte and a zero waiters bit. The result 13450Sstevel@tonic-gate * of the swap could indicate that there really was a waiter so in 13460Sstevel@tonic-gate * this case we go directly to mutex_wakeup() without performing 13470Sstevel@tonic-gate * any of the adaptive code because the waiter bit has been cleared 13480Sstevel@tonic-gate * and the adaptive code is unreliable in this case. 13490Sstevel@tonic-gate */ 1350*4574Sraf if (release_all || !(*lockw & WAITERMASK)) { 13510Sstevel@tonic-gate mp->mutex_owner = 0; 13520Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 13534570Sraf if (!(atomic_swap_32(lockw, 0) & WAITERMASK)) 1354*4574Sraf return (0); /* no waiters */ 13550Sstevel@tonic-gate no_preempt(self); /* ensure a prompt wakeup */ 13560Sstevel@tonic-gate } else { 13570Sstevel@tonic-gate no_preempt(self); /* ensure a prompt wakeup */ 13580Sstevel@tonic-gate lockp = (volatile uint8_t *)&mp->mutex_lockw; 13590Sstevel@tonic-gate spinp = (volatile uint32_t *)&mp->mutex_spinners; 13600Sstevel@tonic-gate mp->mutex_owner = 0; 13610Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 13624570Sraf /* clear lock, retain waiter */ 13634570Sraf (void) atomic_swap_32(lockw, WAITER); 13640Sstevel@tonic-gate 13650Sstevel@tonic-gate /* 13660Sstevel@tonic-gate * We spin here fewer times than mutex_trylock_adaptive(). 13670Sstevel@tonic-gate * We are trying to balance two conflicting goals: 13680Sstevel@tonic-gate * 1. Avoid waking up anyone if a spinning thread 13690Sstevel@tonic-gate * grabs the lock. 13700Sstevel@tonic-gate * 2. Wake up a sleeping thread promptly to get on 13710Sstevel@tonic-gate * with useful work. 13720Sstevel@tonic-gate * We don't spin at all if there is no acquiring spinner; 13730Sstevel@tonic-gate * (mp->mutex_spinners is non-zero if there are spinners). 13740Sstevel@tonic-gate */ 13750Sstevel@tonic-gate for (count = self->ul_release_spin; 13760Sstevel@tonic-gate *spinp && count > 0; count--) { 13770Sstevel@tonic-gate /* 13780Sstevel@tonic-gate * There is a waiter that we will have to wake 13790Sstevel@tonic-gate * up unless someone else grabs the lock while 13800Sstevel@tonic-gate * we are busy spinning. Like the spin loop in 13810Sstevel@tonic-gate * mutex_trylock_adaptive(), this spin loop is 13820Sstevel@tonic-gate * unfair to lwps that have already dropped into 13830Sstevel@tonic-gate * the kernel to sleep. They will starve on a 13840Sstevel@tonic-gate * highly-contended mutex. Too bad. 13850Sstevel@tonic-gate */ 13860Sstevel@tonic-gate if (*lockp != 0) { /* somebody grabbed the lock */ 13870Sstevel@tonic-gate preempt(self); 13880Sstevel@tonic-gate return (0); 13890Sstevel@tonic-gate } 13900Sstevel@tonic-gate SMT_PAUSE(); 13910Sstevel@tonic-gate } 13920Sstevel@tonic-gate 13930Sstevel@tonic-gate /* 13940Sstevel@tonic-gate * No one grabbed the lock. 13950Sstevel@tonic-gate * Wake up some lwp that is waiting for it. 
13960Sstevel@tonic-gate */ 13970Sstevel@tonic-gate mp->mutex_waiters = 0; 1398*4574Sraf } 1399*4574Sraf 1400*4574Sraf if (release_all) { 1401*4574Sraf mutex_wakeup_all(mp); 1402*4574Sraf lwpid = 0; 1403*4574Sraf } else { 14040Sstevel@tonic-gate lwpid = mutex_wakeup(mp); 14050Sstevel@tonic-gate } 14060Sstevel@tonic-gate if (lwpid == 0) 14070Sstevel@tonic-gate preempt(self); 14080Sstevel@tonic-gate return (lwpid); 14090Sstevel@tonic-gate } 14100Sstevel@tonic-gate 14110Sstevel@tonic-gate /* 14120Sstevel@tonic-gate * Like mutex_unlock_queue(), but for process-shared mutexes. 14130Sstevel@tonic-gate * We tested the waiters field before calling here and it was non-zero. 14140Sstevel@tonic-gate */ 1415*4574Sraf static void 1416*4574Sraf mutex_unlock_process(mutex_t *mp, int release_all) 14170Sstevel@tonic-gate { 14180Sstevel@tonic-gate ulwp_t *self = curthread; 14190Sstevel@tonic-gate int count; 14200Sstevel@tonic-gate volatile uint8_t *lockp; 14210Sstevel@tonic-gate 14220Sstevel@tonic-gate /* 14230Sstevel@tonic-gate * See the comments in mutex_unlock_queue(), above. 14240Sstevel@tonic-gate */ 1425*4574Sraf if (ncpus == 0) 1426*4574Sraf ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN); 1427*4574Sraf count = (ncpus > 1)? self->ul_release_spin : 0; 14280Sstevel@tonic-gate no_preempt(self); 14290Sstevel@tonic-gate mp->mutex_owner = 0; 14300Sstevel@tonic-gate mp->mutex_ownerpid = 0; 14310Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 1432*4574Sraf if (release_all || count == 0) { 14330Sstevel@tonic-gate /* clear lock, test waiter */ 14344570Sraf if (!(atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK)) { 14350Sstevel@tonic-gate /* no waiters now */ 14360Sstevel@tonic-gate preempt(self); 14370Sstevel@tonic-gate return; 14380Sstevel@tonic-gate } 14390Sstevel@tonic-gate } else { 14400Sstevel@tonic-gate /* clear lock, retain waiter */ 14414570Sraf (void) atomic_swap_32(&mp->mutex_lockword, WAITER); 14420Sstevel@tonic-gate lockp = (volatile uint8_t *)&mp->mutex_lockw; 14430Sstevel@tonic-gate while (--count >= 0) { 14440Sstevel@tonic-gate if (*lockp != 0) { 14450Sstevel@tonic-gate /* somebody grabbed the lock */ 14460Sstevel@tonic-gate preempt(self); 14470Sstevel@tonic-gate return; 14480Sstevel@tonic-gate } 14490Sstevel@tonic-gate SMT_PAUSE(); 14500Sstevel@tonic-gate } 14510Sstevel@tonic-gate /* 14520Sstevel@tonic-gate * We must clear the waiters field before going 14530Sstevel@tonic-gate * to the kernel, else it could remain set forever. 14540Sstevel@tonic-gate */ 14550Sstevel@tonic-gate mp->mutex_waiters = 0; 14560Sstevel@tonic-gate } 1457*4574Sraf (void) ___lwp_mutex_wakeup(mp, release_all); 14580Sstevel@tonic-gate preempt(self); 14590Sstevel@tonic-gate } 14600Sstevel@tonic-gate 14610Sstevel@tonic-gate /* 14620Sstevel@tonic-gate * Return the real priority of a thread. 14630Sstevel@tonic-gate */ 14640Sstevel@tonic-gate int 14650Sstevel@tonic-gate real_priority(ulwp_t *ulwp) 14660Sstevel@tonic-gate { 14670Sstevel@tonic-gate if (ulwp->ul_epri == 0) 14680Sstevel@tonic-gate return (ulwp->ul_mappedpri? ulwp->ul_mappedpri : ulwp->ul_pri); 14690Sstevel@tonic-gate return (ulwp->ul_emappedpri? 
ulwp->ul_emappedpri : ulwp->ul_epri); 14700Sstevel@tonic-gate } 14710Sstevel@tonic-gate 14720Sstevel@tonic-gate void 14730Sstevel@tonic-gate stall(void) 14740Sstevel@tonic-gate { 14750Sstevel@tonic-gate for (;;) 14760Sstevel@tonic-gate (void) mutex_lock_kernel(&stall_mutex, NULL, NULL); 14770Sstevel@tonic-gate } 14780Sstevel@tonic-gate 14790Sstevel@tonic-gate /* 14800Sstevel@tonic-gate * Acquire a USYNC_THREAD mutex via user-level sleep queues. 14810Sstevel@tonic-gate * We failed set_lock_byte(&mp->mutex_lockw) before coming here. 1482*4574Sraf * If successful, returns with mutex_owner set correctly. 14830Sstevel@tonic-gate */ 14840Sstevel@tonic-gate int 14850Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp, 14860Sstevel@tonic-gate timespec_t *tsp) 14870Sstevel@tonic-gate { 14880Sstevel@tonic-gate uberdata_t *udp = curthread->ul_uberdata; 14890Sstevel@tonic-gate queue_head_t *qp; 14900Sstevel@tonic-gate hrtime_t begin_sleep; 14910Sstevel@tonic-gate int error = 0; 14920Sstevel@tonic-gate 14930Sstevel@tonic-gate self->ul_sp = stkptr(); 14940Sstevel@tonic-gate if (__td_event_report(self, TD_SLEEP, udp)) { 14950Sstevel@tonic-gate self->ul_wchan = mp; 14960Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_SLEEP; 14970Sstevel@tonic-gate self->ul_td_evbuf.eventdata = mp; 14980Sstevel@tonic-gate tdb_event(TD_SLEEP, udp); 14990Sstevel@tonic-gate } 15000Sstevel@tonic-gate if (msp) { 15010Sstevel@tonic-gate tdb_incr(msp->mutex_sleep); 15020Sstevel@tonic-gate begin_sleep = gethrtime(); 15030Sstevel@tonic-gate } 15040Sstevel@tonic-gate 15050Sstevel@tonic-gate DTRACE_PROBE1(plockstat, mutex__block, mp); 15060Sstevel@tonic-gate 15070Sstevel@tonic-gate /* 15080Sstevel@tonic-gate * Put ourself on the sleep queue, and while we are 15090Sstevel@tonic-gate * unable to grab the lock, go park in the kernel. 15100Sstevel@tonic-gate * Take ourself off the sleep queue after we acquire the lock. 15110Sstevel@tonic-gate * The waiter bit can be set/cleared only while holding the queue lock. 15120Sstevel@tonic-gate */ 15130Sstevel@tonic-gate qp = queue_lock(mp, MX); 15140Sstevel@tonic-gate enqueue(qp, self, mp, MX); 15150Sstevel@tonic-gate mp->mutex_waiters = 1; 15160Sstevel@tonic-gate for (;;) { 15170Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 15180Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 15190Sstevel@tonic-gate mp->mutex_waiters = dequeue_self(qp, mp); 15200Sstevel@tonic-gate break; 15210Sstevel@tonic-gate } 15220Sstevel@tonic-gate set_parking_flag(self, 1); 15230Sstevel@tonic-gate queue_unlock(qp); 15240Sstevel@tonic-gate /* 15250Sstevel@tonic-gate * __lwp_park() will return the residual time in tsp 15260Sstevel@tonic-gate * if we are unparked before the timeout expires. 15270Sstevel@tonic-gate */ 15280Sstevel@tonic-gate if ((error = __lwp_park(tsp, 0)) == EINTR) 15290Sstevel@tonic-gate error = 0; 15300Sstevel@tonic-gate set_parking_flag(self, 0); 15310Sstevel@tonic-gate /* 15320Sstevel@tonic-gate * We could have taken a signal or suspended ourself. 15330Sstevel@tonic-gate * If we did, then we removed ourself from the queue. 15340Sstevel@tonic-gate * Someone else may have removed us from the queue 15350Sstevel@tonic-gate * as a consequence of mutex_unlock(). We may have 15360Sstevel@tonic-gate * gotten a timeout from __lwp_park(). Or we may still 15370Sstevel@tonic-gate * be on the queue and this is just a spurious wakeup. 
15380Sstevel@tonic-gate */ 15390Sstevel@tonic-gate qp = queue_lock(mp, MX); 15400Sstevel@tonic-gate if (self->ul_sleepq == NULL) { 1541*4574Sraf if (error) 15420Sstevel@tonic-gate break; 15430Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 15440Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 15450Sstevel@tonic-gate break; 15460Sstevel@tonic-gate } 15470Sstevel@tonic-gate enqueue(qp, self, mp, MX); 15480Sstevel@tonic-gate mp->mutex_waiters = 1; 15490Sstevel@tonic-gate } 15500Sstevel@tonic-gate ASSERT(self->ul_sleepq == qp && 15510Sstevel@tonic-gate self->ul_qtype == MX && 15520Sstevel@tonic-gate self->ul_wchan == mp); 15530Sstevel@tonic-gate if (error) { 15540Sstevel@tonic-gate mp->mutex_waiters = dequeue_self(qp, mp); 15550Sstevel@tonic-gate break; 15560Sstevel@tonic-gate } 15570Sstevel@tonic-gate } 15580Sstevel@tonic-gate ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL && 15590Sstevel@tonic-gate self->ul_wchan == NULL); 15600Sstevel@tonic-gate self->ul_sp = 0; 15610Sstevel@tonic-gate queue_unlock(qp); 1562*4574Sraf 15630Sstevel@tonic-gate if (msp) 15640Sstevel@tonic-gate msp->mutex_sleep_time += gethrtime() - begin_sleep; 15650Sstevel@tonic-gate 15660Sstevel@tonic-gate ASSERT(error == 0 || error == EINVAL || error == ETIME); 1567*4574Sraf 1568*4574Sraf if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) { 1569*4574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 1570*4574Sraf /* 1571*4574Sraf * We shouldn't own the mutex; clear the lock. 1572*4574Sraf */ 1573*4574Sraf mp->mutex_owner = 0; 1574*4574Sraf if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) 1575*4574Sraf mutex_wakeup_all(mp); 1576*4574Sraf error = ENOTRECOVERABLE; 1577*4574Sraf } 1578*4574Sraf 1579*4574Sraf if (error) { 1580*4574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0); 1581*4574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 1582*4574Sraf } else { 1583*4574Sraf DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1); 1584*4574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 1585*4574Sraf if (mp->mutex_flag & LOCK_OWNERDEAD) { 1586*4574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 1587*4574Sraf error = EOWNERDEAD; 1588*4574Sraf } 1589*4574Sraf } 1590*4574Sraf 15910Sstevel@tonic-gate return (error); 15920Sstevel@tonic-gate } 15930Sstevel@tonic-gate 1594*4574Sraf static int 1595*4574Sraf mutex_recursion(mutex_t *mp, int mtype, int try) 1596*4574Sraf { 1597*4574Sraf ASSERT(mutex_is_held(mp)); 1598*4574Sraf ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)); 1599*4574Sraf ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 1600*4574Sraf 1601*4574Sraf if (mtype & LOCK_RECURSIVE) { 1602*4574Sraf if (mp->mutex_rcount == RECURSION_MAX) { 1603*4574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN); 1604*4574Sraf return (EAGAIN); 1605*4574Sraf } 1606*4574Sraf mp->mutex_rcount++; 1607*4574Sraf DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0); 1608*4574Sraf return (0); 1609*4574Sraf } 1610*4574Sraf if (try == MUTEX_LOCK) { 1611*4574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 1612*4574Sraf return (EDEADLK); 1613*4574Sraf } 1614*4574Sraf return (EBUSY); 1615*4574Sraf } 1616*4574Sraf 1617*4574Sraf /* 1618*4574Sraf * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so 1619*4574Sraf * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary. 1620*4574Sraf * We use tdb_hash_lock here and in the synch object tracking code in 1621*4574Sraf * the tdb_agent.c file. There is no conflict between these two usages. 
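 *
 * The registration code below lazily allocates the hash table and
 * publishes it with a "check, lock, re-check, publish" sequence, then
 * searches the table lock-free before falling back to a locked
 * search-and-insert.  As a minimal sketch of that publication pattern
 * (illustrative only; widget_t, table_lock, TBLSZ and get_table() are
 * hypothetical names, not part of libc):
 *
 *	static widget_t **table;
 *	static mutex_t table_lock = DEFAULTMUTEX;
 *
 *	static widget_t **
 *	get_table(void)
 *	{
 *		widget_t **t;
 *
 *		if ((t = table) == NULL) {		// unlocked fast path
 *			lmutex_lock(&table_lock);
 *			if ((t = table) == NULL) {	// re-check under lock
 *				t = lmalloc(TBLSZ * sizeof (widget_t *));
 *				_membar_producer();	// initialize, then publish
 *				table = t;
 *			}
 *			lmutex_unlock(&table_lock);
 *		}
 *		_membar_consumer();	// see the contents, not just the pointer
 *		return (t);
 *	}
 *
 * A reader that observes a non-NULL table pointer is thereby guaranteed
 * to observe the table's initialized contents as well, which is what
 * makes the unlocked search below safe.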
1622*4574Sraf */ 1623*4574Sraf void 1624*4574Sraf register_lock(mutex_t *mp) 1625*4574Sraf { 1626*4574Sraf uberdata_t *udp = curthread->ul_uberdata; 1627*4574Sraf uint_t hash = LOCK_HASH(mp); 1628*4574Sraf robust_t *rlp; 1629*4574Sraf robust_t **rlpp; 1630*4574Sraf robust_t **table; 1631*4574Sraf 1632*4574Sraf if ((table = udp->robustlocks) == NULL) { 1633*4574Sraf lmutex_lock(&udp->tdb_hash_lock); 1634*4574Sraf if ((table = udp->robustlocks) == NULL) { 1635*4574Sraf table = lmalloc(LOCKHASHSZ * sizeof (robust_t *)); 1636*4574Sraf _membar_producer(); 1637*4574Sraf udp->robustlocks = table; 1638*4574Sraf } 1639*4574Sraf lmutex_unlock(&udp->tdb_hash_lock); 1640*4574Sraf } 1641*4574Sraf _membar_consumer(); 1642*4574Sraf 1643*4574Sraf /* 1644*4574Sraf * First search the registered table with no locks held. 1645*4574Sraf * This is safe because the table never shrinks 1646*4574Sraf * and we can only get a false negative. 1647*4574Sraf */ 1648*4574Sraf for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) { 1649*4574Sraf if (rlp->robust_lock == mp) /* already registered */ 1650*4574Sraf return; 1651*4574Sraf } 1652*4574Sraf 1653*4574Sraf /* 1654*4574Sraf * The lock was not found. 1655*4574Sraf * Repeat the operation with tdb_hash_lock held. 1656*4574Sraf */ 1657*4574Sraf lmutex_lock(&udp->tdb_hash_lock); 1658*4574Sraf 1659*4574Sraf for (rlpp = &table[hash]; 1660*4574Sraf (rlp = *rlpp) != NULL; 1661*4574Sraf rlpp = &rlp->robust_next) { 1662*4574Sraf if (rlp->robust_lock == mp) { /* already registered */ 1663*4574Sraf lmutex_unlock(&udp->tdb_hash_lock); 1664*4574Sraf return; 1665*4574Sraf } 1666*4574Sraf } 1667*4574Sraf 1668*4574Sraf /* 1669*4574Sraf * The lock has never been registered. 1670*4574Sraf * Register it now and add it to the table. 1671*4574Sraf */ 1672*4574Sraf (void) ___lwp_mutex_register(mp); 1673*4574Sraf rlp = lmalloc(sizeof (*rlp)); 1674*4574Sraf rlp->robust_lock = mp; 1675*4574Sraf _membar_producer(); 1676*4574Sraf *rlpp = rlp; 1677*4574Sraf 1678*4574Sraf lmutex_unlock(&udp->tdb_hash_lock); 1679*4574Sraf } 1680*4574Sraf 1681*4574Sraf /* 1682*4574Sraf * This is called in the child of fork()/forkall() to start over 1683*4574Sraf * with a clean slate. (Each process must register its own locks.) 1684*4574Sraf * No locks are needed because all other threads are suspended or gone. 1685*4574Sraf */ 1686*4574Sraf void 1687*4574Sraf unregister_locks(void) 1688*4574Sraf { 1689*4574Sraf uberdata_t *udp = curthread->ul_uberdata; 1690*4574Sraf uint_t hash; 1691*4574Sraf robust_t **table; 1692*4574Sraf robust_t *rlp; 1693*4574Sraf robust_t *next; 1694*4574Sraf 1695*4574Sraf if ((table = udp->robustlocks) != NULL) { 1696*4574Sraf for (hash = 0; hash < LOCKHASHSZ; hash++) { 1697*4574Sraf rlp = table[hash]; 1698*4574Sraf while (rlp != NULL) { 1699*4574Sraf next = rlp->robust_next; 1700*4574Sraf lfree(rlp, sizeof (*rlp)); 1701*4574Sraf rlp = next; 1702*4574Sraf } 1703*4574Sraf } 1704*4574Sraf lfree(table, LOCKHASHSZ * sizeof (robust_t *)); 1705*4574Sraf udp->robustlocks = NULL; 1706*4574Sraf } 1707*4574Sraf } 1708*4574Sraf 17090Sstevel@tonic-gate /* 17100Sstevel@tonic-gate * Returns with mutex_owner set correctly. 
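 *
 * For LOCK_ROBUST mutexes the lock operations below can also return
 * EOWNERDEAD, ELOCKUNMAPPED or ENOTRECOVERABLE.  A typical caller-side
 * pattern is sketched here for illustration only; repair_state(),
 * give_up() and do_protected_work() are hypothetical application
 * functions and 'm' is a robust pthread mutex:
 *
 *	int error = pthread_mutex_lock(&m);
 *
 *	if (error == ENOTRECOVERABLE) {
 *		give_up();			// the lock was NOT acquired
 *		return (error);
 *	}
 *	if (error == EOWNERDEAD) {		// we do hold the lock;
 *		repair_state();			// repair the data, then:
 *		(void) pthread_mutex_consistent_np(&m);
 *						// otherwise the unlock below
 *						// marks m NOTRECOVERABLE
 *	}
 *	do_protected_work();			// ordinary critical section
 *	(void) pthread_mutex_unlock(&m);
 *
 * ELOCKUNMAPPED (USYNC_PROCESS_ROBUST only) is handled by callers the
 * same way as EOWNERDEAD.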
17110Sstevel@tonic-gate */ 1712*4574Sraf static int 17130Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try) 17140Sstevel@tonic-gate { 17150Sstevel@tonic-gate ulwp_t *self = curthread; 17160Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 17170Sstevel@tonic-gate int mtype = mp->mutex_type; 17180Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 17190Sstevel@tonic-gate int error = 0; 1720*4574Sraf uint8_t ceil; 1721*4574Sraf int myprio; 17220Sstevel@tonic-gate 17230Sstevel@tonic-gate ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK); 17240Sstevel@tonic-gate 17250Sstevel@tonic-gate if (!self->ul_schedctl_called) 17260Sstevel@tonic-gate (void) setup_schedctl(); 17270Sstevel@tonic-gate 17280Sstevel@tonic-gate if (msp && try == MUTEX_TRY) 17290Sstevel@tonic-gate tdb_incr(msp->mutex_try); 17300Sstevel@tonic-gate 1731*4574Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_is_held(mp)) 1732*4574Sraf return (mutex_recursion(mp, mtype, try)); 17330Sstevel@tonic-gate 17340Sstevel@tonic-gate if (self->ul_error_detection && try == MUTEX_LOCK && 17350Sstevel@tonic-gate tsp == NULL && mutex_is_held(mp)) 17360Sstevel@tonic-gate lock_error(mp, "mutex_lock", NULL, NULL); 17370Sstevel@tonic-gate 1738*4574Sraf if (mtype & LOCK_PRIO_PROTECT) { 1739*4574Sraf ceil = mp->mutex_ceiling; 1740*4574Sraf ASSERT(_validate_rt_prio(SCHED_FIFO, ceil) == 0); 1741*4574Sraf myprio = real_priority(self); 1742*4574Sraf if (myprio > ceil) { 1743*4574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL); 1744*4574Sraf return (EINVAL); 1745*4574Sraf } 1746*4574Sraf if ((error = _ceil_mylist_add(mp)) != 0) { 1747*4574Sraf DTRACE_PROBE2(plockstat, mutex__error, mp, error); 1748*4574Sraf return (error); 17490Sstevel@tonic-gate } 1750*4574Sraf if (myprio < ceil) 1751*4574Sraf _ceil_prio_inherit(ceil); 1752*4574Sraf } 1753*4574Sraf 1754*4574Sraf if ((mtype & (USYNC_PROCESS | LOCK_ROBUST)) 1755*4574Sraf == (USYNC_PROCESS | LOCK_ROBUST)) 1756*4574Sraf register_lock(mp); 1757*4574Sraf 1758*4574Sraf if (mtype & LOCK_PRIO_INHERIT) { 1759*4574Sraf /* go straight to the kernel */ 1760*4574Sraf if (try == MUTEX_TRY) 1761*4574Sraf error = mutex_trylock_kernel(mp); 1762*4574Sraf else /* MUTEX_LOCK */ 1763*4574Sraf error = mutex_lock_kernel(mp, tsp, msp); 1764*4574Sraf /* 1765*4574Sraf * The kernel never sets or clears the lock byte 1766*4574Sraf * for LOCK_PRIO_INHERIT mutexes. 1767*4574Sraf * Set it here for consistency. 
1768*4574Sraf */ 1769*4574Sraf switch (error) { 1770*4574Sraf case 0: 1771*4574Sraf mp->mutex_lockw = LOCKSET; 1772*4574Sraf break; 1773*4574Sraf case EOWNERDEAD: 1774*4574Sraf case ELOCKUNMAPPED: 1775*4574Sraf mp->mutex_lockw = LOCKSET; 1776*4574Sraf /* FALLTHROUGH */ 1777*4574Sraf case ENOTRECOVERABLE: 1778*4574Sraf ASSERT(mtype & LOCK_ROBUST); 1779*4574Sraf break; 1780*4574Sraf case EDEADLK: 1781*4574Sraf if (try == MUTEX_LOCK) 1782*4574Sraf stall(); 1783*4574Sraf error = EBUSY; 1784*4574Sraf break; 17850Sstevel@tonic-gate } 17860Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 1787*4574Sraf error = mutex_trylock_process(mp); 1788*4574Sraf if (error == EBUSY && try == MUTEX_LOCK) 17890Sstevel@tonic-gate error = mutex_lock_kernel(mp, tsp, msp); 17900Sstevel@tonic-gate } else { /* USYNC_THREAD */ 1791*4574Sraf error = mutex_trylock_adaptive(mp); 1792*4574Sraf if (error == EBUSY && try == MUTEX_LOCK) 1793*4574Sraf error = mutex_lock_queue(self, msp, mp, tsp); 17940Sstevel@tonic-gate } 17950Sstevel@tonic-gate 17960Sstevel@tonic-gate switch (error) { 1797*4574Sraf case 0: 17980Sstevel@tonic-gate case EOWNERDEAD: 17990Sstevel@tonic-gate case ELOCKUNMAPPED: 1800*4574Sraf if (mtype & LOCK_ROBUST) 1801*4574Sraf remember_lock(mp); 18020Sstevel@tonic-gate if (msp) 18030Sstevel@tonic-gate record_begin_hold(msp); 18040Sstevel@tonic-gate break; 18050Sstevel@tonic-gate default: 1806*4574Sraf if (mtype & LOCK_PRIO_PROTECT) { 1807*4574Sraf (void) _ceil_mylist_del(mp); 1808*4574Sraf if (myprio < ceil) 1809*4574Sraf _ceil_prio_waive(); 1810*4574Sraf } 18110Sstevel@tonic-gate if (try == MUTEX_TRY) { 18120Sstevel@tonic-gate if (msp) 18130Sstevel@tonic-gate tdb_incr(msp->mutex_try_fail); 18140Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 18150Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 18160Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 18170Sstevel@tonic-gate } 18180Sstevel@tonic-gate } 18190Sstevel@tonic-gate break; 18200Sstevel@tonic-gate } 18210Sstevel@tonic-gate 18220Sstevel@tonic-gate return (error); 18230Sstevel@tonic-gate } 18240Sstevel@tonic-gate 18250Sstevel@tonic-gate int 18260Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try) 18270Sstevel@tonic-gate { 18280Sstevel@tonic-gate ulwp_t *self = curthread; 18290Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 18300Sstevel@tonic-gate 18310Sstevel@tonic-gate /* 18320Sstevel@tonic-gate * We know that USYNC_PROCESS is set in mtype and that 18330Sstevel@tonic-gate * zero, one, or both of the flags LOCK_RECURSIVE and 18340Sstevel@tonic-gate * LOCK_ERRORCHECK are set, and that no other flags are set. 
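 *
 * For context, a mutex reaches this fast path when the application
 * created it as process-shared, with at most the recursive or
 * errorcheck attributes.  An illustrative sketch of such creation
 * (init_shared_mutex() is hypothetical and 'mp' is assumed to live
 * in MAP_SHARED memory):
 *
 *	void
 *	init_shared_mutex(pthread_mutex_t *mp)
 *	{
 *		pthread_mutexattr_t attr;
 *
 *		(void) pthread_mutexattr_init(&attr);
 *		(void) pthread_mutexattr_setpshared(&attr,
 *		    PTHREAD_PROCESS_SHARED);
 *		(void) pthread_mutex_init(mp, &attr);
 *		(void) pthread_mutexattr_destroy(&attr);
 *	}
 *
 * The PTHREAD_PROCESS_SHARED attribute is what corresponds to
 * USYNC_PROCESS in mutex_type.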
18350Sstevel@tonic-gate */ 1836*4574Sraf ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0); 18370Sstevel@tonic-gate enter_critical(self); 18380Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 18390Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 18400Sstevel@tonic-gate mp->mutex_ownerpid = udp->pid; 18410Sstevel@tonic-gate exit_critical(self); 18420Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 18430Sstevel@tonic-gate return (0); 18440Sstevel@tonic-gate } 18450Sstevel@tonic-gate exit_critical(self); 18460Sstevel@tonic-gate 1847*4574Sraf if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp)) 1848*4574Sraf return (mutex_recursion(mp, mtype, try)); 1849*4574Sraf 1850*4574Sraf /* try a little harder */ 1851*4574Sraf if (mutex_trylock_process(mp) == 0) 18520Sstevel@tonic-gate return (0); 18530Sstevel@tonic-gate 18540Sstevel@tonic-gate if (try == MUTEX_LOCK) 18550Sstevel@tonic-gate return (mutex_lock_kernel(mp, tsp, NULL)); 18560Sstevel@tonic-gate 18570Sstevel@tonic-gate if (__td_event_report(self, TD_LOCK_TRY, udp)) { 18580Sstevel@tonic-gate self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 18590Sstevel@tonic-gate tdb_event(TD_LOCK_TRY, udp); 18600Sstevel@tonic-gate } 18610Sstevel@tonic-gate return (EBUSY); 18620Sstevel@tonic-gate } 18630Sstevel@tonic-gate 18640Sstevel@tonic-gate static int 18650Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp) 18660Sstevel@tonic-gate { 18670Sstevel@tonic-gate ulwp_t *self = curthread; 18680Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 18690Sstevel@tonic-gate uberflags_t *gflags; 18700Sstevel@tonic-gate int mtype; 18710Sstevel@tonic-gate 18720Sstevel@tonic-gate /* 18730Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 18740Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 18750Sstevel@tonic-gate * no error detection, no lock statistics, 18760Sstevel@tonic-gate * and the process has only a single thread. 18770Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 18780Sstevel@tonic-gate */ 18790Sstevel@tonic-gate if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 18800Sstevel@tonic-gate udp->uberflags.uf_all) == 0) { 18810Sstevel@tonic-gate /* 18820Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 18830Sstevel@tonic-gate */ 18840Sstevel@tonic-gate if (mp->mutex_lockw == 0) { 18850Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 18860Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 18870Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 18880Sstevel@tonic-gate return (0); 18890Sstevel@tonic-gate } 1890*4574Sraf if (mtype && MUTEX_OWNER(mp) == self) 1891*4574Sraf return (mutex_recursion(mp, mtype, MUTEX_LOCK)); 18920Sstevel@tonic-gate /* 18930Sstevel@tonic-gate * We have reached a deadlock, probably because the 18940Sstevel@tonic-gate * process is executing non-async-signal-safe code in 18950Sstevel@tonic-gate * a signal handler and is attempting to acquire a lock 18960Sstevel@tonic-gate * that it already owns. This is not surprising, given 18970Sstevel@tonic-gate * bad programming practices over the years that have 18980Sstevel@tonic-gate * resulted in applications calling printf() and such 18990Sstevel@tonic-gate * in their signal handlers.
Unless the user has told 19000Sstevel@tonic-gate * us that the signal handlers are safe by setting: 19010Sstevel@tonic-gate * export _THREAD_ASYNC_SAFE=1 19020Sstevel@tonic-gate * we return EDEADLK rather than actually deadlocking. 19030Sstevel@tonic-gate */ 19040Sstevel@tonic-gate if (tsp == NULL && 19050Sstevel@tonic-gate MUTEX_OWNER(mp) == self && !self->ul_async_safe) { 19060Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK); 19070Sstevel@tonic-gate return (EDEADLK); 19080Sstevel@tonic-gate } 19090Sstevel@tonic-gate } 19100Sstevel@tonic-gate 19110Sstevel@tonic-gate /* 19120Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 19130Sstevel@tonic-gate * no error detection, and no lock statistics. 19140Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 19150Sstevel@tonic-gate */ 19160Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 19170Sstevel@tonic-gate (gflags->uf_trs_ted | 19180Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 19190Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 19200Sstevel@tonic-gate return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK)); 19210Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 19220Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 19230Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 19240Sstevel@tonic-gate return (0); 19250Sstevel@tonic-gate } 1926*4574Sraf if (mtype && MUTEX_OWNER(mp) == self) 1927*4574Sraf return (mutex_recursion(mp, mtype, MUTEX_LOCK)); 1928*4574Sraf if (mutex_trylock_adaptive(mp) != 0) 1929*4574Sraf return (mutex_lock_queue(self, NULL, mp, tsp)); 1930*4574Sraf return (0); 19310Sstevel@tonic-gate } 19320Sstevel@tonic-gate 19330Sstevel@tonic-gate /* else do it the long way */ 19340Sstevel@tonic-gate return (mutex_lock_internal(mp, tsp, MUTEX_LOCK)); 19350Sstevel@tonic-gate } 19360Sstevel@tonic-gate 19370Sstevel@tonic-gate #pragma weak _private_mutex_lock = __mutex_lock 19380Sstevel@tonic-gate #pragma weak mutex_lock = __mutex_lock 19390Sstevel@tonic-gate #pragma weak _mutex_lock = __mutex_lock 19400Sstevel@tonic-gate #pragma weak pthread_mutex_lock = __mutex_lock 19410Sstevel@tonic-gate #pragma weak _pthread_mutex_lock = __mutex_lock 19420Sstevel@tonic-gate int 19430Sstevel@tonic-gate __mutex_lock(mutex_t *mp) 19440Sstevel@tonic-gate { 19450Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 19460Sstevel@tonic-gate return (mutex_lock_impl(mp, NULL)); 19470Sstevel@tonic-gate } 19480Sstevel@tonic-gate 19490Sstevel@tonic-gate #pragma weak pthread_mutex_timedlock = _pthread_mutex_timedlock 19500Sstevel@tonic-gate int 19510Sstevel@tonic-gate _pthread_mutex_timedlock(mutex_t *mp, const timespec_t *abstime) 19520Sstevel@tonic-gate { 19530Sstevel@tonic-gate timespec_t tslocal; 19540Sstevel@tonic-gate int error; 19550Sstevel@tonic-gate 19560Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 19570Sstevel@tonic-gate abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal); 19580Sstevel@tonic-gate error = mutex_lock_impl(mp, &tslocal); 19590Sstevel@tonic-gate if (error == ETIME) 19600Sstevel@tonic-gate error = ETIMEDOUT; 19610Sstevel@tonic-gate return (error); 19620Sstevel@tonic-gate } 19630Sstevel@tonic-gate 19640Sstevel@tonic-gate #pragma weak pthread_mutex_reltimedlock_np = _pthread_mutex_reltimedlock_np 19650Sstevel@tonic-gate int 19660Sstevel@tonic-gate _pthread_mutex_reltimedlock_np(mutex_t *mp, const timespec_t *reltime) 
19670Sstevel@tonic-gate { 19680Sstevel@tonic-gate timespec_t tslocal; 19690Sstevel@tonic-gate int error; 19700Sstevel@tonic-gate 19710Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 19720Sstevel@tonic-gate tslocal = *reltime; 19730Sstevel@tonic-gate error = mutex_lock_impl(mp, &tslocal); 19740Sstevel@tonic-gate if (error == ETIME) 19750Sstevel@tonic-gate error = ETIMEDOUT; 19760Sstevel@tonic-gate return (error); 19770Sstevel@tonic-gate } 19780Sstevel@tonic-gate 19790Sstevel@tonic-gate #pragma weak _private_mutex_trylock = __mutex_trylock 19800Sstevel@tonic-gate #pragma weak mutex_trylock = __mutex_trylock 19810Sstevel@tonic-gate #pragma weak _mutex_trylock = __mutex_trylock 19820Sstevel@tonic-gate #pragma weak pthread_mutex_trylock = __mutex_trylock 19830Sstevel@tonic-gate #pragma weak _pthread_mutex_trylock = __mutex_trylock 19840Sstevel@tonic-gate int 19850Sstevel@tonic-gate __mutex_trylock(mutex_t *mp) 19860Sstevel@tonic-gate { 19870Sstevel@tonic-gate ulwp_t *self = curthread; 19880Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 19890Sstevel@tonic-gate uberflags_t *gflags; 19900Sstevel@tonic-gate int mtype; 19910Sstevel@tonic-gate 19920Sstevel@tonic-gate ASSERT(!curthread->ul_critical || curthread->ul_bindflags); 19930Sstevel@tonic-gate /* 19940Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 19950Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 19960Sstevel@tonic-gate * no error detection, no lock statistics, 19970Sstevel@tonic-gate * and the process has only a single thread. 19980Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 19990Sstevel@tonic-gate */ 20000Sstevel@tonic-gate if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 20010Sstevel@tonic-gate udp->uberflags.uf_all) == 0) { 20020Sstevel@tonic-gate /* 20030Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 20040Sstevel@tonic-gate */ 20050Sstevel@tonic-gate if (mp->mutex_lockw == 0) { 20060Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 20070Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 20080Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 20090Sstevel@tonic-gate return (0); 20100Sstevel@tonic-gate } 2011*4574Sraf if (mtype && MUTEX_OWNER(mp) == self) 2012*4574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 20130Sstevel@tonic-gate return (EBUSY); 20140Sstevel@tonic-gate } 20150Sstevel@tonic-gate 20160Sstevel@tonic-gate /* 20170Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 20180Sstevel@tonic-gate * no error detection, and no lock statistics. 20190Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 
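 *
 * EBUSY from these trylock entry points simply means the lock is
 * currently held by someone else; callers treat it as an ordinary
 * "not acquired" result rather than an error.  A typical caller-side
 * pattern (illustrative only; try_update() and defer_update() are
 * hypothetical helpers):
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		try_update();			// got the lock cheaply
 *		(void) pthread_mutex_unlock(&m);
 *	} else {
 *		defer_update();			// EBUSY: do not block here
 *	}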
20200Sstevel@tonic-gate */ 20210Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL && 20220Sstevel@tonic-gate (gflags->uf_trs_ted | 20230Sstevel@tonic-gate (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) { 20240Sstevel@tonic-gate if (mtype & USYNC_PROCESS) 20250Sstevel@tonic-gate return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY)); 20260Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 20270Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 20280Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 20290Sstevel@tonic-gate return (0); 20300Sstevel@tonic-gate } 2031*4574Sraf if (mtype && MUTEX_OWNER(mp) == self) 2032*4574Sraf return (mutex_recursion(mp, mtype, MUTEX_TRY)); 2033*4574Sraf if (mutex_trylock_adaptive(mp) != 0) { 2034*4574Sraf if (__td_event_report(self, TD_LOCK_TRY, udp)) { 2035*4574Sraf self->ul_td_evbuf.eventnum = TD_LOCK_TRY; 2036*4574Sraf tdb_event(TD_LOCK_TRY, udp); 20370Sstevel@tonic-gate } 2038*4574Sraf return (EBUSY); 20390Sstevel@tonic-gate } 2040*4574Sraf return (0); 20410Sstevel@tonic-gate } 20420Sstevel@tonic-gate 20430Sstevel@tonic-gate /* else do it the long way */ 20440Sstevel@tonic-gate return (mutex_lock_internal(mp, NULL, MUTEX_TRY)); 20450Sstevel@tonic-gate } 20460Sstevel@tonic-gate 20470Sstevel@tonic-gate int 2048*4574Sraf mutex_unlock_internal(mutex_t *mp, int retain_robust_flags) 20490Sstevel@tonic-gate { 20500Sstevel@tonic-gate ulwp_t *self = curthread; 20510Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 20520Sstevel@tonic-gate int mtype = mp->mutex_type; 20530Sstevel@tonic-gate tdb_mutex_stats_t *msp; 2054*4574Sraf int error = 0; 2055*4574Sraf int release_all; 20560Sstevel@tonic-gate lwpid_t lwpid; 20570Sstevel@tonic-gate 20580Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !mutex_is_held(mp)) 20590Sstevel@tonic-gate return (EPERM); 20600Sstevel@tonic-gate 20610Sstevel@tonic-gate if (self->ul_error_detection && !mutex_is_held(mp)) 20620Sstevel@tonic-gate lock_error(mp, "mutex_unlock", NULL, NULL); 20630Sstevel@tonic-gate 20640Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 20650Sstevel@tonic-gate mp->mutex_rcount--; 20660Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 20670Sstevel@tonic-gate return (0); 20680Sstevel@tonic-gate } 20690Sstevel@tonic-gate 20700Sstevel@tonic-gate if ((msp = MUTEX_STATS(mp, udp)) != NULL) 20710Sstevel@tonic-gate (void) record_hold_time(msp); 20720Sstevel@tonic-gate 2073*4574Sraf if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) && 2074*4574Sraf (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 2075*4574Sraf ASSERT(mp->mutex_type & LOCK_ROBUST); 2076*4574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 2077*4574Sraf mp->mutex_flag |= LOCK_NOTRECOVERABLE; 2078*4574Sraf } 2079*4574Sraf release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0); 2080*4574Sraf 2081*4574Sraf if (mtype & LOCK_PRIO_INHERIT) { 20820Sstevel@tonic-gate no_preempt(self); 20830Sstevel@tonic-gate mp->mutex_owner = 0; 20840Sstevel@tonic-gate mp->mutex_ownerpid = 0; 20850Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 2086*4574Sraf mp->mutex_lockw = LOCKCLEAR; 2087*4574Sraf error = ___lwp_mutex_unlock(mp); 20880Sstevel@tonic-gate preempt(self); 20890Sstevel@tonic-gate } else if (mtype & USYNC_PROCESS) { 2090*4574Sraf if (mp->mutex_lockword & WAITERMASK) { 2091*4574Sraf mutex_unlock_process(mp, release_all); 2092*4574Sraf } else { 20930Sstevel@tonic-gate mp->mutex_owner = 0; 
20940Sstevel@tonic-gate mp->mutex_ownerpid = 0; 20950Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 20964570Sraf if (atomic_swap_32(&mp->mutex_lockword, 0) & 2097*4574Sraf WAITERMASK) { /* a waiter suddenly appeared */ 20980Sstevel@tonic-gate no_preempt(self); 2099*4574Sraf (void) ___lwp_mutex_wakeup(mp, release_all); 21000Sstevel@tonic-gate preempt(self); 21010Sstevel@tonic-gate } 21020Sstevel@tonic-gate } 21030Sstevel@tonic-gate } else { /* USYNC_THREAD */ 2104*4574Sraf if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) { 21050Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 21060Sstevel@tonic-gate preempt(self); 21070Sstevel@tonic-gate } 21080Sstevel@tonic-gate } 21090Sstevel@tonic-gate 2110*4574Sraf if (mtype & LOCK_ROBUST) 2111*4574Sraf forget_lock(mp); 2112*4574Sraf 2113*4574Sraf if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp)) 2114*4574Sraf _ceil_prio_waive(); 2115*4574Sraf 21160Sstevel@tonic-gate return (error); 21170Sstevel@tonic-gate } 21180Sstevel@tonic-gate 21190Sstevel@tonic-gate #pragma weak _private_mutex_unlock = __mutex_unlock 21200Sstevel@tonic-gate #pragma weak mutex_unlock = __mutex_unlock 21210Sstevel@tonic-gate #pragma weak _mutex_unlock = __mutex_unlock 21220Sstevel@tonic-gate #pragma weak pthread_mutex_unlock = __mutex_unlock 21230Sstevel@tonic-gate #pragma weak _pthread_mutex_unlock = __mutex_unlock 21240Sstevel@tonic-gate int 21250Sstevel@tonic-gate __mutex_unlock(mutex_t *mp) 21260Sstevel@tonic-gate { 21270Sstevel@tonic-gate ulwp_t *self = curthread; 21280Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 21290Sstevel@tonic-gate uberflags_t *gflags; 21300Sstevel@tonic-gate lwpid_t lwpid; 21310Sstevel@tonic-gate int mtype; 21320Sstevel@tonic-gate short el; 21330Sstevel@tonic-gate 21340Sstevel@tonic-gate /* 21350Sstevel@tonic-gate * Optimize the case of USYNC_THREAD, including 21360Sstevel@tonic-gate * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases, 21370Sstevel@tonic-gate * no error detection, no lock statistics, 21380Sstevel@tonic-gate * and the process has only a single thread. 21390Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 21400Sstevel@tonic-gate */ 21410Sstevel@tonic-gate if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) | 21420Sstevel@tonic-gate udp->uberflags.uf_all) == 0) { 21430Sstevel@tonic-gate if (mtype) { 21440Sstevel@tonic-gate /* 21450Sstevel@tonic-gate * At this point we know that one or both of the 21460Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 21470Sstevel@tonic-gate */ 21480Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 21490Sstevel@tonic-gate return (EPERM); 21500Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 21510Sstevel@tonic-gate mp->mutex_rcount--; 21520Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 21530Sstevel@tonic-gate return (0); 21540Sstevel@tonic-gate } 21550Sstevel@tonic-gate } 21560Sstevel@tonic-gate /* 21570Sstevel@tonic-gate * Only one thread exists so we don't need an atomic operation. 21580Sstevel@tonic-gate * Also, there can be no waiters. 
21590Sstevel@tonic-gate */ 21600Sstevel@tonic-gate mp->mutex_owner = 0; 21610Sstevel@tonic-gate mp->mutex_lockword = 0; 21620Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 21630Sstevel@tonic-gate return (0); 21640Sstevel@tonic-gate } 21650Sstevel@tonic-gate 21660Sstevel@tonic-gate /* 21670Sstevel@tonic-gate * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS, 21680Sstevel@tonic-gate * no error detection, and no lock statistics. 21690Sstevel@tonic-gate * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases. 21700Sstevel@tonic-gate */ 21710Sstevel@tonic-gate if ((gflags = self->ul_schedctl_called) != NULL) { 21720Sstevel@tonic-gate if (((el = gflags->uf_trs_ted) | mtype) == 0) { 21730Sstevel@tonic-gate fast_unlock: 21740Sstevel@tonic-gate if (!(mp->mutex_lockword & WAITERMASK)) { 21750Sstevel@tonic-gate /* no waiter exists right now */ 21760Sstevel@tonic-gate mp->mutex_owner = 0; 21770Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 21784570Sraf if (atomic_swap_32(&mp->mutex_lockword, 0) & 21790Sstevel@tonic-gate WAITERMASK) { 21800Sstevel@tonic-gate /* a waiter suddenly appeared */ 21810Sstevel@tonic-gate no_preempt(self); 21820Sstevel@tonic-gate if ((lwpid = mutex_wakeup(mp)) != 0) 21830Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 21840Sstevel@tonic-gate preempt(self); 21850Sstevel@tonic-gate } 2186*4574Sraf } else if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 21870Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 21880Sstevel@tonic-gate preempt(self); 21890Sstevel@tonic-gate } 21900Sstevel@tonic-gate return (0); 21910Sstevel@tonic-gate } 21920Sstevel@tonic-gate if (el) /* error detection or lock statistics */ 21930Sstevel@tonic-gate goto slow_unlock; 21940Sstevel@tonic-gate if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 21950Sstevel@tonic-gate /* 21960Sstevel@tonic-gate * At this point we know that one or both of the 21970Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set. 21980Sstevel@tonic-gate */ 21990Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self)) 22000Sstevel@tonic-gate return (EPERM); 22010Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 22020Sstevel@tonic-gate mp->mutex_rcount--; 22030Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 22040Sstevel@tonic-gate return (0); 22050Sstevel@tonic-gate } 22060Sstevel@tonic-gate goto fast_unlock; 22070Sstevel@tonic-gate } 22080Sstevel@tonic-gate if ((mtype & 22090Sstevel@tonic-gate ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) { 22100Sstevel@tonic-gate /* 22110Sstevel@tonic-gate * At this point we know that zero, one, or both of the 22120Sstevel@tonic-gate * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and 22130Sstevel@tonic-gate * that the USYNC_PROCESS flag is set. 
22140Sstevel@tonic-gate */ 22150Sstevel@tonic-gate if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp)) 22160Sstevel@tonic-gate return (EPERM); 22170Sstevel@tonic-gate if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) { 22180Sstevel@tonic-gate mp->mutex_rcount--; 22190Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 1); 22200Sstevel@tonic-gate return (0); 22210Sstevel@tonic-gate } 2222*4574Sraf if (mp->mutex_lockword & WAITERMASK) { 2223*4574Sraf mutex_unlock_process(mp, 0); 2224*4574Sraf } else { 22250Sstevel@tonic-gate mp->mutex_owner = 0; 22260Sstevel@tonic-gate mp->mutex_ownerpid = 0; 22270Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 22284570Sraf if (atomic_swap_32(&mp->mutex_lockword, 0) & 22290Sstevel@tonic-gate WAITERMASK) { 22300Sstevel@tonic-gate no_preempt(self); 2231*4574Sraf (void) ___lwp_mutex_wakeup(mp, 0); 22320Sstevel@tonic-gate preempt(self); 22330Sstevel@tonic-gate } 22340Sstevel@tonic-gate } 22350Sstevel@tonic-gate return (0); 22360Sstevel@tonic-gate } 22370Sstevel@tonic-gate } 22380Sstevel@tonic-gate 22390Sstevel@tonic-gate /* else do it the long way */ 22400Sstevel@tonic-gate slow_unlock: 2241*4574Sraf return (mutex_unlock_internal(mp, 0)); 22420Sstevel@tonic-gate } 22430Sstevel@tonic-gate 22440Sstevel@tonic-gate /* 22450Sstevel@tonic-gate * Internally to the library, almost all mutex lock/unlock actions 22460Sstevel@tonic-gate * go through these lmutex_ functions, to protect critical regions. 22470Sstevel@tonic-gate * We replicate a bit of code from __mutex_lock() and __mutex_unlock() 22480Sstevel@tonic-gate * to make these functions faster since we know that the mutex type 22490Sstevel@tonic-gate * of all internal locks is USYNC_THREAD. We also know that internal 22500Sstevel@tonic-gate * locking can never fail, so we panic if it does. 22510Sstevel@tonic-gate */ 22520Sstevel@tonic-gate void 22530Sstevel@tonic-gate lmutex_lock(mutex_t *mp) 22540Sstevel@tonic-gate { 22550Sstevel@tonic-gate ulwp_t *self = curthread; 22560Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 22570Sstevel@tonic-gate 22580Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 22590Sstevel@tonic-gate 22600Sstevel@tonic-gate enter_critical(self); 22610Sstevel@tonic-gate /* 22620Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 22630Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 22640Sstevel@tonic-gate */ 22650Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 22660Sstevel@tonic-gate /* 22670Sstevel@tonic-gate * Only one thread exists; the mutex must be free. 
22680Sstevel@tonic-gate */ 22690Sstevel@tonic-gate ASSERT(mp->mutex_lockw == 0); 22700Sstevel@tonic-gate mp->mutex_lockw = LOCKSET; 22710Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 22720Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 22730Sstevel@tonic-gate } else { 22740Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 22750Sstevel@tonic-gate 22760Sstevel@tonic-gate if (!self->ul_schedctl_called) 22770Sstevel@tonic-gate (void) setup_schedctl(); 22780Sstevel@tonic-gate 22790Sstevel@tonic-gate if (set_lock_byte(&mp->mutex_lockw) == 0) { 22800Sstevel@tonic-gate mp->mutex_owner = (uintptr_t)self; 22810Sstevel@tonic-gate DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0); 22820Sstevel@tonic-gate } else if (mutex_trylock_adaptive(mp) != 0) { 22830Sstevel@tonic-gate (void) mutex_lock_queue(self, msp, mp, NULL); 22840Sstevel@tonic-gate } 22850Sstevel@tonic-gate 22860Sstevel@tonic-gate if (msp) 22870Sstevel@tonic-gate record_begin_hold(msp); 22880Sstevel@tonic-gate } 22890Sstevel@tonic-gate } 22900Sstevel@tonic-gate 22910Sstevel@tonic-gate void 22920Sstevel@tonic-gate lmutex_unlock(mutex_t *mp) 22930Sstevel@tonic-gate { 22940Sstevel@tonic-gate ulwp_t *self = curthread; 22950Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 22960Sstevel@tonic-gate 22970Sstevel@tonic-gate ASSERT(mp->mutex_type == USYNC_THREAD); 22980Sstevel@tonic-gate 22990Sstevel@tonic-gate /* 23000Sstevel@tonic-gate * Optimize the case of no lock statistics and only a single thread. 23010Sstevel@tonic-gate * (Most likely a traditional single-threaded application.) 23020Sstevel@tonic-gate */ 23030Sstevel@tonic-gate if (udp->uberflags.uf_all == 0) { 23040Sstevel@tonic-gate /* 23050Sstevel@tonic-gate * Only one thread exists so there can be no waiters. 23060Sstevel@tonic-gate */ 23070Sstevel@tonic-gate mp->mutex_owner = 0; 23080Sstevel@tonic-gate mp->mutex_lockword = 0; 23090Sstevel@tonic-gate DTRACE_PROBE2(plockstat, mutex__release, mp, 0); 23100Sstevel@tonic-gate } else { 23110Sstevel@tonic-gate tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp); 23120Sstevel@tonic-gate lwpid_t lwpid; 23130Sstevel@tonic-gate 23140Sstevel@tonic-gate if (msp) 23150Sstevel@tonic-gate (void) record_hold_time(msp); 2316*4574Sraf if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) { 23170Sstevel@tonic-gate (void) __lwp_unpark(lwpid); 23180Sstevel@tonic-gate preempt(self); 23190Sstevel@tonic-gate } 23200Sstevel@tonic-gate } 23210Sstevel@tonic-gate exit_critical(self); 23220Sstevel@tonic-gate } 23230Sstevel@tonic-gate 23242248Sraf /* 23252248Sraf * For specialized code in libc, like the asynchronous i/o code, 23262248Sraf * the following sig_*() locking primitives are used in order 23272248Sraf * to make the code asynchronous signal safe. Signals are 23282248Sraf * deferred while locks acquired by these functions are held. 
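 *
 * As an illustration of the intended calling pattern (aio_qlock and
 * aio_queue_insert() below are hypothetical stand-ins, not the real
 * asynchronous i/o data structures):
 *
 *	sig_mutex_lock(&aio_qlock);	// sigoff(): signals now deferred
 *	aio_queue_insert(reqp);		// a signal arriving here is held
 *					// pending rather than delivered
 *	sig_mutex_unlock(&aio_qlock);	// sigon(): deferred signal taken
 *
 * The deferral comes from the sigoff()/sigon() pair in these wrappers;
 * the locking itself is simply _private_mutex_lock() and
 * _private_mutex_unlock().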
23292248Sraf */ 23302248Sraf void 23312248Sraf sig_mutex_lock(mutex_t *mp) 23322248Sraf { 23332248Sraf sigoff(curthread); 23342248Sraf (void) _private_mutex_lock(mp); 23352248Sraf } 23362248Sraf 23372248Sraf void 23382248Sraf sig_mutex_unlock(mutex_t *mp) 23392248Sraf { 23402248Sraf (void) _private_mutex_unlock(mp); 23412248Sraf sigon(curthread); 23422248Sraf } 23432248Sraf 23442248Sraf int 23452248Sraf sig_mutex_trylock(mutex_t *mp) 23462248Sraf { 23472248Sraf int error; 23482248Sraf 23492248Sraf sigoff(curthread); 23502248Sraf if ((error = _private_mutex_trylock(mp)) != 0) 23512248Sraf sigon(curthread); 23522248Sraf return (error); 23532248Sraf } 23542248Sraf 23552248Sraf /* 23562248Sraf * sig_cond_wait() is a cancellation point. 23572248Sraf */ 23582248Sraf int 23592248Sraf sig_cond_wait(cond_t *cv, mutex_t *mp) 23602248Sraf { 23612248Sraf int error; 23622248Sraf 23632248Sraf ASSERT(curthread->ul_sigdefer != 0); 23642248Sraf _private_testcancel(); 23652248Sraf error = _cond_wait(cv, mp); 23662248Sraf if (error == EINTR && curthread->ul_cursig) { 23672248Sraf sig_mutex_unlock(mp); 23682248Sraf /* take the deferred signal here */ 23692248Sraf sig_mutex_lock(mp); 23702248Sraf } 23712248Sraf _private_testcancel(); 23722248Sraf return (error); 23732248Sraf } 23742248Sraf 23752248Sraf /* 23762248Sraf * sig_cond_reltimedwait() is a cancellation point. 23772248Sraf */ 23782248Sraf int 23792248Sraf sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts) 23802248Sraf { 23812248Sraf int error; 23822248Sraf 23832248Sraf ASSERT(curthread->ul_sigdefer != 0); 23842248Sraf _private_testcancel(); 23852248Sraf error = _cond_reltimedwait(cv, mp, ts); 23862248Sraf if (error == EINTR && curthread->ul_cursig) { 23872248Sraf sig_mutex_unlock(mp); 23882248Sraf /* take the deferred signal here */ 23892248Sraf sig_mutex_lock(mp); 23902248Sraf } 23912248Sraf _private_testcancel(); 23922248Sraf return (error); 23932248Sraf } 23942248Sraf 23950Sstevel@tonic-gate static int 23960Sstevel@tonic-gate shared_mutex_held(mutex_t *mparg) 23970Sstevel@tonic-gate { 23980Sstevel@tonic-gate /* 2399*4574Sraf * The 'volatile' is necessary to make sure the compiler doesn't 2400*4574Sraf * reorder the tests of the various components of the mutex. 2401*4574Sraf * They must be tested in this order: 2402*4574Sraf * mutex_lockw 2403*4574Sraf * mutex_owner 2404*4574Sraf * mutex_ownerpid 2405*4574Sraf * This relies on the fact that everywhere mutex_lockw is cleared, 2406*4574Sraf * mutex_owner and mutex_ownerpid are cleared before mutex_lockw 2407*4574Sraf * is cleared, and that everywhere mutex_lockw is set, mutex_owner 2408*4574Sraf * and mutex_ownerpid are set after mutex_lockw is set, and that 2409*4574Sraf * mutex_lockw is set or cleared with a memory barrier. 24100Sstevel@tonic-gate */ 24110Sstevel@tonic-gate volatile mutex_t *mp = (volatile mutex_t *)mparg; 24120Sstevel@tonic-gate ulwp_t *self = curthread; 24130Sstevel@tonic-gate uberdata_t *udp = self->ul_uberdata; 24140Sstevel@tonic-gate 2415*4574Sraf return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid); 24160Sstevel@tonic-gate } 24170Sstevel@tonic-gate 24180Sstevel@tonic-gate /* 24190Sstevel@tonic-gate * Some crufty old programs define their own version of _mutex_held() 24200Sstevel@tonic-gate * to be simply return(1). 
This breaks internal libc logic, so we 24210Sstevel@tonic-gate * define a private version for exclusive use by libc, mutex_is_held(), 24220Sstevel@tonic-gate * and also a new public function, __mutex_held(), to be used in new 24230Sstevel@tonic-gate * code to circumvent these crufty old programs. 24240Sstevel@tonic-gate */ 24250Sstevel@tonic-gate #pragma weak mutex_held = mutex_is_held 24260Sstevel@tonic-gate #pragma weak _mutex_held = mutex_is_held 24270Sstevel@tonic-gate #pragma weak __mutex_held = mutex_is_held 24280Sstevel@tonic-gate int 2429*4574Sraf mutex_is_held(mutex_t *mparg) 24300Sstevel@tonic-gate { 2431*4574Sraf volatile mutex_t *mp = (volatile mutex_t *)mparg; 2432*4574Sraf 2433*4574Sraf if (mparg->mutex_type & USYNC_PROCESS) 2434*4574Sraf return (shared_mutex_held(mparg)); 24350Sstevel@tonic-gate return (MUTEX_OWNED(mp, curthread)); 24360Sstevel@tonic-gate } 24370Sstevel@tonic-gate 24380Sstevel@tonic-gate #pragma weak _private_mutex_destroy = __mutex_destroy 24390Sstevel@tonic-gate #pragma weak mutex_destroy = __mutex_destroy 24400Sstevel@tonic-gate #pragma weak _mutex_destroy = __mutex_destroy 24410Sstevel@tonic-gate #pragma weak pthread_mutex_destroy = __mutex_destroy 24420Sstevel@tonic-gate #pragma weak _pthread_mutex_destroy = __mutex_destroy 24430Sstevel@tonic-gate int 24440Sstevel@tonic-gate __mutex_destroy(mutex_t *mp) 24450Sstevel@tonic-gate { 2446*4574Sraf if (mp->mutex_type & USYNC_PROCESS) 2447*4574Sraf forget_lock(mp); 2448*4574Sraf (void) _memset(mp, 0, sizeof (*mp)); 24490Sstevel@tonic-gate tdb_sync_obj_deregister(mp); 24500Sstevel@tonic-gate return (0); 24510Sstevel@tonic-gate } 24520Sstevel@tonic-gate 2453*4574Sraf #pragma weak mutex_consistent = __mutex_consistent 2454*4574Sraf #pragma weak _mutex_consistent = __mutex_consistent 2455*4574Sraf #pragma weak pthread_mutex_consistent_np = __mutex_consistent 2456*4574Sraf #pragma weak _pthread_mutex_consistent_np = __mutex_consistent 2457*4574Sraf int 2458*4574Sraf __mutex_consistent(mutex_t *mp) 2459*4574Sraf { 2460*4574Sraf /* 2461*4574Sraf * Do this only for an inconsistent, initialized robust lock 2462*4574Sraf * that we hold. For all other cases, return EINVAL. 2463*4574Sraf */ 2464*4574Sraf if (mutex_is_held(mp) && 2465*4574Sraf (mp->mutex_type & LOCK_ROBUST) && 2466*4574Sraf (mp->mutex_flag & LOCK_INITED) && 2467*4574Sraf (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) { 2468*4574Sraf mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED); 2469*4574Sraf mp->mutex_rcount = 0; 2470*4574Sraf return (0); 2471*4574Sraf } 2472*4574Sraf return (EINVAL); 2473*4574Sraf } 2474*4574Sraf 24750Sstevel@tonic-gate /* 24760Sstevel@tonic-gate * Spin locks are separate from ordinary mutexes, 24770Sstevel@tonic-gate * but we use the same data structure for them. 
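 *
 * The interfaces are the standard pthread spin lock routines; a
 * minimal caller (illustrative only, protecting a plain global
 * counter) looks like:
 *
 *	static pthread_spinlock_t slock;
 *	static int counter;
 *
 *	(void) pthread_spin_init(&slock, PTHREAD_PROCESS_PRIVATE);
 *	(void) pthread_spin_lock(&slock);
 *	counter++;			// keep the critical section short;
 *					// waiters spin, they never sleep
 *	(void) pthread_spin_unlock(&slock);
 *	(void) pthread_spin_destroy(&slock);
 *
 * PTHREAD_PROCESS_PRIVATE maps to USYNC_THREAD below and
 * PTHREAD_PROCESS_SHARED maps to USYNC_PROCESS.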
/*
 * Spin locks are separate from ordinary mutexes,
 * but we use the same data structure for them.
 */

#pragma weak pthread_spin_init = _pthread_spin_init
int
_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
	mutex_t *mp = (mutex_t *)lock;

	(void) _memset(mp, 0, sizeof (*mp));
	if (pshared == PTHREAD_PROCESS_SHARED)
		mp->mutex_type = USYNC_PROCESS;
	else
		mp->mutex_type = USYNC_THREAD;
	mp->mutex_flag = LOCK_INITED;
	mp->mutex_magic = MUTEX_MAGIC;
	return (0);
}

#pragma weak pthread_spin_destroy = _pthread_spin_destroy
int
_pthread_spin_destroy(pthread_spinlock_t *lock)
{
	(void) _memset(lock, 0, sizeof (*lock));
	return (0);
}

#pragma weak pthread_spin_trylock = _pthread_spin_trylock
int
_pthread_spin_trylock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;
	int error = 0;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) != 0)
		error = EBUSY;
	else {
		mp->mutex_owner = (uintptr_t)self;
		if (mp->mutex_type == USYNC_PROCESS)
			mp->mutex_ownerpid = self->ul_uberdata->pid;
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	}
	preempt(self);
	return (error);
}

#pragma weak pthread_spin_lock = _pthread_spin_lock
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;
	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
	int count = 0;

	ASSERT(!self->ul_critical || self->ul_bindflags);

	DTRACE_PROBE1(plockstat, mutex__spin, mp);

	/*
	 * We don't care whether the owner is running on a processor.
	 * We just spin because that's what this interface requires.
	 */
	for (;;) {
		if (count < INT_MAX)
			count++;
		if (*lockp == 0) {	/* lock byte appears to be clear */
			no_preempt(self);
			if (set_lock_byte(lockp) == 0)
				break;
			preempt(self);
		}
		SMT_PAUSE();
	}
	mp->mutex_owner = (uintptr_t)self;
	if (mp->mutex_type == USYNC_PROCESS)
		mp->mutex_ownerpid = self->ul_uberdata->pid;
	preempt(self);
	DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
	DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
	return (0);
}

#pragma weak pthread_spin_unlock = _pthread_spin_unlock
int
_pthread_spin_unlock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;

	no_preempt(self);
	mp->mutex_owner = 0;
	mp->mutex_ownerpid = 0;
	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
	(void) atomic_swap_32(&mp->mutex_lockword, 0);
	preempt(self);
	return (0);
}

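/*
 * Example (illustrative sketch, not part of libc): typical use of the
 * pthread_spin_*() interfaces defined above to protect a very short
 * critical section.  Because _pthread_spin_lock() spins and never
 * sleeps, the held region should be only a few instructions long.
 * The counter is a hypothetical shared datum; <pthread.h> supplies
 * the declarations.
 *
 *	static pthread_spinlock_t splock;
 *	static int counter;
 *
 *	void
 *	counter_init(void)
 *	{
 *		(void) pthread_spin_init(&splock, PTHREAD_PROCESS_PRIVATE);
 *	}
 *
 *	void
 *	counter_bump(void)
 *	{
 *		(void) pthread_spin_lock(&splock);	// spins, never sleeps
 *		counter++;
 *		(void) pthread_spin_unlock(&splock);
 *	}
 */
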
#define	INITIAL_LOCKS	8	/* initial size of ul_heldlocks.array */

/*
 * Find/allocate an entry for 'lock' in our array of held locks.
 */
static mutex_t **
find_lock_entry(mutex_t *lock)
{
	ulwp_t *self = curthread;
	mutex_t **remembered = NULL;
	mutex_t **lockptr;
	uint_t nlocks;

	if ((nlocks = self->ul_heldlockcnt) != 0)
		lockptr = self->ul_heldlocks.array;
	else {
		nlocks = 1;
		lockptr = &self->ul_heldlocks.single;
	}

	for (; nlocks; nlocks--, lockptr++) {
		if (*lockptr == lock)
			return (lockptr);
		if (*lockptr == NULL && remembered == NULL)
			remembered = lockptr;
	}
	if (remembered != NULL) {
		*remembered = lock;
		return (remembered);
	}

	/*
	 * No entry available.  Allocate more space, converting
	 * the single entry into an array of entries if necessary.
	 */
	if ((nlocks = self->ul_heldlockcnt) == 0) {
		/*
		 * Initial allocation of the array.
		 * Convert the single entry into an array.
		 */
		self->ul_heldlockcnt = nlocks = INITIAL_LOCKS;
		lockptr = lmalloc(nlocks * sizeof (mutex_t *));
		/*
		 * The single entry becomes the first entry in the array.
		 */
		*lockptr = self->ul_heldlocks.single;
		self->ul_heldlocks.array = lockptr;
		/*
		 * Return the next available entry in the array.
		 */
		*++lockptr = lock;
		return (lockptr);
	}
	/*
	 * Reallocate the array, double the size each time.
	 */
	lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *));
	(void) _memcpy(lockptr, self->ul_heldlocks.array,
	    nlocks * sizeof (mutex_t *));
	lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
	self->ul_heldlocks.array = lockptr;
	self->ul_heldlockcnt *= 2;
	/*
	 * Return the next available entry in the newly allocated array.
	 */
	*(lockptr += nlocks) = lock;
	return (lockptr);
}

/*
 * Insert 'lock' into our list of held locks.
 * Currently only used for LOCK_ROBUST mutexes.
 */
void
remember_lock(mutex_t *lock)
{
	(void) find_lock_entry(lock);
}

/*
 * Remove 'lock' from our list of held locks.
 * Currently only used for LOCK_ROBUST mutexes.
 */
void
forget_lock(mutex_t *lock)
{
	*find_lock_entry(lock) = NULL;
}

/*
 * Free the array of held locks.
 */
void
heldlock_free(ulwp_t *ulwp)
{
	uint_t nlocks;

	if ((nlocks = ulwp->ul_heldlockcnt) != 0)
		lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
	ulwp->ul_heldlockcnt = 0;
	ulwp->ul_heldlocks.array = NULL;
}

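/*
 * Example (illustrative sketch, not part of libc): the same
 * find-or-grow pattern used by find_lock_entry() above, written as a
 * standalone routine against malloc()/free() instead of the internal
 * lmalloc()/lfree().  The table_t type is hypothetical and is assumed
 * to start with at least one allocated slot; <stdlib.h> and <string.h>
 * supply the declarations.
 *
 *	typedef struct {
 *		void **slots;
 *		unsigned int nslots;
 *	} table_t;
 *
 *	static void **
 *	table_enter(table_t *tp, void *item)
 *	{
 *		void **empty = NULL;
 *		void **newslots;
 *		unsigned int i;
 *
 *		for (i = 0; i < tp->nslots; i++) {
 *			if (tp->slots[i] == item)
 *				return (&tp->slots[i]);
 *			if (tp->slots[i] == NULL && empty == NULL)
 *				empty = &tp->slots[i];
 *		}
 *		if (empty != NULL) {
 *			*empty = item;
 *			return (empty);
 *		}
 *		// no free slot: double the array, as find_lock_entry() does
 *		newslots = calloc(2 * tp->nslots, sizeof (void *));
 *		if (newslots == NULL)
 *			return (NULL);
 *		(void) memcpy(newslots, tp->slots,
 *		    tp->nslots * sizeof (void *));
 *		free(tp->slots);
 *		tp->slots = newslots;
 *		newslots[tp->nslots] = item;
 *		empty = &newslots[tp->nslots];
 *		tp->nslots *= 2;
 *		return (empty);
 *	}
 */
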
/*
 * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD.
 * Called from _thrp_exit() to deal with abandoned locks.
 */
void
heldlock_exit(void)
{
	ulwp_t *self = curthread;
	mutex_t **lockptr;
	uint_t nlocks;
	mutex_t *mp;

	if ((nlocks = self->ul_heldlockcnt) != 0)
		lockptr = self->ul_heldlocks.array;
	else {
		nlocks = 1;
		lockptr = &self->ul_heldlocks.single;
	}

	for (; nlocks; nlocks--, lockptr++) {
		/*
		 * The kernel takes care of transitioning held
		 * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD.
		 * We avoid that case here.
		 */
		if ((mp = *lockptr) != NULL &&
		    mutex_is_held(mp) &&
		    (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
		    LOCK_ROBUST) {
			mp->mutex_rcount = 0;
			if (!(mp->mutex_flag & LOCK_UNMAPPED))
				mp->mutex_flag |= LOCK_OWNERDEAD;
			(void) mutex_unlock_internal(mp, 1);
		}
	}

	heldlock_free(self);
}

#pragma weak cond_init = _cond_init
/* ARGSUSED2 */
int
_cond_init(cond_t *cvp, int type, void *arg)
{
	if (type != USYNC_THREAD && type != USYNC_PROCESS)
		return (EINVAL);
	(void) _memset(cvp, 0, sizeof (*cvp));
	cvp->cond_type = (uint16_t)type;
	cvp->cond_magic = COND_MAGIC;
	return (0);
}

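/*
 * Example (illustrative sketch, not part of libc): at the POSIX level,
 * the USYNC_PROCESS type accepted by _cond_init() corresponds to
 * PTHREAD_PROCESS_SHARED attributes.  This sketch initializes a
 * mutex/condvar pair that is assumed to live in memory shared between
 * processes (for instance, an mmap()ed region); <pthread.h> supplies
 * the declarations.
 *
 *	int
 *	init_shared_sync(pthread_mutex_t *mp, pthread_cond_t *cvp)
 *	{
 *		pthread_mutexattr_t ma;
 *		pthread_condattr_t ca;
 *		int error;
 *
 *		(void) pthread_mutexattr_init(&ma);
 *		(void) pthread_mutexattr_setpshared(&ma,
 *		    PTHREAD_PROCESS_SHARED);
 *		(void) pthread_condattr_init(&ca);
 *		(void) pthread_condattr_setpshared(&ca,
 *		    PTHREAD_PROCESS_SHARED);
 *		if ((error = pthread_mutex_init(mp, &ma)) == 0)
 *			error = pthread_cond_init(cvp, &ca);
 *		(void) pthread_mutexattr_destroy(&ma);
 *		(void) pthread_condattr_destroy(&ca);
 *		return (error);
 *	}
 */
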
/*
 * cond_sleep_queue(): utility function for cond_wait_queue().
 *
 * Go to sleep on a condvar sleep queue, expect to be waked up
 * by someone calling cond_signal() or cond_broadcast() or due
 * to receiving a UNIX signal or being cancelled, or just simply
 * due to a spurious wakeup (like someone calling forkall()).
 *
 * The associated mutex is *not* reacquired before returning.
 * That must be done by the caller of cond_sleep_queue().
 */
static int
cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;
	queue_head_t *mqp;
	lwpid_t lwpid;
	int signalled;
	int error;
	int release_all;

	/*
	 * Put ourself on the CV sleep queue, unlock the mutex, then
	 * park ourself and unpark a candidate lwp to grab the mutex.
	 * We must go onto the CV sleep queue before dropping the
	 * mutex in order to guarantee atomicity of the operation.
	 */
	self->ul_sp = stkptr();
	qp = queue_lock(cvp, CV);
	enqueue(qp, self, cvp, CV);
	cvp->cond_waiters_user = 1;
	self->ul_cvmutex = mp;
	self->ul_cv_wake = (tsp != NULL);
	self->ul_signalled = 0;
	if (mp->mutex_flag & LOCK_OWNERDEAD) {
		mp->mutex_flag &= ~LOCK_OWNERDEAD;
		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
	}
	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
	lwpid = mutex_unlock_queue(mp, release_all);
	for (;;) {
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if (lwpid != 0) {
			lwpid = preempt_unpark(self, lwpid);
			preempt(self);
		}
		/*
		 * We may have a deferred signal present,
		 * in which case we should return EINTR.
		 * Also, we may have received a SIGCANCEL; if so
		 * and we are cancelable we should return EINTR.
		 * We force an immediate EINTR return from
		 * __lwp_park() by turning our parking flag off.
		 */
		if (self->ul_cursig != 0 ||
		    (self->ul_cancelable && self->ul_cancel_pending))
			set_parking_flag(self, 0);
		/*
		 * __lwp_park() will return the residual time in tsp
		 * if we are unparked before the timeout expires.
		 */
		error = __lwp_park(tsp, lwpid);
		set_parking_flag(self, 0);
		lwpid = 0;	/* unpark the other lwp only once */
		/*
		 * We were waked up by cond_signal(), cond_broadcast(),
		 * by an interrupt or timeout (EINTR or ETIME),
		 * or we may just have gotten a spurious wakeup.
		 */
		qp = queue_lock(cvp, CV);
		mqp = queue_lock(mp, MX);
		if (self->ul_sleepq == NULL)
			break;
		/*
		 * We are on either the condvar sleep queue or the
		 * mutex sleep queue.  Break out of the sleep if we
		 * were interrupted or we timed out (EINTR or ETIME).
		 * Else this is a spurious wakeup; continue the loop.
		 */
		if (self->ul_sleepq == mqp) {		/* mutex queue */
			if (error) {
				mp->mutex_waiters = dequeue_self(mqp, mp);
				break;
			}
			tsp = NULL;	/* no more timeout */
		} else if (self->ul_sleepq == qp) {	/* condvar queue */
			if (error) {
				cvp->cond_waiters_user = dequeue_self(qp, cvp);
				break;
			}
			/*
			 * Else a spurious wakeup on the condvar queue.
			 * __lwp_park() has already adjusted the timeout.
			 */
		} else {
			thr_panic("cond_sleep_queue(): thread not on queue");
		}
		queue_unlock(mqp);
	}

	self->ul_sp = 0;
	ASSERT(self->ul_cvmutex == NULL && self->ul_cv_wake == 0);
	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
	    self->ul_wchan == NULL);

	signalled = self->ul_signalled;
	self->ul_signalled = 0;
	queue_unlock(qp);
	queue_unlock(mqp);

	/*
	 * If we were concurrently cond_signal()d and any of:
	 * received a UNIX signal, were cancelled, or got a timeout,
	 * then perform another cond_signal() to avoid consuming it.
	 */
	if (error && signalled)
		(void) cond_signal_internal(cvp);

	return (error);
}

int
cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp,
	tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	int error;
	int merror;

	/*
	 * The old thread library was programmed to defer signals
	 * while in cond_wait() so that the associated mutex would
	 * be guaranteed to be held when the application signal
	 * handler was invoked.
	 *
	 * We do not behave this way by default; the state of the
	 * associated mutex in the signal handler is undefined.
	 *
	 * To accommodate applications that depend on the old
	 * behavior, the _THREAD_COND_WAIT_DEFER environment
	 * variable can be set to 1 and we will behave in the
	 * old way with respect to cond_wait().
	 */
	if (self->ul_cond_wait_defer)
		sigoff(self);

	error = cond_sleep_queue(cvp, mp, tsp);

	/*
	 * Reacquire the mutex.
	 */
	if ((merror = mutex_trylock_adaptive(mp)) == EBUSY)
		merror = mutex_lock_queue(self, msp, mp, NULL);
	if (merror)
		error = merror;
	if (msp && (merror == 0 || merror == EOWNERDEAD))
		record_begin_hold(msp);

	/*
	 * Take any deferred signal now, after we have reacquired the mutex.
	 */
	if (self->ul_cond_wait_defer)
		sigon(self);

	return (error);
}

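/*
 * Example (illustrative sketch, not part of libc): because
 * cond_sleep_queue() can return for reasons other than a signal or
 * broadcast (spurious wakeups, forkall(), deferred signals), and
 * because _pthread_cond_wait() maps EINTR to 0, callers must always
 * re-test their predicate in a loop.  The 'ready' flag and its
 * condvar/mutex pair are hypothetical; <pthread.h> supplies the
 * declarations.
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t ready_cv = PTHREAD_COND_INITIALIZER;
 *	static int ready;
 *
 *	void
 *	wait_until_ready(void)
 *	{
 *		(void) pthread_mutex_lock(&lock);
 *		while (!ready)		// re-test: wakeups may be spurious
 *			(void) pthread_cond_wait(&ready_cv, &lock);
 *		(void) pthread_mutex_unlock(&lock);
 *	}
 */
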
/*
 * cond_sleep_kernel(): utility function for cond_wait_kernel().
 * See the comment ahead of cond_sleep_queue(), above.
 */
static int
cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	int mtype = mp->mutex_type;
	ulwp_t *self = curthread;
	int error;

	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
		_ceil_prio_waive();

	self->ul_sp = stkptr();
	self->ul_wchan = cvp;
	mp->mutex_owner = 0;
	mp->mutex_ownerpid = 0;
	if (mtype & LOCK_PRIO_INHERIT)
		mp->mutex_lockw = LOCKCLEAR;
	/*
	 * ___lwp_cond_wait() returns immediately with EINTR if
	 * set_parking_flag(self,0) is called on this lwp before it
	 * goes to sleep in the kernel.  sigacthandler() calls this
	 * when a deferred signal is noted.  This assures that we don't
	 * get stuck in ___lwp_cond_wait() with all signals blocked
	 * due to taking a deferred signal before going to sleep.
	 */
	set_parking_flag(self, 1);
	if (self->ul_cursig != 0 ||
	    (self->ul_cancelable && self->ul_cancel_pending))
		set_parking_flag(self, 0);
	error = ___lwp_cond_wait(cvp, mp, tsp, 1);
	set_parking_flag(self, 0);
	self->ul_sp = 0;
	self->ul_wchan = NULL;
	return (error);
}

int
cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	int error;
	int merror;

	/*
	 * See the large comment in cond_wait_queue(), above.
	 */
	if (self->ul_cond_wait_defer)
		sigoff(self);

	error = cond_sleep_kernel(cvp, mp, tsp);

	/*
	 * Override the return code from ___lwp_cond_wait()
	 * with any non-zero return code from mutex_lock().
	 * This addresses robust lock failures in particular;
	 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
	 * errors in order to take corrective action.
	 */
	if ((merror = _private_mutex_lock(mp)) != 0)
		error = merror;

	/*
	 * Take any deferred signal now, after we have reacquired the mutex.
	 */
	if (self->ul_cond_wait_defer)
		sigon(self);

	return (error);
}

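/*
 * Example (illustrative sketch, not part of libc): since the error
 * from reacquiring the mutex overrides the wait's own return code,
 * a waiter on a condvar paired with a robust mutex must be prepared
 * to see EOWNERDEAD or ENOTRECOVERABLE come back from the wait call
 * itself.  The cv, robust_lock, and repair_shared_state() names are
 * hypothetical and assumed to be already initialized.
 *
 *	int error = pthread_cond_wait(&cv, &robust_lock);
 *
 *	if (error == EOWNERDEAD) {
 *		repair_shared_state();		// hypothetical recovery
 *		(void) pthread_mutex_consistent_np(&robust_lock);
 *	} else if (error == ENOTRECOVERABLE) {
 *		// the lock can no longer protect the data; give up
 *	}
 */
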
/*
 * Common code for _cond_wait() and _cond_timedwait()
 */
int
cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep = 0;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
	uint8_t rcount;
	int error = 0;

	/*
	 * The SUSV3 Posix spec for pthread_cond_timedwait() states:
	 *	Except in the case of [ETIMEDOUT], all these error checks
	 *	shall act as if they were performed immediately at the
	 *	beginning of processing for the function and shall cause
	 *	an error return, in effect, prior to modifying the state
	 *	of the mutex specified by mutex or the condition variable
	 *	specified by cond.
	 * Therefore, we must return EINVAL now if the timeout is invalid.
	 */
	if (tsp != NULL &&
	    (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
		return (EINVAL);

	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_sp = stkptr();
		self->ul_wchan = cvp;
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = cvp;
		tdb_event(TD_SLEEP, udp);
		self->ul_sp = 0;
	}
	if (csp) {
		if (tsp)
			tdb_incr(csp->cond_timedwait);
		else
			tdb_incr(csp->cond_wait);
	}
	if (msp)
		begin_sleep = record_hold_time(msp);
	else if (csp)
		begin_sleep = gethrtime();

	if (self->ul_error_detection) {
		if (!mutex_is_held(mp))
			lock_error(mp, "cond_wait", cvp, NULL);
		if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
			lock_error(mp, "recursive mutex in cond_wait",
			    cvp, NULL);
		if (cvp->cond_type & USYNC_PROCESS) {
			if (!(mtype & USYNC_PROCESS))
				lock_error(mp, "cond_wait", cvp,
				    "condvar process-shared, "
				    "mutex process-private");
		} else {
			if (mtype & USYNC_PROCESS)
				lock_error(mp, "cond_wait", cvp,
				    "condvar process-private, "
				    "mutex process-shared");
		}
	}

	/*
	 * We deal with recursive mutexes by completely
	 * dropping the lock and restoring the recursion
	 * count after waking up.  This is arguably wrong,
	 * but it obeys the principle of least astonishment.
	 */
	rcount = mp->mutex_rcount;
	mp->mutex_rcount = 0;
	if ((mtype &
	    (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) |
	    (cvp->cond_type & USYNC_PROCESS))
		error = cond_wait_kernel(cvp, mp, tsp);
	else
		error = cond_wait_queue(cvp, mp, tsp, msp);
	mp->mutex_rcount = rcount;

	if (csp) {
		hrtime_t lapse = gethrtime() - begin_sleep;
		if (tsp == NULL)
			csp->cond_wait_sleep_time += lapse;
		else {
			csp->cond_timedwait_sleep_time += lapse;
			if (error == ETIME)
				tdb_incr(csp->cond_timedwait_timeout);
		}
	}
	return (error);
}

/*
 * cond_wait() is a cancellation point but _cond_wait() is not.
 * System libraries call the non-cancellation version.
 * It is expected that only applications call the cancellation version.
 */
int
_cond_wait(cond_t *cvp, mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	uberflags_t *gflags;

	/*
	 * Optimize the common case of USYNC_THREAD plus
	 * no error detection, no lock statistics, and no event tracing.
	 */
	if ((gflags = self->ul_schedctl_called) != NULL &&
	    (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
	    self->ul_td_events_enable |
	    udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
		return (cond_wait_queue(cvp, mp, NULL, NULL));

	/*
	 * Else do it the long way.
	 */
	return (cond_wait_common(cvp, mp, NULL));
}

int
cond_wait(cond_t *cvp, mutex_t *mp)
{
	int error;

	_cancelon();
	error = _cond_wait(cvp, mp);
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}

#pragma weak pthread_cond_wait = _pthread_cond_wait
int
_pthread_cond_wait(cond_t *cvp, mutex_t *mp)
{
	int error;

	error = cond_wait(cvp, mp);
	return ((error == EINTR)? 0 : error);
}

/*
 * cond_timedwait() is a cancellation point but _cond_timedwait() is not.
 * System libraries call the non-cancellation version.
 * It is expected that only applications call the cancellation version.
 */
int
_cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
	clockid_t clock_id = cvp->cond_clockid;
	timespec_t reltime;
	int error;

	if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
		clock_id = CLOCK_REALTIME;
	abstime_to_reltime(clock_id, abstime, &reltime);
	error = cond_wait_common(cvp, mp, &reltime);
	if (error == ETIME && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return ETIME if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.  Just return zero in this case,
		 * giving a spurious wakeup but not a timeout.
		 */
		if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
		    abstime->tv_nsec > gethrtime())
			error = 0;
	}
	return (error);
}

int
cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
	int error;

	_cancelon();
	error = _cond_timedwait(cvp, mp, abstime);
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}

#pragma weak pthread_cond_timedwait = _pthread_cond_timedwait
int
_pthread_cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
	int error;

	error = cond_timedwait(cvp, mp, abstime);
	if (error == ETIME)
		error = ETIMEDOUT;
	else if (error == EINTR)
		error = 0;
	return (error);
}

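/*
 * Example (illustrative sketch, not part of libc): a caller of
 * _pthread_cond_timedwait() supplies an absolute deadline and sees
 * ETIMEDOUT (the ETIME return is remapped above).  The 'ready'
 * predicate and the already-initialized mutex/condvar pair are
 * hypothetical; <pthread.h>, <time.h>, and <errno.h> supply the
 * declarations.
 *
 *	int
 *	wait_ready_abs(pthread_mutex_t *mp, pthread_cond_t *cvp,
 *	    int *ready, int secs)
 *	{
 *		timespec_t abstime;
 *		int error = 0;
 *
 *		(void) clock_gettime(CLOCK_REALTIME, &abstime);
 *		abstime.tv_sec += secs;	// deadline 'secs' seconds from now
 *		(void) pthread_mutex_lock(mp);
 *		while (*ready == 0 && error != ETIMEDOUT)
 *			error = pthread_cond_timedwait(cvp, mp, &abstime);
 *		(void) pthread_mutex_unlock(mp);
 *		return (error);
 *	}
 */
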
/*
 * cond_reltimedwait() is a cancellation point but _cond_reltimedwait()
 * is not.  System libraries call the non-cancellation version.
 * It is expected that only applications call the cancellation version.
 */
int
_cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
{
	timespec_t tslocal = *reltime;

	return (cond_wait_common(cvp, mp, &tslocal));
}

#pragma weak cond_reltimedwait = _cond_reltimedwait_cancel
int
_cond_reltimedwait_cancel(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
{
	int error;

	_cancelon();
	error = _cond_reltimedwait(cvp, mp, reltime);
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}

#pragma weak pthread_cond_reltimedwait_np = _pthread_cond_reltimedwait_np
int
_pthread_cond_reltimedwait_np(cond_t *cvp, mutex_t *mp,
	const timespec_t *reltime)
{
	int error;

	error = _cond_reltimedwait_cancel(cvp, mp, reltime);
	if (error == ETIME)
		error = ETIMEDOUT;
	else if (error == EINTR)
		error = 0;
	return (error);
}

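/*
 * Example (illustrative sketch, not part of libc): the _np relative
 * variant takes an interval measured from the time of the call rather
 * than an absolute deadline, which spares the caller a clock_gettime().
 * The lock, cv, and 'done' flag are hypothetical and assumed to be
 * already initialized; the caller should still re-check its predicate
 * on a zero return.
 *
 *	timespec_t reltime = { 5, 0 };	// at most five seconds from now
 *	int error;
 *
 *	(void) pthread_mutex_lock(&lock);
 *	error = pthread_cond_reltimedwait_np(&cv, &lock, &reltime);
 *	// 0 means a wakeup (re-check 'done'); ETIMEDOUT means the
 *	// interval elapsed
 *	(void) pthread_mutex_unlock(&lock);
 */
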
#pragma weak pthread_cond_signal = cond_signal_internal
#pragma weak _pthread_cond_signal = cond_signal_internal
#pragma weak cond_signal = cond_signal_internal
#pragma weak _cond_signal = cond_signal_internal
int
cond_signal_internal(cond_t *cvp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
	int error = 0;
	queue_head_t *qp;
	mutex_t *mp;
	queue_head_t *mqp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev = NULL;
	ulwp_t *next;
	ulwp_t **suspp = NULL;
	ulwp_t *susprev;

	if (csp)
		tdb_incr(csp->cond_signal);

	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
		error = __lwp_cond_signal(cvp);

	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
		return (error);

	/*
	 * Move someone from the condvar sleep queue to the mutex sleep
	 * queue for the mutex that he will acquire on being waked up.
	 * We can do this only if we own the mutex he will acquire.
	 * If we do not own the mutex, or if his ul_cv_wake flag
	 * is set, just dequeue and unpark him.
	 */
	qp = queue_lock(cvp, CV);
	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_wchan == cvp) {
			if (!ulwp->ul_stop)
				break;
			/*
			 * Try not to dequeue a suspended thread.
			 * This mimics the old libthread's behavior.
			 */
			if (suspp == NULL) {
				suspp = ulwpp;
				susprev = prev;
			}
		}
	}
	if (ulwp == NULL && suspp != NULL) {
		ulwp = *(ulwpp = suspp);
		prev = susprev;
		suspp = NULL;
	}
	if (ulwp == NULL) {	/* no one on the sleep queue */
		cvp->cond_waiters_user = 0;
		queue_unlock(qp);
		return (error);
	}
	/*
	 * Scan the remainder of the CV queue for another waiter.
	 */
	if (suspp != NULL) {
		next = *suspp;
	} else {
		for (next = ulwp->ul_link; next != NULL; next = next->ul_link)
			if (next->ul_wchan == cvp)
				break;
	}
	if (next == NULL)
		cvp->cond_waiters_user = 0;

	/*
	 * Inform the thread that he was the recipient of a cond_signal().
	 * This lets him deal with cond_signal() and, concurrently,
	 * one or more of a cancellation, a UNIX signal, or a timeout.
	 * These latter conditions must not consume a cond_signal().
	 */
	ulwp->ul_signalled = 1;

	/*
	 * Dequeue the waiter but leave his ul_sleepq non-NULL
	 * while we move him to the mutex queue so that he can
	 * deal properly with spurious wakeups.
	 */
	*ulwpp = ulwp->ul_link;
	ulwp->ul_link = NULL;
	if (qp->qh_tail == ulwp)
		qp->qh_tail = prev;
	qp->qh_qlen--;

	mp = ulwp->ul_cvmutex;		/* the mutex he will acquire */
	ulwp->ul_cvmutex = NULL;
	ASSERT(mp != NULL);

	if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
		lwpid_t lwpid = ulwp->ul_lwpid;

		no_preempt(self);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		ulwp->ul_cv_wake = 0;
		queue_unlock(qp);
		(void) __lwp_unpark(lwpid);
		preempt(self);
	} else {
		mqp = queue_lock(mp, MX);
		enqueue(mqp, ulwp, mp, MX);
		mp->mutex_waiters = 1;
		queue_unlock(mqp);
		queue_unlock(qp);
	}

	return (error);
}

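/*
 * Example (illustrative sketch, not part of libc): cond_signal_internal()
 * can move the chosen waiter directly onto the mutex sleep queue only
 * when the signalling thread owns that mutex.  Signalling while still
 * holding the associated mutex, as below, therefore lets the waiter be
 * handed off without an extra wakeup/block cycle.  The qlock/qcv pair
 * and enqueue_item() are hypothetical.
 *
 *	// producer: hand one item to a single consumer
 *	(void) pthread_mutex_lock(&qlock);
 *	enqueue_item(&q, item);
 *	(void) pthread_cond_signal(&qcv);	// still holding qlock
 *	(void) pthread_mutex_unlock(&qlock);
 *
 * Signalling after dropping the mutex is also correct, but the waiter
 * is simply unparked and must then contend for the mutex.
 */
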
/*
 * Utility function called by mutex_wakeup_all(), cond_broadcast(),
 * and rw_queue_release() to (re)allocate a big buffer to hold the
 * lwpids of all the threads to be set running after they are removed
 * from their sleep queues.  Since we are holding a queue lock, we
 * cannot call any function that might acquire a lock.  mmap(), munmap(),
 * and lwp_unpark_all() are simple system calls and are safe in this regard.
 */
lwpid_t *
alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr)
{
	/*
	 * Allocate NEWLWPS ids on the first overflow.
	 * Double the allocation each time after that.
	 */
	int nlwpid = *nlwpid_ptr;
	int maxlwps = *maxlwps_ptr;
	int first_allocation;
	int newlwps;
	void *vaddr;

	ASSERT(nlwpid == maxlwps);

	first_allocation = (maxlwps == MAXLWPS);
	newlwps = first_allocation? NEWLWPS : 2 * maxlwps;
	vaddr = _private_mmap(NULL, newlwps * sizeof (lwpid_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);

	if (vaddr == MAP_FAILED) {
		/*
		 * Let's hope this never happens.
		 * If it does, then we have a terrible
		 * thundering herd on our hands.
		 */
		(void) __lwp_unpark_all(lwpid, nlwpid);
		*nlwpid_ptr = 0;
	} else {
		(void) _memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t));
		if (!first_allocation)
			(void) _private_munmap(lwpid,
			    maxlwps * sizeof (lwpid_t));
		lwpid = vaddr;
		*maxlwps_ptr = newlwps;
	}

	return (lwpid);
}

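/*
 * Example (illustrative sketch, not part of libc): the same
 * grow-by-doubling-with-mmap() idea used by alloc_lwpids() above,
 * written against the public mmap()/munmap() interfaces for code that
 * must not call malloc() while holding a lock.  The grow_ids() helper
 * and its parameters are hypothetical; <sys/mman.h> and <string.h>
 * supply the declarations.
 *
 *	static int *
 *	grow_ids(int *ids, int oldsz, int newsz, int was_mmapped)
 *	{
 *		void *vaddr = mmap(NULL, newsz * sizeof (int),
 *		    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
 *		    -1, (off_t)0);
 *
 *		if (vaddr == MAP_FAILED)
 *			return (NULL);	// caller must cope, as above
 *		(void) memcpy(vaddr, ids, oldsz * sizeof (int));
 *		if (was_mmapped)
 *			(void) munmap((void *)ids, oldsz * sizeof (int));
 *		return ((int *)vaddr);
 *	}
 */
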
#pragma weak pthread_cond_broadcast = cond_broadcast_internal
#pragma weak _pthread_cond_broadcast = cond_broadcast_internal
#pragma weak cond_broadcast = cond_broadcast_internal
#pragma weak _cond_broadcast = cond_broadcast_internal
int
cond_broadcast_internal(cond_t *cvp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
	int error = 0;
	queue_head_t *qp;
	mutex_t *mp;
	mutex_t *mp_cache = NULL;
	queue_head_t *mqp = NULL;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev = NULL;
	int nlwpid = 0;
	int maxlwps = MAXLWPS;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	if (csp)
		tdb_incr(csp->cond_broadcast);

	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
		error = __lwp_cond_broadcast(cvp);

	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
		return (error);

	/*
	 * Move everyone from the condvar sleep queue to the mutex sleep
	 * queue for the mutex that they will acquire on being waked up.
	 * We can do this only if we own the mutex they will acquire.
	 * If we do not own the mutex, or if their ul_cv_wake flag
	 * is set, just dequeue and unpark them.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	qp = queue_lock(cvp, CV);
	cvp->cond_waiters_user = 0;
	ulwpp = &qp->qh_head;
	while ((ulwp = *ulwpp) != NULL) {
		if (ulwp->ul_wchan != cvp) {
			prev = ulwp;
			ulwpp = &ulwp->ul_link;
			continue;
		}
		*ulwpp = ulwp->ul_link;
		ulwp->ul_link = NULL;
		if (qp->qh_tail == ulwp)
			qp->qh_tail = prev;
		qp->qh_qlen--;
		mp = ulwp->ul_cvmutex;		/* his mutex */
		ulwp->ul_cvmutex = NULL;
		ASSERT(mp != NULL);
		if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
			ulwp->ul_sleepq = NULL;
			ulwp->ul_wchan = NULL;
			ulwp->ul_cv_wake = 0;
			if (nlwpid == maxlwps)
				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
			lwpid[nlwpid++] = ulwp->ul_lwpid;
		} else {
			if (mp != mp_cache) {
				mp_cache = mp;
				if (mqp != NULL)
					queue_unlock(mqp);
				mqp = queue_lock(mp, MX);
			}
			enqueue(mqp, ulwp, mp, MX);
			mp->mutex_waiters = 1;
		}
	}
	if (mqp != NULL)
		queue_unlock(mqp);
	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		no_preempt(self);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(self);
	}
	if (lwpid != buffer)
		(void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t));
	return (error);
}

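/*
 * Example (illustrative sketch, not part of libc): broadcast wakes
 * every waiter; those whose mutex the caller holds are transferred to
 * that mutex's sleep queue and the rest are unparked in a single
 * __lwp_unpark_all() call.  It is therefore the right tool for a state
 * change that every waiter must re-evaluate, not for handing off a
 * single resource.  The state_lock/state_cv pair and the
 * shutting_down flag are hypothetical.
 *
 *	// publisher: change state that every waiter must observe
 *	(void) pthread_mutex_lock(&state_lock);
 *	shutting_down = 1;
 *	(void) pthread_cond_broadcast(&state_cv);	// wake all waiters
 *	(void) pthread_mutex_unlock(&state_lock);
 */
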
#pragma weak pthread_cond_destroy = _cond_destroy
#pragma weak _pthread_cond_destroy = _cond_destroy
#pragma weak cond_destroy = _cond_destroy
int
_cond_destroy(cond_t *cvp)
{
	cvp->cond_magic = 0;
	tdb_sync_obj_deregister(cvp);
	return (0);
}

#if defined(THREAD_DEBUG)
void
assert_no_libc_locks_held(void)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
}
#endif

/* protected by link_lock */
uint64_t spin_lock_spin;
uint64_t spin_lock_spin2;
uint64_t spin_lock_sleep;
uint64_t spin_lock_wakeup;

/*
 * Record spin lock statistics.
 * Called by a thread exiting itself in thrp_exit().
 * Also called via atexit() from the thread calling
 * exit() to do all the other threads as well.
 */
void
record_spin_locks(ulwp_t *ulwp)
{
	spin_lock_spin += ulwp->ul_spin_lock_spin;
	spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
	spin_lock_sleep += ulwp->ul_spin_lock_sleep;
	spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
	ulwp->ul_spin_lock_spin = 0;
	ulwp->ul_spin_lock_spin2 = 0;
	ulwp->ul_spin_lock_sleep = 0;
	ulwp->ul_spin_lock_wakeup = 0;
}

/*
 * atexit function: dump the queue statistics to stderr.
 */
#if !defined(__lint)
#define	fprintf	_fprintf
#endif
#include <stdio.h>
void
dump_queue_statistics(void)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;
	int qn;
	uint64_t spin_lock_total = 0;

	if (udp->queue_head == NULL || thread_queue_dump == 0)
		return;

	if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
	    fprintf(stderr, "queue# lockcount max qlen\n") < 0)
		return;
	for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
		if (qp->qh_lockcount == 0)
			continue;
		spin_lock_total += qp->qh_lockcount;
		if (fprintf(stderr, "%5d %12llu%12u\n", qn,
		    (u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0)
			return;
	}

	if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
	    fprintf(stderr, "queue# lockcount max qlen\n") < 0)
		return;
	for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
		if (qp->qh_lockcount == 0)
			continue;
		spin_lock_total += qp->qh_lockcount;
		if (fprintf(stderr, "%5d %12llu%12u\n", qn,
		    (u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0)
			return;
	}

	(void) fprintf(stderr, "\n spin_lock_total = %10llu\n",
	    (u_longlong_t)spin_lock_total);
	(void) fprintf(stderr, " spin_lock_spin = %10llu\n",
	    (u_longlong_t)spin_lock_spin);
	(void) fprintf(stderr, " spin_lock_spin2 = %10llu\n",
	    (u_longlong_t)spin_lock_spin2);
	(void) fprintf(stderr, " spin_lock_sleep = %10llu\n",
	    (u_longlong_t)spin_lock_sleep);
	(void) fprintf(stderr, " spin_lock_wakeup = %10llu\n",
	    (u_longlong_t)spin_lock_wakeup);
}