/* xref: /onnv-gate/usr/src/lib/libc/port/threads/synch.c (revision 4570:f93b74ddbdd5) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/sdt.h>

#include "lint.h"
#include "thr_uberdata.h"

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a PTHREAD_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);

/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}
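
/*
 * Usage sketch (editorial): the locking code presumably pairs these as
 *
 *	record_begin_hold(msp);		-- on acquiring a traced mutex
 *	... critical section ...
 *	(void) record_hold_time(msp);	-- on releasing it
 *
 * so mutex_hold_time accumulates only across matched pairs; an
 * unmatched record_hold_time() is harmless because mutex_begin_hold
 * is zero then.
 */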

/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin counts of 1000 and 500 are experimentally determined.
 * On sun4u machines with any number of processors they could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variables:
 *	_THREAD_ADAPTIVE_SPIN=count
 *	_THREAD_RELEASE_SPIN=count
 * can be used to override and set the counts in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_release_spin = 500;
int	thread_queue_verify = 0;
static	int	ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 * There is no release spin concept for queue locks.
 */
int	thread_queue_spin = 1000;
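
/*
 * Tuning sketch (editorial): the overrides above are plain environment
 * variables examined at process startup, e.g. running an application as
 *
 *	_THREAD_ADAPTIVE_SPIN=10000 _THREAD_QUEUE_SPIN=5000 ./app
 *
 * raises both spin counts; values outside [0 .. 1,000,000] are
 * presumably not honored (the comments above give the accepted range).
 */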

/*
 * Use the otherwise-unused 'mutex_ownerpid' field of a USYNC_THREAD
 * mutex as a count of adaptive spins in progress.
 */
#define	mutex_spinners	mutex_ownerpid

void
_mutex_set_typeattr(mutex_t *mp, int attr)
{
	mp->mutex_type |= (uint8_t)attr;
}

/*
 * 'type' can be one of USYNC_THREAD or USYNC_PROCESS, possibly
 * augmented by the flags LOCK_RECURSIVE and/or LOCK_ERRORCHECK,
 * or it can be USYNC_PROCESS_ROBUST with no extra flags.
 */
#pragma weak _private_mutex_init = __mutex_init
#pragma weak mutex_init = __mutex_init
#pragma weak _mutex_init = __mutex_init
/* ARGSUSED2 */
int
__mutex_init(mutex_t *mp, int type, void *arg)
{
	int error;

	switch (type & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) {
	case USYNC_THREAD:
	case USYNC_PROCESS:
		(void) _memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		error = 0;
		break;
	case USYNC_PROCESS_ROBUST:
		if (type & (LOCK_RECURSIVE|LOCK_ERRORCHECK))
			error = EINVAL;
		else
			error = ___lwp_mutex_init(mp, type);
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0)
		mp->mutex_magic = MUTEX_MAGIC;
	return (error);
}
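
/*
 * Example (editorial sketch, not part of the library): how the type
 * rules above play out through the mutex_init() weak symbol:
 *
 *	mutex_t m;
 *
 *	mutex_init(&m, USYNC_PROCESS | LOCK_ERRORCHECK, NULL);
 *		-- returns 0; flags are allowed with USYNC_PROCESS
 *	mutex_init(&m, USYNC_PROCESS_ROBUST, NULL);
 *		-- initialized by the kernel via ___lwp_mutex_init()
 *	mutex_init(&m, USYNC_PROCESS_ROBUST | LOCK_RECURSIVE, NULL);
 *		-- returns EINVAL; no extra flags with robust mutexes
 */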

/*
 * Delete mp from list of ceil mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	mcpp = &self->ul_mxchain;
	while ((*mcpp)->mxchain_mx != mp)
		mcpp = &(*mcpp)->mxchain_next;
	mcp = *mcpp;
	*mcpp = mcp->mxchain_next;
	lfree(mcp, sizeof (*mcp));
	return (mcpp == &self->ul_mxchain);
}

/*
 * Add mp to head of list of ceil mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}

/*
 * Inherit priority from ceiling.  The inheritance impacts the effective
 * priority, not the assigned priority.  See _thread_setschedparam_main().
 */
void
_ceil_prio_inherit(int ceil)
{
	ulwp_t *self = curthread;
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	param.sched_priority = ceil;
	if (_thread_setschedparam_main(self->ul_lwpid,
	    self->ul_policy, &param, PRIO_INHERIT)) {
		/*
		 * Panic, since it is unclear what error code to return.
		 * If we ever return the error codes from the routine
		 * called above, update the man page...
		 */
		thr_panic("_thread_setschedparam_main() fails");
	}
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting to the assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	if (self->ul_mxchain == NULL) {
		/*
		 * No ceil locks held.  Zero the epri, reverting to ul_pri.
		 * Since the thread's hash lock is not held, we cannot
		 * simply read ul_pri here; the called routine does it...
		 */
		param.sched_priority = self->ul_pri;	/* ignored */
		if (_thread_setschedparam_main(self->ul_lwpid,
		    self->ul_policy, &param, PRIO_DISINHERIT))
			thr_panic("_thread_setschedparam_main() fails");
	} else {
		/*
		 * Set priority to that of the mutex at the head
		 * of the ceilmutex chain.
		 */
		param.sched_priority =
		    self->ul_mxchain->mxchain_mx->mutex_ceiling;
		if (_thread_setschedparam_main(self->ul_lwpid,
		    self->ul_policy, &param, PRIO_INHERIT))
			thr_panic("_thread_setschedparam_main() fails");
	}
}
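
/*
 * Protocol sketch (editorial; the mutex_lock()/mutex_unlock() paths
 * that drive these helpers are outside this section): a priority-
 * ceiling mutex presumably ties them together roughly as
 *
 *	lock:	_ceil_mylist_add(mp);
 *		_ceil_prio_inherit(mp->mutex_ceiling);
 *	unlock:	if (_ceil_mylist_del(mp))	-- head of chain changed
 *			_ceil_prio_waive();
 *
 * which is why _ceil_mylist_del() reports whether the head was
 * updated and why _ceil_prio_waive() re-inherits from the new head
 * when other ceiling locks are still held.
 */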

/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
	ulwp_t *self = curthread;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Spin for a while, attempting to acquire the lock.
	 */
	if (self->ul_spin_lock_spin != UINT_MAX)
		self->ul_spin_lock_spin++;
	if (mutex_queuelock_adaptive(mp) == 0 ||
	    set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Try harder if we were previously at a no-preemption level.
	 */
	if (self->ul_preempt > 1) {
		if (self->ul_spin_lock_spin2 != UINT_MAX)
			self->ul_spin_lock_spin2++;
		if (mutex_queuelock_adaptive(mp) == 0 ||
		    set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			return;
		}
	}
	/*
	 * Give up and block in the kernel for the mutex.
	 */
	if (self->ul_spin_lock_sleep != UINT_MAX)
		self->ul_spin_lock_sleep++;
	(void) ___lwp_mutex_timedlock(mp, NULL);
	mp->mutex_owner = (uintptr_t)self;
}

void
spin_lock_clear(mutex_t *mp)
{
	ulwp_t *self = curthread;

	mp->mutex_owner = 0;
	if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
		(void) ___lwp_mutex_wakeup(mp);
		if (self->ul_spin_lock_wakeup != UINT_MAX)
			self->ul_spin_lock_wakeup++;
	}
	preempt(self);
}

/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	void *data;
	int i;

	/*
	 * No locks are needed; we call here only when single-threaded.
	 */
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	if ((data = _private_mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread queue_head table");
	udp->queue_head = (queue_head_t *)data;
	for (i = 0; i < 2 * QHASHSIZE; i++)
		udp->queue_head[i].qh_lock.mutex_magic = MUTEX_MAGIC;
}

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t *prev;
	uint_t index;
	uint32_t cnt = 0;
	char qtype;
	void *wchan;

	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
	ASSERT((qp->qh_head != NULL && qp->qh_tail != NULL) ||
		(qp->qh_head == NULL && qp->qh_tail == NULL));
	if (!thread_queue_verify)
		return;
	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
	for (prev = NULL, ulwp = qp->qh_head; ulwp != NULL;
	    prev = ulwp, ulwp = ulwp->ul_link, cnt++) {
		ASSERT(ulwp->ul_qtype == qtype);
		ASSERT(ulwp->ul_wchan != NULL);
		ASSERT(ulwp->ul_sleepq == qp);
		wchan = ulwp->ul_wchan;
		index = QUEUE_HASH(wchan, qtype);
		ASSERT(&udp->queue_head[index] == qp);
	}
	ASSERT(qp->qh_tail == prev);
	ASSERT(qp->qh_qlen == cnt);
}

#else	/* THREAD_DEBUG */

#define	QVERIFY(qp)

#endif	/* THREAD_DEBUG */

/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;

	ASSERT(qtype == MX || qtype == CV);

	/*
	 * It is possible that we could be called while still single-threaded.
	 * If so, we call queue_alloc() to allocate the queue_head[] array.
	 */
	if ((qp = udp->queue_head) == NULL) {
		queue_alloc();
		qp = udp->queue_head;
	}
	qp += QUEUE_HASH(wchan, qtype);
	spin_lock_set(&qp->qh_lock);
	/*
	 * At one increment per nanosecond, qh_lockcount would take
	 * 512 years to wrap.  Were we to care about this, we could
	 * peg the value at UINT64_MAX.
	 */
	qp->qh_lockcount++;
	QVERIFY(qp);
	return (qp);
}
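
/*
 * Layout sketch (editorial): queue_head[] has 2 * QHASHSIZE entries,
 * and QUEUE_HASH() maps mutex wait channels into the first QHASHSIZE
 * slots and condition-variable wait channels into the second half.
 * QVERIFY() above relies on exactly this when it recovers the type:
 *
 *	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
 */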

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
	QVERIFY(qp);
	spin_lock_clear(&qp->qh_lock);
}

/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.  We do this by making writers appear to have a half
 * point higher priority for purposes of priority comparisons below.
 */
#define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)
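
/*
 * Worked example (editorial): with real priorities 10 and 11,
 *
 *	reader at 10:	CMP_PRIO = (10 << 1) + 0 = 20
 *	writer at 10:	CMP_PRIO = (10 << 1) + 1 = 21
 *	reader at 11:	CMP_PRIO = (11 << 1) + 0 = 22
 *
 * so a writer sorts ahead of readers of equal priority but never
 * ahead of any thread of strictly higher priority.
 */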

void
enqueue(queue_head_t *qp, ulwp_t *ulwp, void *wchan, int qtype)
{
	ulwp_t **ulwpp;
	ulwp_t *next;
	int pri = CMP_PRIO(ulwp);
	int force_fifo = (qtype & FIFOQ);
	int do_fifo;

	qtype &= ~FIFOQ;
	ASSERT(qtype == MX || qtype == CV);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(ulwp->ul_sleepq != qp);

	/*
	 * LIFO queue ordering is unfair and can lead to starvation,
	 * but it gives better performance for heavily contended locks.
	 * We use thread_queue_fifo (range is 0..8) to determine
	 * the frequency of FIFO vs LIFO queuing:
	 *	0 : every 256th time	(almost always LIFO)
	 *	1 : every 128th time
	 *	2 : every 64th  time
	 *	3 : every 32nd  time
	 *	4 : every 16th  time	(the default value, mostly LIFO)
	 *	5 : every 8th   time
	 *	6 : every 4th   time
	 *	7 : every 2nd   time
	 *	8 : every time		(never LIFO, always FIFO)
	 * Note that there is always some degree of FIFO ordering.
	 * This breaks livelock conditions that occur in applications
	 * that are written assuming (incorrectly) that threads acquire
	 * locks fairly, that is, in roughly round-robin order.
	 * In any event, the queue is maintained in priority order.
	 *
	 * If we are given the FIFOQ flag in qtype, FIFO queueing is forced.
	 * SUSv3 requires this for semaphores.
	 */
	do_fifo = (force_fifo ||
		((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0);
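
	/*
	 * Worked example (editorial): with the default ul_queue_fifo
	 * of 4, (qh_qcnt << 4) & 0xff is zero exactly when qh_qcnt is
	 * a multiple of 16, giving FIFO on every 16th enqueue; with 8,
	 * (qh_qcnt << 8) & 0xff is always zero, so every enqueue is
	 * FIFO, matching the table above.
	 */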

	if (qp->qh_head == NULL) {
		/*
		 * The queue is empty.  LIFO/FIFO doesn't matter.
		 */
		ASSERT(qp->qh_tail == NULL);
		ulwpp = &qp->qh_head;
	} else if (do_fifo) {
		/*
		 * Enqueue after the last thread whose priority is greater
		 * than or equal to the priority of the thread being queued.
		 * Attempt first to go directly onto the tail of the queue.
		 */
		if (pri <= CMP_PRIO(qp->qh_tail))
			ulwpp = &qp->qh_tail->ul_link;
		else {
			for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL;
			    ulwpp = &next->ul_link)
				if (pri > CMP_PRIO(next))
					break;
		}
	} else {
		/*
		 * Enqueue before the first thread whose priority is less
		 * than or equal to the priority of the thread being queued.
		 * Hopefully we can go directly onto the head of the queue.
		 */
		for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL;
		    ulwpp = &next->ul_link)
			if (pri >= CMP_PRIO(next))
				break;
	}
	if ((ulwp->ul_link = *ulwpp) == NULL)
		qp->qh_tail = ulwp;
	*ulwpp = ulwp;

	ulwp->ul_sleepq = qp;
	ulwp->ul_wchan = wchan;
	ulwp->ul_qtype = qtype;
	if (qp->qh_qmax < ++qp->qh_qlen)
		qp->qh_qmax = qp->qh_qlen;
}

/*
 * Return a pointer to the queue slot of the
 * highest-priority thread on the queue.
 * On return, prevp, if not NULL, will contain a pointer
 * to the thread's predecessor on the queue.
 */
static ulwp_t **
queue_slot(queue_head_t *qp, void *wchan, int *more, ulwp_t **prevp)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev = NULL;
	ulwp_t **suspp = NULL;
	ulwp_t *susprev;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

	/*
	 * Find a waiter on the sleep queue.
	 */
	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan) {
			if (!ulwp->ul_stop)
				break;
			/*
			 * Try not to return a suspended thread.
			 * This mimics the old libthread's behavior.
			 */
			if (suspp == NULL) {
				suspp = ulwpp;
				susprev = prev;
			}
		}
	}

	if (ulwp == NULL && suspp != NULL) {
		ulwp = *(ulwpp = suspp);
		prev = susprev;
		suspp = NULL;
	}
	if (ulwp == NULL) {
		if (more != NULL)
			*more = 0;
		return (NULL);
	}

	if (prevp != NULL)
		*prevp = prev;
	if (more == NULL)
		return (ulwpp);

	/*
	 * Scan the remainder of the queue for another waiter.
	 */
	if (suspp != NULL) {
		*more = 1;
		return (ulwpp);
	}
	for (ulwp = ulwp->ul_link; ulwp != NULL; ulwp = ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan) {
			*more = 1;
			return (ulwpp);
		}
	}

	*more = 0;
	return (ulwpp);
}

ulwp_t *
queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
{
	ulwp_t *ulwp;

	ulwp = *ulwpp;
	*ulwpp = ulwp->ul_link;
	ulwp->ul_link = NULL;
	if (qp->qh_tail == ulwp)
		qp->qh_tail = prev;
	qp->qh_qlen--;
	ulwp->ul_sleepq = NULL;
	ulwp->ul_wchan = NULL;

	return (ulwp);
}

ulwp_t *
dequeue(queue_head_t *qp, void *wchan, int *more)
{
	ulwp_t **ulwpp;
	ulwp_t *prev;

	if ((ulwpp = queue_slot(qp, wchan, more, &prev)) == NULL)
		return (NULL);
	return (queue_unlink(qp, ulwpp, prev));
}

/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp, void *wchan)
{
	ulwp_t **ulwpp;

	if ((ulwpp = queue_slot(qp, wchan, NULL, NULL)) == NULL)
		return (NULL);
	return (*ulwpp);
}

uint8_t
dequeue_self(queue_head_t *qp, void *wchan)
{
	ulwp_t *self = curthread;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev = NULL;
	int found = 0;
	int more = 0;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

	/* find self on the sleep queue */
	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp == self) {
			/* dequeue ourself */
			ASSERT(self->ul_wchan == wchan);
			(void) queue_unlink(qp, ulwpp, prev);
			self->ul_cvmutex = NULL;
			self->ul_cv_wake = 0;
			found = 1;
			break;
		}
		if (ulwp->ul_wchan == wchan)
			more = 1;
	}

	if (!found)
		thr_panic("dequeue_self(): curthread not found on queue");

	if (more)
		return (1);

	/* scan the remainder of the queue for another waiter */
	for (ulwp = *ulwpp; ulwp != NULL; ulwp = ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan)
			return (1);
	}

	return (0);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourselves off our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;

	/*
	 * Calling enter_critical()/exit_critical() here would lead
	 * to recursion.  Just manipulate self->ul_critical directly.
	 */
	self->ul_critical++;
	while (self->ul_sleepq != NULL) {
		qp = queue_lock(self->ul_wchan, self->ul_qtype);
		/*
		 * We may have been moved from a CV queue to a
		 * mutex queue while we were attempting queue_lock().
		 * If so, just loop around and try again.
		 * dequeue_self() clears self->ul_sleepq.
		 */
		if (qp == self->ul_sleepq) {
			(void) dequeue_self(qp, self->ul_wchan);
			self->ul_writer = 0;
		}
		queue_unlock(qp);
	}
	self->ul_critical--;
}

/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	hrtime_t begin_sleep;
	int error;

	self->ul_sp = stkptr();
	self->ul_wchan = mp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	for (;;) {
		if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0) {
			DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
			break;
		}

		if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
				    0, 0);
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			break;
		}
	}
	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;
	self->ul_wchan = NULL;
	self->ul_sp = 0;

	return (error);
}

/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error;

	for (;;) {
		if ((error = ___lwp_mutex_trylock(mp)) != 0) {
			if (error != EBUSY) {
				DTRACE_PROBE2(plockstat, mutex__error, mp,
				    error);
			}
			break;
		}

		if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
				    0, 0);
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			break;
		}
	}

	return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	sc_shared_t *tmp;

	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
	    !self->ul_vfork &&			/* not a child of vfork() */
	    !self->ul_schedctl_called) {	/* haven't been called before */
		enter_critical(self);
		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
			self->ul_schedctl = scp = tmp;
		exit_critical(self);
	}
	/*
	 * Unless the call to setup_schedctl() is surrounded
	 * by enter_critical()/exit_critical(), the address
	 * we are returning could be invalid due to a forkall()
	 * having occurred in another thread.
	 */
	return (scp);
}

/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = _schedctl_init
#pragma weak _schedctl_lookup = _schedctl_init
#pragma weak schedctl_init = _schedctl_init
schedctl_t *
_schedctl_init(void)
{
	volatile sc_shared_t *scp = setup_schedctl();
	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}
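
/*
 * Usage sketch (editorial): the public <schedctl.h> protocol that
 * these interfaces back.  An application brackets a short critical
 * section with preemption-control hints:
 *
 *	schedctl_t *sc = schedctl_init();
 *	schedctl_start(sc);	-- please do not preempt me
 *	... short critical section ...
 *	schedctl_stop(sc);	-- hint withdrawn
 *
 * The hints are advisory; the kernel may still preempt the lwp.
 */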

#pragma weak schedctl_exit = _schedctl_exit
void
_schedctl_exit(void)
{
}

/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *volatile *ptr;

	if (self->ul_vfork)
		return (NULL);
	if (*(ptr = &self->ul_schedctl) == NULL)
		(void) setup_schedctl();
	return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_preempt++ == 0) {
		enter_critical(self);
		if ((scp = self->ul_schedctl) != NULL ||
		    (scp = setup_schedctl()) != NULL) {
			/*
			 * Save the pre-existing preempt value.
			 */
			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
			scp->sc_preemptctl.sc_nopreempt = 1;
		}
	}
}

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	ASSERT(self->ul_preempt > 0);
	if (--self->ul_preempt == 0) {
		if ((scp = self->ul_schedctl) != NULL) {
			/*
			 * Restore the pre-existing preempt value.
			 */
			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
			if (scp->sc_preemptctl.sc_yield &&
			    scp->sc_preemptctl.sc_nopreempt == 0) {
				lwp_yield();
				if (scp->sc_preemptctl.sc_yield) {
					/*
					 * Shouldn't happen.  This is either
					 * a race condition or the thread
					 * just entered the real-time class.
					 */
					lwp_yield();
					scp->sc_preemptctl.sc_yield = 0;
				}
			}
		}
		exit_critical(self);
	}
}
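
/*
 * Nesting sketch (editorial): per the ul_preempt counter above, only
 * the outermost pair touches the schedctl hint:
 *
 *	no_preempt(self);	ul_preempt 0 -> 1, sets sc_nopreempt
 *	no_preempt(self);	ul_preempt 1 -> 2, no schedctl change
 *	preempt(self);		ul_preempt 2 -> 1, no schedctl change
 *	preempt(self);		ul_preempt 1 -> 0, restores, may yield
 */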

/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
	volatile sc_shared_t *scp = self->ul_schedctl;

	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
	    (self->ul_curplease && self->ul_critical == 1)) {
		(void) __lwp_unpark(lwpid);
		lwpid = 0;
	}
	return (lwpid);
}

/*
 * Spin for a while, trying to grab the lock.  We know that we
 * failed set_lock_byte(&mp->mutex_lockw) once before coming here.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
int
mutex_trylock_adaptive(mutex_t *mp)
{
	ulwp_t *self = curthread;
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	int count, max = self->ul_adaptive_spin;

	ASSERT(!(mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)));

	if (max == 0 || (mp->mutex_spinners >= self->ul_max_spinners))
		return (EBUSY);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;

	DTRACE_PROBE1(plockstat, mutex__spin, mp);

	/*
	 * This spin loop is unfair to lwps that have already dropped into
	 * the kernel to sleep.  They will starve on a highly-contended mutex.
	 * This is just too bad.  The adaptive spin algorithm is intended
	 * to allow programs with highly-contended locks (that is, broken
	 * programs) to execute with reasonable speed despite their contention.
	 * Being fair would reduce the speed of such programs and well-written
	 * programs will not suffer in any case.
	 */
	enter_critical(self);		/* protects ul_schedctl */
	atomic_inc_32(&mp->mutex_spinners);
	for (count = 0; count < max; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			atomic_dec_32(&mp->mutex_spinners);
			exit_critical(self);
			DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
			return (0);
		}
		SMT_PAUSE();
		/*
		 * Stop spinning if the mutex owner is not running on
		 * a processor; it will not drop the lock any time soon
		 * and we would just be wasting time to keep spinning.
		 *
		 * Note that we are looking at another thread (ulwp_t)
		 * without ensuring that the other thread does not exit.
		 * The scheme relies on ulwp_t structures never being
		 * deallocated by the library (the library employs a free
		 * list of ulwp_t structs that are reused when new threads
		 * are created) and on schedctl shared memory never being
		 * deallocated once created via __schedctl().
		 *
		 * Thus, the worst that can happen when the spinning thread
		 * looks at the owner's schedctl data is that it is looking
		 * at some other thread's schedctl data.  This almost never
		 * happens and is benign when it does.
		 */
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}
	atomic_dec_32(&mp->mutex_spinners);
	exit_critical(self);

	DTRACE_PROBE2(plockstat, mutex__spun, 0, count);

	return (EBUSY);
}

/*
 * Same as mutex_trylock_adaptive(), except specifically for queue locks.
 * The owner field is not set here; the caller (spin_lock_set()) sets it.
 */
int
mutex_queuelock_adaptive(mutex_t *mp)
{
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	int count = curthread->ul_queue_spin;

	ASSERT(mp->mutex_type == USYNC_THREAD);

	if (count == 0)
		return (EBUSY);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;
	while (--count >= 0) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0)
			return (0);
		SMT_PAUSE();
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}

	return (EBUSY);
}

/*
 * Like mutex_trylock_adaptive(), but for process-shared mutexes.
 * Spin for a while, trying to grab the lock.  We know that we
 * failed set_lock_byte(&mp->mutex_lockw) once before coming here.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread
 * and mutex_ownerpid set to the current pid.
 */
int
mutex_trylock_process(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int count;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	volatile int32_t *pidp;
	pid_t pid, newpid;
	uint64_t owner, newowner;

	if ((count = ncpus) == 0)
		count = ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	count = (count > 1)? self->ul_adaptive_spin : 0;

	ASSERT((mp->mutex_type & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) ==
		USYNC_PROCESS);

	if (count == 0)
		return (EBUSY);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;
	pidp = (volatile int32_t *)&mp->mutex_ownerpid;
	owner = *ownerp;
	pid = *pidp;
	/*
	 * This is a process-shared mutex.
	 * We cannot know if the owner is running on a processor.
	 * We just spin and hope that it is on a processor.
	 */
	while (--count >= 0) {
		if (*lockp == 0) {
			enter_critical(self);
			if (set_lock_byte(lockp) == 0) {
				*ownerp = (uintptr_t)self;
				*pidp = udp->pid;
				exit_critical(self);
				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
				    0, 0);
				return (0);
			}
			exit_critical(self);
		} else if ((newowner = *ownerp) == owner &&
		    (newpid = *pidp) == pid) {
			SMT_PAUSE();
			continue;
		}
		/*
		 * The owner of the lock changed; start the count over again.
		 * This may be too aggressive; it needs testing.
		 */
		owner = newowner;
		pid = newpid;
		count = self->ul_adaptive_spin;
	}

	return (EBUSY);
}

/*
 * Mutex wakeup code for releasing a USYNC_THREAD mutex.
 * Returns the lwpid of the thread that was dequeued, if any.
 * The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
 * to wake up the specified lwp.
 */
lwpid_t
mutex_wakeup(mutex_t *mp)
{
	lwpid_t lwpid = 0;
	queue_head_t *qp;
	ulwp_t *ulwp;
	int more;

	/*
	 * Dequeue a waiter from the sleep queue.  Don't touch the mutex
	 * waiters bit if no one was found on the queue because the mutex
	 * might have been deallocated or reallocated for another purpose.
	 */
	qp = queue_lock(mp, MX);
	if ((ulwp = dequeue(qp, mp, &more)) != NULL) {
		lwpid = ulwp->ul_lwpid;
		mp->mutex_waiters = (more? 1 : 0);
	}
	queue_unlock(qp);
	return (lwpid);
}
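
/*
 * Caller sketch (editorial): mutex_wakeup() only dequeues; per the
 * block comment above, the caller completes the handoff, typically
 *
 *	if ((lwpid = mutex_wakeup(mp)) != 0)
 *		(void) __lwp_unpark(lwpid);
 *
 * so the actual wakeup is issued only after the sleep-queue lock has
 * been dropped inside mutex_wakeup().
 */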

/*
 * Spin for a while, testing to see if the lock has been grabbed.
 * If this fails, call mutex_wakeup() to release a waiter.
 */
lwpid_t
mutex_unlock_queue(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uint32_t *lockw = &mp->mutex_lockword;
	lwpid_t lwpid;
	volatile uint8_t *lockp;
	volatile uint32_t *spinp;
	int count;

	/*
	 * We use the swap primitive to clear the lock, but we must
	 * atomically retain the waiters bit for the remainder of this
	 * code to work.  We first check to see if the waiters bit is
	 * set and if so clear the lock by swapping in a word containing
	 * only the waiters bit.  This could produce a false positive test
	 * for whether there are waiters that need to be woken up, but
	 * this just causes an extra call to mutex_wakeup() to do nothing.
	 * The opposite case is more delicate:  If there are no waiters,
	 * we swap in a zero lock byte and a zero waiters bit.  The result
	 * of the swap could indicate that there really was a waiter so in
	 * this case we go directly to mutex_wakeup() without performing
	 * any of the adaptive code because the waiter bit has been cleared
	 * and the adaptive code is unreliable in this case.
	 */
	if (!(*lockw & WAITERMASK)) {	/* no waiter exists right now */
		mp->mutex_owner = 0;
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		if (!(atomic_swap_32(lockw, 0) & WAITERMASK))
			return (0);	/* still no waiters */
		no_preempt(self);	/* ensure a prompt wakeup */
		lwpid = mutex_wakeup(mp);
	} else {
		no_preempt(self);	/* ensure a prompt wakeup */
		lockp = (volatile uint8_t *)&mp->mutex_lockw;
		spinp = (volatile uint32_t *)&mp->mutex_spinners;
		mp->mutex_owner = 0;
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		/* clear lock, retain waiter */
		(void) atomic_swap_32(lockw, WAITER);

		/*
		 * We spin here fewer times than mutex_trylock_adaptive().
		 * We are trying to balance two conflicting goals:
		 * 1. Avoid waking up anyone if a spinning thread
		 *    grabs the lock.
		 * 2. Wake up a sleeping thread promptly to get on
		 *    with useful work.
11940Sstevel@tonic-gate 		 * We don't spin at all if there is no acquiring spinner
11950Sstevel@tonic-gate 		 * (mp->mutex_spinners is non-zero if there are spinners).
11960Sstevel@tonic-gate 		 */
11970Sstevel@tonic-gate 		for (count = self->ul_release_spin;
11980Sstevel@tonic-gate 		    *spinp && count > 0; count--) {
11990Sstevel@tonic-gate 			/*
12000Sstevel@tonic-gate 			 * There is a waiter that we will have to wake
12010Sstevel@tonic-gate 			 * up unless someone else grabs the lock while
12020Sstevel@tonic-gate 			 * we are busy spinning.  Like the spin loop in
12030Sstevel@tonic-gate 			 * mutex_trylock_adaptive(), this spin loop is
12040Sstevel@tonic-gate 			 * unfair to lwps that have already dropped into
12050Sstevel@tonic-gate 			 * the kernel to sleep.  They will starve on a
12060Sstevel@tonic-gate 			 * highly-contended mutex.  Too bad.
12070Sstevel@tonic-gate 			 */
12080Sstevel@tonic-gate 			if (*lockp != 0) {	/* somebody grabbed the lock */
12090Sstevel@tonic-gate 				preempt(self);
12100Sstevel@tonic-gate 				return (0);
12110Sstevel@tonic-gate 			}
12120Sstevel@tonic-gate 			SMT_PAUSE();
12130Sstevel@tonic-gate 		}
12140Sstevel@tonic-gate 
12150Sstevel@tonic-gate 		/*
12160Sstevel@tonic-gate 		 * No one grabbed the lock.
12170Sstevel@tonic-gate 		 * Wake up some lwp that is waiting for it.
12180Sstevel@tonic-gate 		 */
12190Sstevel@tonic-gate 		mp->mutex_waiters = 0;
12200Sstevel@tonic-gate 		lwpid = mutex_wakeup(mp);
12210Sstevel@tonic-gate 	}
12220Sstevel@tonic-gate 
12230Sstevel@tonic-gate 	if (lwpid == 0)
12240Sstevel@tonic-gate 		preempt(self);
12250Sstevel@tonic-gate 	return (lwpid);
12260Sstevel@tonic-gate }
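
/*
 * Editor's illustration (not part of the original source): the
 * clear-lock/retain-waiter logic above depends on atomic_swap_32()
 * atomically storing a new lock word and returning the previous one.
 * The standalone sketch below demonstrates that semantics; EX_LOCKSET
 * and EX_WAITER are made-up illustrative bit values, not the libc
 * definitions of the lock byte and waiters bit.
 */
#if 0	/* illustrative example; not compiled into libc */
#include <atomic.h>
#include <inttypes.h>
#include <stdio.h>

#define	EX_LOCKSET	0x01	/* pretend lock byte */
#define	EX_WAITER	0x100	/* pretend waiters bit */

int
main(void)
{
	volatile uint32_t lockword = EX_LOCKSET | EX_WAITER;
	uint32_t old;

	/* swap in a word containing only the waiters bit */
	old = atomic_swap_32(&lockword, EX_WAITER);
	/* old == 0x101: lock released, waiters indication retained */
	(void) printf("old %x new %x\n", (unsigned)old, (unsigned)lockword);
	return (0);
}
#endif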
12270Sstevel@tonic-gate 
12280Sstevel@tonic-gate /*
12290Sstevel@tonic-gate  * Like mutex_unlock_queue(), but for process-shared mutexes.
12300Sstevel@tonic-gate  * We tested the waiters field before calling here and it was non-zero.
12310Sstevel@tonic-gate  */
12320Sstevel@tonic-gate void
12330Sstevel@tonic-gate mutex_unlock_process(mutex_t *mp)
12340Sstevel@tonic-gate {
12350Sstevel@tonic-gate 	ulwp_t *self = curthread;
12360Sstevel@tonic-gate 	int count;
12370Sstevel@tonic-gate 	volatile uint8_t *lockp;
12380Sstevel@tonic-gate 
12390Sstevel@tonic-gate 	/*
12400Sstevel@tonic-gate 	 * See the comments in mutex_unlock_queue(), above.
12410Sstevel@tonic-gate 	 */
12420Sstevel@tonic-gate 	if ((count = ncpus) == 0)
12430Sstevel@tonic-gate 		count = ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
12440Sstevel@tonic-gate 	count = (count > 1)? self->ul_release_spin : 0;
12450Sstevel@tonic-gate 	no_preempt(self);
12460Sstevel@tonic-gate 	mp->mutex_owner = 0;
12470Sstevel@tonic-gate 	mp->mutex_ownerpid = 0;
12480Sstevel@tonic-gate 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
12490Sstevel@tonic-gate 	if (count == 0) {
12500Sstevel@tonic-gate 		/* clear lock, test waiter */
1251*4570Sraf 		if (!(atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK)) {
12520Sstevel@tonic-gate 			/* no waiters now */
12530Sstevel@tonic-gate 			preempt(self);
12540Sstevel@tonic-gate 			return;
12550Sstevel@tonic-gate 		}
12560Sstevel@tonic-gate 	} else {
12570Sstevel@tonic-gate 		/* clear lock, retain waiter */
1258*4570Sraf 		(void) atomic_swap_32(&mp->mutex_lockword, WAITER);
12590Sstevel@tonic-gate 		lockp = (volatile uint8_t *)&mp->mutex_lockw;
12600Sstevel@tonic-gate 		while (--count >= 0) {
12610Sstevel@tonic-gate 			if (*lockp != 0) {
12620Sstevel@tonic-gate 				/* somebody grabbed the lock */
12630Sstevel@tonic-gate 				preempt(self);
12640Sstevel@tonic-gate 				return;
12650Sstevel@tonic-gate 			}
12660Sstevel@tonic-gate 			SMT_PAUSE();
12670Sstevel@tonic-gate 		}
12680Sstevel@tonic-gate 		/*
12690Sstevel@tonic-gate 		 * We must clear the waiters field before going
12700Sstevel@tonic-gate 		 * to the kernel, else it could remain set forever.
12710Sstevel@tonic-gate 		 */
12720Sstevel@tonic-gate 		mp->mutex_waiters = 0;
12730Sstevel@tonic-gate 	}
12740Sstevel@tonic-gate 	(void) ___lwp_mutex_wakeup(mp);
12750Sstevel@tonic-gate 	preempt(self);
12760Sstevel@tonic-gate }
12770Sstevel@tonic-gate 
12780Sstevel@tonic-gate /*
12790Sstevel@tonic-gate  * Return the real priority of a thread.
12800Sstevel@tonic-gate  */
12810Sstevel@tonic-gate int
12820Sstevel@tonic-gate real_priority(ulwp_t *ulwp)
12830Sstevel@tonic-gate {
12840Sstevel@tonic-gate 	if (ulwp->ul_epri == 0)
12850Sstevel@tonic-gate 		return (ulwp->ul_mappedpri? ulwp->ul_mappedpri : ulwp->ul_pri);
12860Sstevel@tonic-gate 	return (ulwp->ul_emappedpri? ulwp->ul_emappedpri : ulwp->ul_epri);
12870Sstevel@tonic-gate }
12880Sstevel@tonic-gate 
12890Sstevel@tonic-gate void
12900Sstevel@tonic-gate stall(void)
12910Sstevel@tonic-gate {
12920Sstevel@tonic-gate 	for (;;)
12930Sstevel@tonic-gate 		(void) mutex_lock_kernel(&stall_mutex, NULL, NULL);
12940Sstevel@tonic-gate }
12950Sstevel@tonic-gate 
12960Sstevel@tonic-gate /*
12970Sstevel@tonic-gate  * Acquire a USYNC_THREAD mutex via user-level sleep queues.
12980Sstevel@tonic-gate  * We come here only after set_lock_byte(&mp->mutex_lockw) has failed.
12990Sstevel@tonic-gate  * Returns with mutex_owner set correctly.
13000Sstevel@tonic-gate  */
13010Sstevel@tonic-gate int
13020Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp,
13030Sstevel@tonic-gate 	timespec_t *tsp)
13040Sstevel@tonic-gate {
13050Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
13060Sstevel@tonic-gate 	queue_head_t *qp;
13070Sstevel@tonic-gate 	hrtime_t begin_sleep;
13080Sstevel@tonic-gate 	int error = 0;
13090Sstevel@tonic-gate 
13100Sstevel@tonic-gate 	self->ul_sp = stkptr();
13110Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
13120Sstevel@tonic-gate 		self->ul_wchan = mp;
13130Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
13140Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = mp;
13150Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
13160Sstevel@tonic-gate 	}
13170Sstevel@tonic-gate 	if (msp) {
13180Sstevel@tonic-gate 		tdb_incr(msp->mutex_sleep);
13190Sstevel@tonic-gate 		begin_sleep = gethrtime();
13200Sstevel@tonic-gate 	}
13210Sstevel@tonic-gate 
13220Sstevel@tonic-gate 	DTRACE_PROBE1(plockstat, mutex__block, mp);
13230Sstevel@tonic-gate 
13240Sstevel@tonic-gate 	/*
13250Sstevel@tonic-gate 	 * Put ourself on the sleep queue, and while we are
13260Sstevel@tonic-gate 	 * unable to grab the lock, go park in the kernel.
13270Sstevel@tonic-gate 	 * Take ourself off the sleep queue after we acquire the lock.
13280Sstevel@tonic-gate 	 * The waiter bit can be set/cleared only while holding the queue lock.
13290Sstevel@tonic-gate 	 */
13300Sstevel@tonic-gate 	qp = queue_lock(mp, MX);
13310Sstevel@tonic-gate 	enqueue(qp, self, mp, MX);
13320Sstevel@tonic-gate 	mp->mutex_waiters = 1;
13330Sstevel@tonic-gate 	for (;;) {
13340Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
13350Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
13360Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
13370Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
13380Sstevel@tonic-gate 			mp->mutex_waiters = dequeue_self(qp, mp);
13390Sstevel@tonic-gate 			break;
13400Sstevel@tonic-gate 		}
13410Sstevel@tonic-gate 		set_parking_flag(self, 1);
13420Sstevel@tonic-gate 		queue_unlock(qp);
13430Sstevel@tonic-gate 		/*
13440Sstevel@tonic-gate 		 * __lwp_park() will return the residual time in tsp
13450Sstevel@tonic-gate 		 * if we are unparked before the timeout expires.
13460Sstevel@tonic-gate 		 */
13470Sstevel@tonic-gate 		if ((error = __lwp_park(tsp, 0)) == EINTR)
13480Sstevel@tonic-gate 			error = 0;
13490Sstevel@tonic-gate 		set_parking_flag(self, 0);
13500Sstevel@tonic-gate 		/*
13510Sstevel@tonic-gate 		 * We could have taken a signal or suspended ourself.
13520Sstevel@tonic-gate 		 * If we did, then we removed ourself from the queue.
13530Sstevel@tonic-gate 		 * Someone else may have removed us from the queue
13540Sstevel@tonic-gate 		 * as a consequence of mutex_unlock().  We may have
13550Sstevel@tonic-gate 		 * gotten a timeout from __lwp_park().  Or we may still
13560Sstevel@tonic-gate 		 * be on the queue and this is just a spurious wakeup.
13570Sstevel@tonic-gate 		 */
13580Sstevel@tonic-gate 		qp = queue_lock(mp, MX);
13590Sstevel@tonic-gate 		if (self->ul_sleepq == NULL) {
13600Sstevel@tonic-gate 			if (error) {
13610Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
13620Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__error, mp,
13630Sstevel@tonic-gate 				    error);
13640Sstevel@tonic-gate 				break;
13650Sstevel@tonic-gate 			}
13660Sstevel@tonic-gate 			if (set_lock_byte(&mp->mutex_lockw) == 0) {
13670Sstevel@tonic-gate 				mp->mutex_owner = (uintptr_t)self;
13680Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
13690Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
13700Sstevel@tonic-gate 				    0, 0);
13710Sstevel@tonic-gate 				break;
13720Sstevel@tonic-gate 			}
13730Sstevel@tonic-gate 			enqueue(qp, self, mp, MX);
13740Sstevel@tonic-gate 			mp->mutex_waiters = 1;
13750Sstevel@tonic-gate 		}
13760Sstevel@tonic-gate 		ASSERT(self->ul_sleepq == qp &&
13770Sstevel@tonic-gate 		    self->ul_qtype == MX &&
13780Sstevel@tonic-gate 		    self->ul_wchan == mp);
13790Sstevel@tonic-gate 		if (error) {
13800Sstevel@tonic-gate 			mp->mutex_waiters = dequeue_self(qp, mp);
13810Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
13820Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
13830Sstevel@tonic-gate 			break;
13840Sstevel@tonic-gate 		}
13850Sstevel@tonic-gate 	}
13860Sstevel@tonic-gate 
13870Sstevel@tonic-gate 	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
13880Sstevel@tonic-gate 	    self->ul_wchan == NULL);
13890Sstevel@tonic-gate 	self->ul_sp = 0;
13900Sstevel@tonic-gate 
13910Sstevel@tonic-gate 	queue_unlock(qp);
13920Sstevel@tonic-gate 	if (msp)
13930Sstevel@tonic-gate 		msp->mutex_sleep_time += gethrtime() - begin_sleep;
13940Sstevel@tonic-gate 
13950Sstevel@tonic-gate 	ASSERT(error == 0 || error == EINVAL || error == ETIME);
13960Sstevel@tonic-gate 	return (error);
13970Sstevel@tonic-gate }
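
/*
 * Editor's summary sketch (not original code): stripped of statistics,
 * event reporting, and error handling, the queue-and-park protocol
 * above reduces to the following handshake with mutex_unlock_queue():
 *
 *	locker					unlocker
 *	------					--------
 *	qp = queue_lock(mp, MX);
 *	enqueue(qp, self, mp, MX);
 *	mp->mutex_waiters = 1;
 *	while (set_lock_byte(...) != 0) {
 *		set_parking_flag(self, 1);
 *		queue_unlock(qp);
 *		(void) __lwp_park(tsp, 0);	lwpid = mutex_unlock_queue(mp);
 *		set_parking_flag(self, 0);	(void) __lwp_unpark(lwpid);
 *		qp = queue_lock(mp, MX);
 *		(re-enqueue if we were dequeued while parked)
 *	}
 *	mp->mutex_waiters = dequeue_self(qp, mp);
 *	queue_unlock(qp);
 */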
13980Sstevel@tonic-gate 
13990Sstevel@tonic-gate /*
14000Sstevel@tonic-gate  * Returns with mutex_owner set correctly.
14010Sstevel@tonic-gate  */
14020Sstevel@tonic-gate int
14030Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try)
14040Sstevel@tonic-gate {
14050Sstevel@tonic-gate 	ulwp_t *self = curthread;
14060Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
14070Sstevel@tonic-gate 	int mtype = mp->mutex_type;
14080Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
14090Sstevel@tonic-gate 	int error = 0;
14100Sstevel@tonic-gate 
14110Sstevel@tonic-gate 	ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);
14120Sstevel@tonic-gate 
14130Sstevel@tonic-gate 	if (!self->ul_schedctl_called)
14140Sstevel@tonic-gate 		(void) setup_schedctl();
14150Sstevel@tonic-gate 
14160Sstevel@tonic-gate 	if (msp && try == MUTEX_TRY)
14170Sstevel@tonic-gate 		tdb_incr(msp->mutex_try);
14180Sstevel@tonic-gate 
14190Sstevel@tonic-gate 	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_is_held(mp)) {
14200Sstevel@tonic-gate 		if (mtype & LOCK_RECURSIVE) {
14210Sstevel@tonic-gate 			if (mp->mutex_rcount == RECURSION_MAX) {
14220Sstevel@tonic-gate 				error = EAGAIN;
14230Sstevel@tonic-gate 			} else {
14240Sstevel@tonic-gate 				mp->mutex_rcount++;
14250Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
14260Sstevel@tonic-gate 				    1, 0);
14270Sstevel@tonic-gate 				return (0);
14280Sstevel@tonic-gate 			}
14290Sstevel@tonic-gate 		} else if (try == MUTEX_TRY) {
14300Sstevel@tonic-gate 			return (EBUSY);
14310Sstevel@tonic-gate 		} else {
14320Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
14330Sstevel@tonic-gate 			return (EDEADLK);
14340Sstevel@tonic-gate 		}
14350Sstevel@tonic-gate 	}
14360Sstevel@tonic-gate 
14370Sstevel@tonic-gate 	if (self->ul_error_detection && try == MUTEX_LOCK &&
14380Sstevel@tonic-gate 	    tsp == NULL && mutex_is_held(mp))
14390Sstevel@tonic-gate 		lock_error(mp, "mutex_lock", NULL, NULL);
14400Sstevel@tonic-gate 
14410Sstevel@tonic-gate 	if (mtype &
14420Sstevel@tonic-gate 	    (USYNC_PROCESS_ROBUST|PTHREAD_PRIO_INHERIT|PTHREAD_PRIO_PROTECT)) {
14430Sstevel@tonic-gate 		uint8_t ceil;
14440Sstevel@tonic-gate 		int myprio;
14450Sstevel@tonic-gate 
14460Sstevel@tonic-gate 		if (mtype & PTHREAD_PRIO_PROTECT) {
14470Sstevel@tonic-gate 			ceil = mp->mutex_ceiling;
14480Sstevel@tonic-gate 			ASSERT(_validate_rt_prio(SCHED_FIFO, ceil) == 0);
14490Sstevel@tonic-gate 			myprio = real_priority(self);
14500Sstevel@tonic-gate 			if (myprio > ceil) {
14510Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__error, mp,
14520Sstevel@tonic-gate 				    EINVAL);
14530Sstevel@tonic-gate 				return (EINVAL);
14540Sstevel@tonic-gate 			}
14550Sstevel@tonic-gate 			if ((error = _ceil_mylist_add(mp)) != 0) {
14560Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__error, mp,
14570Sstevel@tonic-gate 				    error);
14580Sstevel@tonic-gate 				return (error);
14590Sstevel@tonic-gate 			}
14600Sstevel@tonic-gate 			if (myprio < ceil)
14610Sstevel@tonic-gate 				_ceil_prio_inherit(ceil);
14620Sstevel@tonic-gate 		}
14630Sstevel@tonic-gate 
14640Sstevel@tonic-gate 		if (mtype & PTHREAD_PRIO_INHERIT) {
14650Sstevel@tonic-gate 			/* go straight to the kernel */
14660Sstevel@tonic-gate 			if (try == MUTEX_TRY)
14670Sstevel@tonic-gate 				error = mutex_trylock_kernel(mp);
14680Sstevel@tonic-gate 			else	/* MUTEX_LOCK */
14690Sstevel@tonic-gate 				error = mutex_lock_kernel(mp, tsp, msp);
14700Sstevel@tonic-gate 			/*
14710Sstevel@tonic-gate 			 * The kernel never sets or clears the lock byte
14720Sstevel@tonic-gate 			 * for PTHREAD_PRIO_INHERIT mutexes.
14730Sstevel@tonic-gate 			 * Set it here for debugging consistency.
14740Sstevel@tonic-gate 			 */
14750Sstevel@tonic-gate 			switch (error) {
14760Sstevel@tonic-gate 			case 0:
14770Sstevel@tonic-gate 			case EOWNERDEAD:
14780Sstevel@tonic-gate 				mp->mutex_lockw = LOCKSET;
14790Sstevel@tonic-gate 				break;
14800Sstevel@tonic-gate 			}
14810Sstevel@tonic-gate 		} else if (mtype & USYNC_PROCESS_ROBUST) {
14820Sstevel@tonic-gate 			/* go straight to the kernel */
14830Sstevel@tonic-gate 			if (try == MUTEX_TRY)
14840Sstevel@tonic-gate 				error = mutex_trylock_kernel(mp);
14850Sstevel@tonic-gate 			else	/* MUTEX_LOCK */
14860Sstevel@tonic-gate 				error = mutex_lock_kernel(mp, tsp, msp);
14870Sstevel@tonic-gate 		} else {	/* PTHREAD_PRIO_PROTECT */
14880Sstevel@tonic-gate 			/*
14890Sstevel@tonic-gate 			 * Try once at user level before going to the kernel.
14900Sstevel@tonic-gate 			 * If this is a process shared mutex then protect
14910Sstevel@tonic-gate 			 * against forkall() while setting mp->mutex_ownerpid.
14920Sstevel@tonic-gate 			 */
14930Sstevel@tonic-gate 			if (mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) {
14940Sstevel@tonic-gate 				enter_critical(self);
14950Sstevel@tonic-gate 				if (set_lock_byte(&mp->mutex_lockw) == 0) {
14960Sstevel@tonic-gate 					mp->mutex_owner = (uintptr_t)self;
14970Sstevel@tonic-gate 					mp->mutex_ownerpid = udp->pid;
14980Sstevel@tonic-gate 					exit_critical(self);
14990Sstevel@tonic-gate 					DTRACE_PROBE3(plockstat,
15000Sstevel@tonic-gate 					    mutex__acquire, mp, 0, 0);
15010Sstevel@tonic-gate 				} else {
15020Sstevel@tonic-gate 					exit_critical(self);
15030Sstevel@tonic-gate 					error = EBUSY;
15040Sstevel@tonic-gate 				}
15050Sstevel@tonic-gate 			} else {
15060Sstevel@tonic-gate 				if (set_lock_byte(&mp->mutex_lockw) == 0) {
15070Sstevel@tonic-gate 					mp->mutex_owner = (uintptr_t)self;
15080Sstevel@tonic-gate 					DTRACE_PROBE3(plockstat,
15090Sstevel@tonic-gate 					    mutex__acquire, mp, 0, 0);
15100Sstevel@tonic-gate 				} else {
15110Sstevel@tonic-gate 					error = EBUSY;
15120Sstevel@tonic-gate 				}
15130Sstevel@tonic-gate 			}
15140Sstevel@tonic-gate 			if (error && try == MUTEX_LOCK)
15150Sstevel@tonic-gate 				error = mutex_lock_kernel(mp, tsp, msp);
15160Sstevel@tonic-gate 		}
15170Sstevel@tonic-gate 
15180Sstevel@tonic-gate 		if (error) {
15190Sstevel@tonic-gate 			if (mtype & PTHREAD_PRIO_INHERIT) {
15200Sstevel@tonic-gate 				switch (error) {
15210Sstevel@tonic-gate 				case EOWNERDEAD:
15220Sstevel@tonic-gate 				case ENOTRECOVERABLE:
15230Sstevel@tonic-gate 					if (mtype & PTHREAD_MUTEX_ROBUST_NP)
15240Sstevel@tonic-gate 						break;
15250Sstevel@tonic-gate 					if (error == EOWNERDEAD) {
15260Sstevel@tonic-gate 						/*
15270Sstevel@tonic-gate 						 * We own the mutex; unlock it.
15280Sstevel@tonic-gate 						 * It becomes ENOTRECOVERABLE.
15290Sstevel@tonic-gate 						 * All waiters are woken up.
15300Sstevel@tonic-gate 						 */
15310Sstevel@tonic-gate 						mp->mutex_owner = 0;
15320Sstevel@tonic-gate 						mp->mutex_ownerpid = 0;
15330Sstevel@tonic-gate 						DTRACE_PROBE2(plockstat,
15340Sstevel@tonic-gate 						    mutex__release, mp, 0);
15350Sstevel@tonic-gate 						mp->mutex_lockw = LOCKCLEAR;
15360Sstevel@tonic-gate 						(void) ___lwp_mutex_unlock(mp);
15370Sstevel@tonic-gate 					}
15380Sstevel@tonic-gate 					/* FALLTHROUGH */
15390Sstevel@tonic-gate 				case EDEADLK:
15400Sstevel@tonic-gate 					if (try == MUTEX_LOCK)
15410Sstevel@tonic-gate 						stall();
15420Sstevel@tonic-gate 					error = EBUSY;
15430Sstevel@tonic-gate 					break;
15440Sstevel@tonic-gate 				}
15450Sstevel@tonic-gate 			}
15460Sstevel@tonic-gate 			if ((mtype & PTHREAD_PRIO_PROTECT) &&
15470Sstevel@tonic-gate 			    error != EOWNERDEAD) {
15480Sstevel@tonic-gate 				(void) _ceil_mylist_del(mp);
15490Sstevel@tonic-gate 				if (myprio < ceil)
15500Sstevel@tonic-gate 					_ceil_prio_waive();
15510Sstevel@tonic-gate 			}
15520Sstevel@tonic-gate 		}
15530Sstevel@tonic-gate 	} else if (mtype & USYNC_PROCESS) {
15540Sstevel@tonic-gate 		/*
15550Sstevel@tonic-gate 		 * This is a process shared mutex.  Protect against
15560Sstevel@tonic-gate 		 * forkall() while setting mp->mutex_ownerpid.
15570Sstevel@tonic-gate 		 */
15580Sstevel@tonic-gate 		enter_critical(self);
15590Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
15600Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
15610Sstevel@tonic-gate 			mp->mutex_ownerpid = udp->pid;
15620Sstevel@tonic-gate 			exit_critical(self);
15630Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
15640Sstevel@tonic-gate 		} else {
15650Sstevel@tonic-gate 			/* try a little harder */
15660Sstevel@tonic-gate 			exit_critical(self);
15670Sstevel@tonic-gate 			error = mutex_trylock_process(mp);
15680Sstevel@tonic-gate 		}
15690Sstevel@tonic-gate 		if (error && try == MUTEX_LOCK)
15700Sstevel@tonic-gate 			error = mutex_lock_kernel(mp, tsp, msp);
15710Sstevel@tonic-gate 	} else  {	/* USYNC_THREAD */
15720Sstevel@tonic-gate 		/* try once */
15730Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
15740Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
15750Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
15760Sstevel@tonic-gate 		} else {
15770Sstevel@tonic-gate 			/* try a little harder if we don't own the mutex */
15780Sstevel@tonic-gate 			error = EBUSY;
15790Sstevel@tonic-gate 			if (MUTEX_OWNER(mp) != self)
15800Sstevel@tonic-gate 				error = mutex_trylock_adaptive(mp);
15810Sstevel@tonic-gate 			if (error && try == MUTEX_LOCK)		/* go park */
15820Sstevel@tonic-gate 				error = mutex_lock_queue(self, msp, mp, tsp);
15830Sstevel@tonic-gate 		}
15840Sstevel@tonic-gate 	}
15850Sstevel@tonic-gate 
15860Sstevel@tonic-gate 	switch (error) {
15870Sstevel@tonic-gate 	case EOWNERDEAD:
15880Sstevel@tonic-gate 	case ELOCKUNMAPPED:
15890Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
15900Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
15910Sstevel@tonic-gate 		/* FALLTHROUGH */
15920Sstevel@tonic-gate 	case 0:
15930Sstevel@tonic-gate 		if (msp)
15940Sstevel@tonic-gate 			record_begin_hold(msp);
15950Sstevel@tonic-gate 		break;
15960Sstevel@tonic-gate 	default:
15970Sstevel@tonic-gate 		if (try == MUTEX_TRY) {
15980Sstevel@tonic-gate 			if (msp)
15990Sstevel@tonic-gate 				tdb_incr(msp->mutex_try_fail);
16000Sstevel@tonic-gate 			if (__td_event_report(self, TD_LOCK_TRY, udp)) {
16010Sstevel@tonic-gate 				self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
16020Sstevel@tonic-gate 				tdb_event(TD_LOCK_TRY, udp);
16030Sstevel@tonic-gate 			}
16040Sstevel@tonic-gate 		}
16050Sstevel@tonic-gate 		break;
16060Sstevel@tonic-gate 	}
16070Sstevel@tonic-gate 
16080Sstevel@tonic-gate 	return (error);
16090Sstevel@tonic-gate }
16100Sstevel@tonic-gate 
16110Sstevel@tonic-gate int
16120Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try)
16130Sstevel@tonic-gate {
16140Sstevel@tonic-gate 	ulwp_t *self = curthread;
16150Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
16160Sstevel@tonic-gate 
16170Sstevel@tonic-gate 	/*
16180Sstevel@tonic-gate 	 * We know that USYNC_PROCESS is set in mtype and that
16190Sstevel@tonic-gate 	 * zero, one, or both of the flags LOCK_RECURSIVE and
16200Sstevel@tonic-gate 	 * LOCK_ERRORCHECK are set, and that no other flags are set.
16210Sstevel@tonic-gate 	 */
16220Sstevel@tonic-gate 	enter_critical(self);
16230Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) == 0) {
16240Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
16250Sstevel@tonic-gate 		mp->mutex_ownerpid = udp->pid;
16260Sstevel@tonic-gate 		exit_critical(self);
16270Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
16280Sstevel@tonic-gate 		return (0);
16290Sstevel@tonic-gate 	}
16300Sstevel@tonic-gate 	exit_critical(self);
16310Sstevel@tonic-gate 
16320Sstevel@tonic-gate 	if ((mtype & ~USYNC_PROCESS) && shared_mutex_held(mp)) {
16330Sstevel@tonic-gate 		if (mtype & LOCK_RECURSIVE) {
16340Sstevel@tonic-gate 			if (mp->mutex_rcount == RECURSION_MAX)
16350Sstevel@tonic-gate 				return (EAGAIN);
16360Sstevel@tonic-gate 			mp->mutex_rcount++;
16370Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0);
16380Sstevel@tonic-gate 			return (0);
16390Sstevel@tonic-gate 		}
16400Sstevel@tonic-gate 		if (try == MUTEX_LOCK) {
16410Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
16420Sstevel@tonic-gate 			return (EDEADLK);
16430Sstevel@tonic-gate 		}
16440Sstevel@tonic-gate 		return (EBUSY);
16450Sstevel@tonic-gate 	}
16460Sstevel@tonic-gate 
16470Sstevel@tonic-gate 	/* try a little harder if we don't own the mutex */
16480Sstevel@tonic-gate 	if (!shared_mutex_held(mp) && mutex_trylock_process(mp) == 0)
16490Sstevel@tonic-gate 		return (0);
16500Sstevel@tonic-gate 
16510Sstevel@tonic-gate 	if (try == MUTEX_LOCK)
16520Sstevel@tonic-gate 		return (mutex_lock_kernel(mp, tsp, NULL));
16530Sstevel@tonic-gate 
16540Sstevel@tonic-gate 	if (__td_event_report(self, TD_LOCK_TRY, udp)) {
16550Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
16560Sstevel@tonic-gate 		tdb_event(TD_LOCK_TRY, udp);
16570Sstevel@tonic-gate 	}
16580Sstevel@tonic-gate 	return (EBUSY);
16590Sstevel@tonic-gate }
16600Sstevel@tonic-gate 
16610Sstevel@tonic-gate static int
16620Sstevel@tonic-gate slow_lock(ulwp_t *self, mutex_t *mp, timespec_t *tsp)
16630Sstevel@tonic-gate {
16640Sstevel@tonic-gate 	int error = 0;
16650Sstevel@tonic-gate 
16660Sstevel@tonic-gate 	if (MUTEX_OWNER(mp) == self || mutex_trylock_adaptive(mp) != 0)
16670Sstevel@tonic-gate 		error = mutex_lock_queue(self, NULL, mp, tsp);
16680Sstevel@tonic-gate 	return (error);
16690Sstevel@tonic-gate }
16700Sstevel@tonic-gate 
16710Sstevel@tonic-gate int
16720Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp)
16730Sstevel@tonic-gate {
16740Sstevel@tonic-gate 	ulwp_t *self = curthread;
16750Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
16760Sstevel@tonic-gate 	uberflags_t *gflags;
16770Sstevel@tonic-gate 	int mtype;
16780Sstevel@tonic-gate 
16790Sstevel@tonic-gate 	/*
16800Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
16810Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
16820Sstevel@tonic-gate 	 * no error detection, no lock statistics,
16830Sstevel@tonic-gate 	 * and the process has only a single thread.
16840Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
16850Sstevel@tonic-gate 	 */
16860Sstevel@tonic-gate 	if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
16870Sstevel@tonic-gate 	    udp->uberflags.uf_all) == 0) {
16880Sstevel@tonic-gate 		/*
16890Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
16900Sstevel@tonic-gate 		 */
16910Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
16920Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
16930Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
16940Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
16950Sstevel@tonic-gate 			return (0);
16960Sstevel@tonic-gate 		}
16970Sstevel@tonic-gate 		if (mtype && MUTEX_OWNER(mp) == self) {
16980Sstevel@tonic-gate 			/*
16990Sstevel@tonic-gate 			 * LOCK_RECURSIVE, LOCK_ERRORCHECK, or both.
17000Sstevel@tonic-gate 			 */
17010Sstevel@tonic-gate 			if (mtype & LOCK_RECURSIVE) {
17020Sstevel@tonic-gate 				if (mp->mutex_rcount == RECURSION_MAX)
17030Sstevel@tonic-gate 					return (EAGAIN);
17040Sstevel@tonic-gate 				mp->mutex_rcount++;
17050Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
17060Sstevel@tonic-gate 				    1, 0);
17070Sstevel@tonic-gate 				return (0);
17080Sstevel@tonic-gate 			}
17090Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
17100Sstevel@tonic-gate 			return (EDEADLK);	/* LOCK_ERRORCHECK */
17110Sstevel@tonic-gate 		}
17120Sstevel@tonic-gate 		/*
17130Sstevel@tonic-gate 		 * We have reached a deadlock, probably because the
17140Sstevel@tonic-gate 		 * process is executing non-async-signal-safe code in
17150Sstevel@tonic-gate 		 * a signal handler and is attempting to acquire a lock
17160Sstevel@tonic-gate 		 * that it already owns.  This is not surprising, given
17170Sstevel@tonic-gate 		 * bad programming practices over the years that have
17180Sstevel@tonic-gate 		 * resulted in applications calling printf() and such
17190Sstevel@tonic-gate 		 * in their signal handlers.  Unless the user has told
17200Sstevel@tonic-gate 		 * us that the signal handlers are safe by setting:
17210Sstevel@tonic-gate 		 *	export _THREAD_ASYNC_SAFE=1
17220Sstevel@tonic-gate 		 * we return EDEADLK rather than actually deadlocking.
17230Sstevel@tonic-gate 		 */
17240Sstevel@tonic-gate 		if (tsp == NULL &&
17250Sstevel@tonic-gate 		    MUTEX_OWNER(mp) == self && !self->ul_async_safe) {
17260Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
17270Sstevel@tonic-gate 			return (EDEADLK);
17280Sstevel@tonic-gate 		}
17290Sstevel@tonic-gate 	}
17300Sstevel@tonic-gate 
17310Sstevel@tonic-gate 	/*
17320Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
17330Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
17340Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
17350Sstevel@tonic-gate 	 */
17360Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
17370Sstevel@tonic-gate 	    (gflags->uf_trs_ted |
17380Sstevel@tonic-gate 	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
17390Sstevel@tonic-gate 
17400Sstevel@tonic-gate 		if (mtype & USYNC_PROCESS)
17410Sstevel@tonic-gate 			return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK));
17420Sstevel@tonic-gate 
17430Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
17440Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
17450Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
17460Sstevel@tonic-gate 			return (0);
17470Sstevel@tonic-gate 		}
17480Sstevel@tonic-gate 
17490Sstevel@tonic-gate 		if (mtype && MUTEX_OWNER(mp) == self) {
17500Sstevel@tonic-gate 			if (mtype & LOCK_RECURSIVE) {
17510Sstevel@tonic-gate 				if (mp->mutex_rcount == RECURSION_MAX)
17520Sstevel@tonic-gate 					return (EAGAIN);
17530Sstevel@tonic-gate 				mp->mutex_rcount++;
17540Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
17550Sstevel@tonic-gate 				    1, 0);
17560Sstevel@tonic-gate 				return (0);
17570Sstevel@tonic-gate 			}
17580Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
17590Sstevel@tonic-gate 			return (EDEADLK);	/* LOCK_ERRORCHECK */
17600Sstevel@tonic-gate 		}
17610Sstevel@tonic-gate 
17620Sstevel@tonic-gate 		return (slow_lock(self, mp, tsp));
17630Sstevel@tonic-gate 	}
17640Sstevel@tonic-gate 
17650Sstevel@tonic-gate 	/* else do it the long way */
17660Sstevel@tonic-gate 	return (mutex_lock_internal(mp, tsp, MUTEX_LOCK));
17670Sstevel@tonic-gate }
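
/*
 * Editor's summary (derived from the tests above): mutex_lock_impl()
 * dispatches through three tiers:
 * 1. Single-threaded fast path: uberflags.uf_all is zero and only the
 *    LOCK_RECURSIVE/LOCK_ERRORCHECK type bits are set; no atomic
 *    operations are needed.
 * 2. Common multithreaded path: schedctl is set up and there is no
 *    tracing or error detection; fast_process_lock() handles
 *    USYNC_PROCESS and set_lock_byte()/slow_lock() handle USYNC_THREAD.
 * 3. Everything else (priority protocols, robust mutexes, statistics)
 *    takes the long way through mutex_lock_internal().
 */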
17680Sstevel@tonic-gate 
17690Sstevel@tonic-gate #pragma weak _private_mutex_lock = __mutex_lock
17700Sstevel@tonic-gate #pragma weak mutex_lock = __mutex_lock
17710Sstevel@tonic-gate #pragma weak _mutex_lock = __mutex_lock
17720Sstevel@tonic-gate #pragma weak pthread_mutex_lock = __mutex_lock
17730Sstevel@tonic-gate #pragma weak _pthread_mutex_lock = __mutex_lock
17740Sstevel@tonic-gate int
17750Sstevel@tonic-gate __mutex_lock(mutex_t *mp)
17760Sstevel@tonic-gate {
17770Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
17780Sstevel@tonic-gate 	return (mutex_lock_impl(mp, NULL));
17790Sstevel@tonic-gate }
17800Sstevel@tonic-gate 
17810Sstevel@tonic-gate #pragma weak pthread_mutex_timedlock = _pthread_mutex_timedlock
17820Sstevel@tonic-gate int
17830Sstevel@tonic-gate _pthread_mutex_timedlock(mutex_t *mp, const timespec_t *abstime)
17840Sstevel@tonic-gate {
17850Sstevel@tonic-gate 	timespec_t tslocal;
17860Sstevel@tonic-gate 	int error;
17870Sstevel@tonic-gate 
17880Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
17890Sstevel@tonic-gate 	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
17900Sstevel@tonic-gate 	error = mutex_lock_impl(mp, &tslocal);
17910Sstevel@tonic-gate 	if (error == ETIME)
17920Sstevel@tonic-gate 		error = ETIMEDOUT;
17930Sstevel@tonic-gate 	return (error);
17940Sstevel@tonic-gate }
17950Sstevel@tonic-gate 
17960Sstevel@tonic-gate #pragma weak pthread_mutex_reltimedlock_np = _pthread_mutex_reltimedlock_np
17970Sstevel@tonic-gate int
17980Sstevel@tonic-gate _pthread_mutex_reltimedlock_np(mutex_t *mp, const timespec_t *reltime)
17990Sstevel@tonic-gate {
18000Sstevel@tonic-gate 	timespec_t tslocal;
18010Sstevel@tonic-gate 	int error;
18020Sstevel@tonic-gate 
18030Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
18040Sstevel@tonic-gate 	tslocal = *reltime;
18050Sstevel@tonic-gate 	error = mutex_lock_impl(mp, &tslocal);
18060Sstevel@tonic-gate 	if (error == ETIME)
18070Sstevel@tonic-gate 		error = ETIMEDOUT;
18080Sstevel@tonic-gate 	return (error);
18090Sstevel@tonic-gate }
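
/*
 * Editor's usage sketch (hypothetical caller, not libc code): the two
 * entry points above differ only in whether the caller supplies an
 * absolute CLOCK_REALTIME deadline or a relative timeout.
 */
#if 0	/* illustrative example; not compiled into libc */
#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;

int
grab_within_two_seconds(void)
{
	timespec_t abstime;
	timespec_t reltime = { 2, 0 };
	int error;

	/* absolute flavor: deadline = now + 2 seconds */
	(void) clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 2;
	if ((error = pthread_mutex_timedlock(&ex_lock, &abstime)) == 0)
		(void) pthread_mutex_unlock(&ex_lock);
	else if (error != ETIMEDOUT)
		return (error);

	/* relative flavor (Solaris extension): timeout passed directly */
	if ((error = pthread_mutex_reltimedlock_np(&ex_lock, &reltime)) == 0)
		(void) pthread_mutex_unlock(&ex_lock);
	return (error);
}
#endif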
18100Sstevel@tonic-gate 
18110Sstevel@tonic-gate static int
18120Sstevel@tonic-gate slow_trylock(mutex_t *mp, ulwp_t *self)
18130Sstevel@tonic-gate {
18140Sstevel@tonic-gate 	if (MUTEX_OWNER(mp) == self ||
18150Sstevel@tonic-gate 	    mutex_trylock_adaptive(mp) != 0) {
18160Sstevel@tonic-gate 		uberdata_t *udp = self->ul_uberdata;
18170Sstevel@tonic-gate 
18180Sstevel@tonic-gate 		if (__td_event_report(self, TD_LOCK_TRY, udp)) {
18190Sstevel@tonic-gate 			self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
18200Sstevel@tonic-gate 			tdb_event(TD_LOCK_TRY, udp);
18210Sstevel@tonic-gate 		}
18220Sstevel@tonic-gate 		return (EBUSY);
18230Sstevel@tonic-gate 	}
18240Sstevel@tonic-gate 	return (0);
18250Sstevel@tonic-gate }
18260Sstevel@tonic-gate 
18270Sstevel@tonic-gate #pragma weak _private_mutex_trylock = __mutex_trylock
18280Sstevel@tonic-gate #pragma weak mutex_trylock = __mutex_trylock
18290Sstevel@tonic-gate #pragma weak _mutex_trylock = __mutex_trylock
18300Sstevel@tonic-gate #pragma weak pthread_mutex_trylock = __mutex_trylock
18310Sstevel@tonic-gate #pragma weak _pthread_mutex_trylock = __mutex_trylock
18320Sstevel@tonic-gate int
18330Sstevel@tonic-gate __mutex_trylock(mutex_t *mp)
18340Sstevel@tonic-gate {
18350Sstevel@tonic-gate 	ulwp_t *self = curthread;
18360Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
18370Sstevel@tonic-gate 	uberflags_t *gflags;
18380Sstevel@tonic-gate 	int mtype;
18390Sstevel@tonic-gate 
18400Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
18410Sstevel@tonic-gate 	/*
18420Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
18430Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
18440Sstevel@tonic-gate 	 * no error detection, no lock statistics,
18450Sstevel@tonic-gate 	 * and the process has only a single thread.
18460Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
18470Sstevel@tonic-gate 	 */
18480Sstevel@tonic-gate 	if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
18490Sstevel@tonic-gate 	    udp->uberflags.uf_all) == 0) {
18500Sstevel@tonic-gate 		/*
18510Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
18520Sstevel@tonic-gate 		 */
18530Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
18540Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
18550Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
18560Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
18570Sstevel@tonic-gate 			return (0);
18580Sstevel@tonic-gate 		}
18590Sstevel@tonic-gate 		if (mtype && MUTEX_OWNER(mp) == self) {
18600Sstevel@tonic-gate 			if (mtype & LOCK_RECURSIVE) {
18610Sstevel@tonic-gate 				if (mp->mutex_rcount == RECURSION_MAX)
18620Sstevel@tonic-gate 					return (EAGAIN);
18630Sstevel@tonic-gate 				mp->mutex_rcount++;
18640Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
18650Sstevel@tonic-gate 				    1, 0);
18660Sstevel@tonic-gate 				return (0);
18670Sstevel@tonic-gate 			}
18680Sstevel@tonic-gate 			return (EDEADLK);	/* LOCK_ERRORCHECK */
18690Sstevel@tonic-gate 		}
18700Sstevel@tonic-gate 		return (EBUSY);
18710Sstevel@tonic-gate 	}
18720Sstevel@tonic-gate 
18730Sstevel@tonic-gate 	/*
18740Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
18750Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
18760Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
18770Sstevel@tonic-gate 	 */
18780Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
18790Sstevel@tonic-gate 	    (gflags->uf_trs_ted |
18800Sstevel@tonic-gate 	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
18810Sstevel@tonic-gate 
18820Sstevel@tonic-gate 		if (mtype & USYNC_PROCESS)
18830Sstevel@tonic-gate 			return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY));
18840Sstevel@tonic-gate 
18850Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
18860Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
18870Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
18880Sstevel@tonic-gate 			return (0);
18890Sstevel@tonic-gate 		}
18900Sstevel@tonic-gate 
18910Sstevel@tonic-gate 		if (mtype && MUTEX_OWNER(mp) == self) {
18920Sstevel@tonic-gate 			if (mtype & LOCK_RECURSIVE) {
18930Sstevel@tonic-gate 				if (mp->mutex_rcount == RECURSION_MAX)
18940Sstevel@tonic-gate 					return (EAGAIN);
18950Sstevel@tonic-gate 				mp->mutex_rcount++;
18960Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
18970Sstevel@tonic-gate 				    1, 0);
18980Sstevel@tonic-gate 				return (0);
18990Sstevel@tonic-gate 			}
19000Sstevel@tonic-gate 			return (EBUSY);		/* LOCK_ERRORCHECK */
19010Sstevel@tonic-gate 		}
19020Sstevel@tonic-gate 
19030Sstevel@tonic-gate 		return (slow_trylock(mp, self));
19040Sstevel@tonic-gate 	}
19050Sstevel@tonic-gate 
19060Sstevel@tonic-gate 	/* else do it the long way */
19070Sstevel@tonic-gate 	return (mutex_lock_internal(mp, NULL, MUTEX_TRY));
19080Sstevel@tonic-gate }
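
/*
 * Editor's usage sketch (hypothetical caller): EBUSY from
 * pthread_mutex_trylock() is the expected answer under contention
 * and is not an error.
 */
#if 0	/* illustrative example; not compiled into libc */
#include <pthread.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static int ex_counter;

void
bump_if_uncontended(void)
{
	if (pthread_mutex_trylock(&ex_lock) == 0) {
		ex_counter++;	/* lock acquired; very short hold */
		(void) pthread_mutex_unlock(&ex_lock);
	}
}
#endif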
19090Sstevel@tonic-gate 
19100Sstevel@tonic-gate int
19110Sstevel@tonic-gate mutex_unlock_internal(mutex_t *mp)
19120Sstevel@tonic-gate {
19130Sstevel@tonic-gate 	ulwp_t *self = curthread;
19140Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
19150Sstevel@tonic-gate 	int mtype = mp->mutex_type;
19160Sstevel@tonic-gate 	tdb_mutex_stats_t *msp;
19170Sstevel@tonic-gate 	int error;
19180Sstevel@tonic-gate 	lwpid_t lwpid;
19190Sstevel@tonic-gate 
19200Sstevel@tonic-gate 	if ((mtype & LOCK_ERRORCHECK) && !mutex_is_held(mp))
19210Sstevel@tonic-gate 		return (EPERM);
19220Sstevel@tonic-gate 
19230Sstevel@tonic-gate 	if (self->ul_error_detection && !mutex_is_held(mp))
19240Sstevel@tonic-gate 		lock_error(mp, "mutex_unlock", NULL, NULL);
19250Sstevel@tonic-gate 
19260Sstevel@tonic-gate 	if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
19270Sstevel@tonic-gate 		mp->mutex_rcount--;
19280Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
19290Sstevel@tonic-gate 		return (0);
19300Sstevel@tonic-gate 	}
19310Sstevel@tonic-gate 
19320Sstevel@tonic-gate 	if ((msp = MUTEX_STATS(mp, udp)) != NULL)
19330Sstevel@tonic-gate 		(void) record_hold_time(msp);
19340Sstevel@tonic-gate 
19350Sstevel@tonic-gate 	if (mtype &
19360Sstevel@tonic-gate 	    (USYNC_PROCESS_ROBUST|PTHREAD_PRIO_INHERIT|PTHREAD_PRIO_PROTECT)) {
19370Sstevel@tonic-gate 		no_preempt(self);
19380Sstevel@tonic-gate 		mp->mutex_owner = 0;
19390Sstevel@tonic-gate 		mp->mutex_ownerpid = 0;
19400Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
19410Sstevel@tonic-gate 		if (mtype & PTHREAD_PRIO_INHERIT) {
19420Sstevel@tonic-gate 			mp->mutex_lockw = LOCKCLEAR;
19430Sstevel@tonic-gate 			error = ___lwp_mutex_unlock(mp);
19440Sstevel@tonic-gate 		} else if (mtype & USYNC_PROCESS_ROBUST) {
19450Sstevel@tonic-gate 			error = ___lwp_mutex_unlock(mp);
19460Sstevel@tonic-gate 		} else {
1947*4570Sraf 			if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK)
19480Sstevel@tonic-gate 				(void) ___lwp_mutex_wakeup(mp);
19490Sstevel@tonic-gate 			error = 0;
19500Sstevel@tonic-gate 		}
19510Sstevel@tonic-gate 		if (mtype & PTHREAD_PRIO_PROTECT) {
19520Sstevel@tonic-gate 			if (_ceil_mylist_del(mp))
19530Sstevel@tonic-gate 				_ceil_prio_waive();
19540Sstevel@tonic-gate 		}
19550Sstevel@tonic-gate 		preempt(self);
19560Sstevel@tonic-gate 	} else if (mtype & USYNC_PROCESS) {
19570Sstevel@tonic-gate 		if (mp->mutex_lockword & WAITERMASK)
19580Sstevel@tonic-gate 			mutex_unlock_process(mp);
19590Sstevel@tonic-gate 		else {
19600Sstevel@tonic-gate 			mp->mutex_owner = 0;
19610Sstevel@tonic-gate 			mp->mutex_ownerpid = 0;
19620Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
1963*4570Sraf 			if (atomic_swap_32(&mp->mutex_lockword, 0) &
1964*4570Sraf 			    WAITERMASK) {
19650Sstevel@tonic-gate 				no_preempt(self);
19660Sstevel@tonic-gate 				(void) ___lwp_mutex_wakeup(mp);
19670Sstevel@tonic-gate 				preempt(self);
19680Sstevel@tonic-gate 			}
19690Sstevel@tonic-gate 		}
19700Sstevel@tonic-gate 		error = 0;
19710Sstevel@tonic-gate 	} else {	/* USYNC_THREAD */
19720Sstevel@tonic-gate 		if ((lwpid = mutex_unlock_queue(mp)) != 0) {
19730Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid);
19740Sstevel@tonic-gate 			preempt(self);
19750Sstevel@tonic-gate 		}
19760Sstevel@tonic-gate 		error = 0;
19770Sstevel@tonic-gate 	}
19780Sstevel@tonic-gate 
19790Sstevel@tonic-gate 	return (error);
19800Sstevel@tonic-gate }
19810Sstevel@tonic-gate 
19820Sstevel@tonic-gate #pragma weak _private_mutex_unlock = __mutex_unlock
19830Sstevel@tonic-gate #pragma weak mutex_unlock = __mutex_unlock
19840Sstevel@tonic-gate #pragma weak _mutex_unlock = __mutex_unlock
19850Sstevel@tonic-gate #pragma weak pthread_mutex_unlock = __mutex_unlock
19860Sstevel@tonic-gate #pragma weak _pthread_mutex_unlock = __mutex_unlock
19870Sstevel@tonic-gate int
19880Sstevel@tonic-gate __mutex_unlock(mutex_t *mp)
19890Sstevel@tonic-gate {
19900Sstevel@tonic-gate 	ulwp_t *self = curthread;
19910Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
19920Sstevel@tonic-gate 	uberflags_t *gflags;
19930Sstevel@tonic-gate 	lwpid_t lwpid;
19940Sstevel@tonic-gate 	int mtype;
19950Sstevel@tonic-gate 	short el;
19960Sstevel@tonic-gate 
19970Sstevel@tonic-gate 	/*
19980Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
19990Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
20000Sstevel@tonic-gate 	 * no error detection, no lock statistics,
20010Sstevel@tonic-gate 	 * and the process has only a single thread.
20020Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
20030Sstevel@tonic-gate 	 */
20040Sstevel@tonic-gate 	if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
20050Sstevel@tonic-gate 	    udp->uberflags.uf_all) == 0) {
20060Sstevel@tonic-gate 		if (mtype) {
20070Sstevel@tonic-gate 			/*
20080Sstevel@tonic-gate 			 * At this point we know that one or both of the
20090Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK are set.
20100Sstevel@tonic-gate 			 */
20110Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
20120Sstevel@tonic-gate 				return (EPERM);
20130Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
20140Sstevel@tonic-gate 				mp->mutex_rcount--;
20150Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
20160Sstevel@tonic-gate 				return (0);
20170Sstevel@tonic-gate 			}
20180Sstevel@tonic-gate 		}
20190Sstevel@tonic-gate 		/*
20200Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
20210Sstevel@tonic-gate 		 * Also, there can be no waiters.
20220Sstevel@tonic-gate 		 */
20230Sstevel@tonic-gate 		mp->mutex_owner = 0;
20240Sstevel@tonic-gate 		mp->mutex_lockword = 0;
20250Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
20260Sstevel@tonic-gate 		return (0);
20270Sstevel@tonic-gate 	}
20280Sstevel@tonic-gate 
20290Sstevel@tonic-gate 	/*
20300Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
20310Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
20320Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
20330Sstevel@tonic-gate 	 */
20340Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL) {
20350Sstevel@tonic-gate 		if (((el = gflags->uf_trs_ted) | mtype) == 0) {
20360Sstevel@tonic-gate fast_unlock:
20370Sstevel@tonic-gate 			if (!(mp->mutex_lockword & WAITERMASK)) {
20380Sstevel@tonic-gate 				/* no waiter exists right now */
20390Sstevel@tonic-gate 				mp->mutex_owner = 0;
20400Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2041*4570Sraf 				if (atomic_swap_32(&mp->mutex_lockword, 0) &
20420Sstevel@tonic-gate 				    WAITERMASK) {
20430Sstevel@tonic-gate 					/* a waiter suddenly appeared */
20440Sstevel@tonic-gate 					no_preempt(self);
20450Sstevel@tonic-gate 					if ((lwpid = mutex_wakeup(mp)) != 0)
20460Sstevel@tonic-gate 						(void) __lwp_unpark(lwpid);
20470Sstevel@tonic-gate 					preempt(self);
20480Sstevel@tonic-gate 				}
20490Sstevel@tonic-gate 			} else if ((lwpid = mutex_unlock_queue(mp)) != 0) {
20500Sstevel@tonic-gate 				(void) __lwp_unpark(lwpid);
20510Sstevel@tonic-gate 				preempt(self);
20520Sstevel@tonic-gate 			}
20530Sstevel@tonic-gate 			return (0);
20540Sstevel@tonic-gate 		}
20550Sstevel@tonic-gate 		if (el)		/* error detection or lock statistics */
20560Sstevel@tonic-gate 			goto slow_unlock;
20570Sstevel@tonic-gate 		if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
20580Sstevel@tonic-gate 			/*
20590Sstevel@tonic-gate 			 * At this point we know that one or both of the
20600Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK are set.
20610Sstevel@tonic-gate 			 */
20620Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
20630Sstevel@tonic-gate 				return (EPERM);
20640Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
20650Sstevel@tonic-gate 				mp->mutex_rcount--;
20660Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
20670Sstevel@tonic-gate 				return (0);
20680Sstevel@tonic-gate 			}
20690Sstevel@tonic-gate 			goto fast_unlock;
20700Sstevel@tonic-gate 		}
20710Sstevel@tonic-gate 		if ((mtype &
20720Sstevel@tonic-gate 		    ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
20730Sstevel@tonic-gate 			/*
20740Sstevel@tonic-gate 			 * At this point we know that zero, one, or both of the
20750Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK are set and
20760Sstevel@tonic-gate 			 * that the USYNC_PROCESS flag is set.
20770Sstevel@tonic-gate 			 */
20780Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp))
20790Sstevel@tonic-gate 				return (EPERM);
20800Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
20810Sstevel@tonic-gate 				mp->mutex_rcount--;
20820Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
20830Sstevel@tonic-gate 				return (0);
20840Sstevel@tonic-gate 			}
20850Sstevel@tonic-gate 			if (mp->mutex_lockword & WAITERMASK)
20860Sstevel@tonic-gate 				mutex_unlock_process(mp);
20870Sstevel@tonic-gate 			else {
20880Sstevel@tonic-gate 				mp->mutex_owner = 0;
20890Sstevel@tonic-gate 				mp->mutex_ownerpid = 0;
20900Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2091*4570Sraf 				if (atomic_swap_32(&mp->mutex_lockword, 0) &
20920Sstevel@tonic-gate 				    WAITERMASK) {
20930Sstevel@tonic-gate 					no_preempt(self);
20940Sstevel@tonic-gate 					(void) ___lwp_mutex_wakeup(mp);
20950Sstevel@tonic-gate 					preempt(self);
20960Sstevel@tonic-gate 				}
20970Sstevel@tonic-gate 			}
20980Sstevel@tonic-gate 			return (0);
20990Sstevel@tonic-gate 		}
21000Sstevel@tonic-gate 	}
21010Sstevel@tonic-gate 
21020Sstevel@tonic-gate 	/* else do it the long way */
21030Sstevel@tonic-gate slow_unlock:
21040Sstevel@tonic-gate 	return (mutex_unlock_internal(mp));
21050Sstevel@tonic-gate }
21060Sstevel@tonic-gate 
21070Sstevel@tonic-gate /*
21080Sstevel@tonic-gate  * Internally to the library, almost all mutex lock/unlock actions
21090Sstevel@tonic-gate  * go through these lmutex_ functions, to protect critical regions.
21100Sstevel@tonic-gate  * We replicate a bit of code from __mutex_lock() and __mutex_unlock()
21110Sstevel@tonic-gate  * to make these functions faster since we know that the mutex type
21120Sstevel@tonic-gate  * of all internal locks is USYNC_THREAD.  We also know that internal
21130Sstevel@tonic-gate  * locking can never fail, so we panic if it does.
21140Sstevel@tonic-gate  */
21150Sstevel@tonic-gate void
21160Sstevel@tonic-gate lmutex_lock(mutex_t *mp)
21170Sstevel@tonic-gate {
21180Sstevel@tonic-gate 	ulwp_t *self = curthread;
21190Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
21200Sstevel@tonic-gate 
21210Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
21220Sstevel@tonic-gate 
21230Sstevel@tonic-gate 	enter_critical(self);
21240Sstevel@tonic-gate 	/*
21250Sstevel@tonic-gate 	 * Optimize the case of no lock statistics and only a single thread.
21260Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
21270Sstevel@tonic-gate 	 */
21280Sstevel@tonic-gate 	if (udp->uberflags.uf_all == 0) {
21290Sstevel@tonic-gate 		/*
21300Sstevel@tonic-gate 		 * Only one thread exists; the mutex must be free.
21310Sstevel@tonic-gate 		 */
21320Sstevel@tonic-gate 		ASSERT(mp->mutex_lockw == 0);
21330Sstevel@tonic-gate 		mp->mutex_lockw = LOCKSET;
21340Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
21350Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
21360Sstevel@tonic-gate 	} else {
21370Sstevel@tonic-gate 		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
21380Sstevel@tonic-gate 
21390Sstevel@tonic-gate 		if (!self->ul_schedctl_called)
21400Sstevel@tonic-gate 			(void) setup_schedctl();
21410Sstevel@tonic-gate 
21420Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
21430Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
21440Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
21450Sstevel@tonic-gate 		} else if (mutex_trylock_adaptive(mp) != 0) {
21460Sstevel@tonic-gate 			(void) mutex_lock_queue(self, msp, mp, NULL);
21470Sstevel@tonic-gate 		}
21480Sstevel@tonic-gate 
21490Sstevel@tonic-gate 		if (msp)
21500Sstevel@tonic-gate 			record_begin_hold(msp);
21510Sstevel@tonic-gate 	}
21520Sstevel@tonic-gate }
21530Sstevel@tonic-gate 
21540Sstevel@tonic-gate void
21550Sstevel@tonic-gate lmutex_unlock(mutex_t *mp)
21560Sstevel@tonic-gate {
21570Sstevel@tonic-gate 	ulwp_t *self = curthread;
21580Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
21590Sstevel@tonic-gate 
21600Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
21610Sstevel@tonic-gate 
21620Sstevel@tonic-gate 	/*
21630Sstevel@tonic-gate 	 * Optimize the case of no lock statistics and only a single thread.
21640Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
21650Sstevel@tonic-gate 	 */
21660Sstevel@tonic-gate 	if (udp->uberflags.uf_all == 0) {
21670Sstevel@tonic-gate 		/*
21680Sstevel@tonic-gate 		 * Only one thread exists so there can be no waiters.
21690Sstevel@tonic-gate 		 */
21700Sstevel@tonic-gate 		mp->mutex_owner = 0;
21710Sstevel@tonic-gate 		mp->mutex_lockword = 0;
21720Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
21730Sstevel@tonic-gate 	} else {
21740Sstevel@tonic-gate 		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
21750Sstevel@tonic-gate 		lwpid_t lwpid;
21760Sstevel@tonic-gate 
21770Sstevel@tonic-gate 		if (msp)
21780Sstevel@tonic-gate 			(void) record_hold_time(msp);
21790Sstevel@tonic-gate 		if ((lwpid = mutex_unlock_queue(mp)) != 0) {
21800Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid);
21810Sstevel@tonic-gate 			preempt(self);
21820Sstevel@tonic-gate 		}
21830Sstevel@tonic-gate 	}
21840Sstevel@tonic-gate 	exit_critical(self);
21850Sstevel@tonic-gate }
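
/*
 * Editor's illustration: a libc-internal caller brackets its critical
 * region with the pair above.  The lock name is hypothetical, but the
 * shape matches the USYNC_THREAD-only contract described above:
 *
 *	static mutex_t libc_private_lock = DEFAULTMUTEX;
 *
 *	lmutex_lock(&libc_private_lock);
 *	(manipulate libc-private state; this cannot fail)
 *	lmutex_unlock(&libc_private_lock);
 */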
21860Sstevel@tonic-gate 
21872248Sraf /*
21882248Sraf  * For specialized code in libc, like the asynchronous i/o code,
21892248Sraf  * the following sig_*() locking primitives are used in order
21902248Sraf  * to make the code asynchronous signal safe.  Signals are
21912248Sraf  * deferred while locks acquired by these functions are held.
21922248Sraf  */
21932248Sraf void
21942248Sraf sig_mutex_lock(mutex_t *mp)
21952248Sraf {
21962248Sraf 	sigoff(curthread);
21972248Sraf 	(void) _private_mutex_lock(mp);
21982248Sraf }
21992248Sraf 
22002248Sraf void
22012248Sraf sig_mutex_unlock(mutex_t *mp)
22022248Sraf {
22032248Sraf 	(void) _private_mutex_unlock(mp);
22042248Sraf 	sigon(curthread);
22052248Sraf }
22062248Sraf 
22072248Sraf int
22082248Sraf sig_mutex_trylock(mutex_t *mp)
22092248Sraf {
22102248Sraf 	int error;
22112248Sraf 
22122248Sraf 	sigoff(curthread);
22132248Sraf 	if ((error = _private_mutex_trylock(mp)) != 0)
22142248Sraf 		sigon(curthread);
22152248Sraf 	return (error);
22162248Sraf }
22172248Sraf 
22182248Sraf /*
22192248Sraf  * sig_cond_wait() is a cancellation point.
22202248Sraf  */
22212248Sraf int
22222248Sraf sig_cond_wait(cond_t *cv, mutex_t *mp)
22232248Sraf {
22242248Sraf 	int error;
22252248Sraf 
22262248Sraf 	ASSERT(curthread->ul_sigdefer != 0);
22272248Sraf 	_private_testcancel();
22282248Sraf 	error = _cond_wait(cv, mp);
22292248Sraf 	if (error == EINTR && curthread->ul_cursig) {
22302248Sraf 		sig_mutex_unlock(mp);
22312248Sraf 		/* take the deferred signal here */
22322248Sraf 		sig_mutex_lock(mp);
22332248Sraf 	}
22342248Sraf 	_private_testcancel();
22352248Sraf 	return (error);
22362248Sraf }
22372248Sraf 
22382248Sraf /*
22392248Sraf  * sig_cond_reltimedwait() is a cancellation point.
22402248Sraf  */
22412248Sraf int
22422248Sraf sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
22432248Sraf {
22442248Sraf 	int error;
22452248Sraf 
22462248Sraf 	ASSERT(curthread->ul_sigdefer != 0);
22472248Sraf 	_private_testcancel();
22482248Sraf 	error = _cond_reltimedwait(cv, mp, ts);
22492248Sraf 	if (error == EINTR && curthread->ul_cursig) {
22502248Sraf 		sig_mutex_unlock(mp);
22512248Sraf 		/* take the deferred signal here */
22522248Sraf 		sig_mutex_lock(mp);
22532248Sraf 	}
22542248Sraf 	_private_testcancel();
22552248Sraf 	return (error);
22562248Sraf }
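
/*
 * Editor's illustration: specialized code such as the asynchronous
 * i/o code uses the sig_*() primitives above in the classic monitor
 * pattern, with signals deferred for the whole region.  The names
 * below are hypothetical:
 *
 *	sig_mutex_lock(&aio_lock);
 *	while (!work_ready)
 *		(void) sig_cond_wait(&aio_cv, &aio_lock);
 *	(consume the work with signals still deferred)
 *	sig_mutex_unlock(&aio_lock);
 */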
22572248Sraf 
22580Sstevel@tonic-gate static int
22590Sstevel@tonic-gate shared_mutex_held(mutex_t *mparg)
22600Sstevel@tonic-gate {
22610Sstevel@tonic-gate 	/*
22620Sstevel@tonic-gate 	 * There is an inherent data race in the current ownership design.
22630Sstevel@tonic-gate 	 * The mutex_owner and mutex_ownerpid fields cannot be set or tested
22640Sstevel@tonic-gate 	 * atomically as a pair. The original implementation tested each
22650Sstevel@tonic-gate 	 * field just once. This was exposed to trivial false positives in
22660Sstevel@tonic-gate 	 * the case of multiple multithreaded processes with thread addresses
22670Sstevel@tonic-gate 	 * in common. To close the window to an acceptable level we now use a
22680Sstevel@tonic-gate 	 * sequence of five tests: pid-thr-pid-thr-pid. This ensures that any
22690Sstevel@tonic-gate 	 * single interruption will still leave one uninterrupted sequence of
22700Sstevel@tonic-gate 	 * pid-thr-pid tests intact.
22710Sstevel@tonic-gate 	 *
22720Sstevel@tonic-gate 	 * It is assumed that all updates are always ordered thr-pid and that
22730Sstevel@tonic-gate 	 * we have TSO hardware.
22740Sstevel@tonic-gate 	 */
22750Sstevel@tonic-gate 	volatile mutex_t *mp = (volatile mutex_t *)mparg;
22760Sstevel@tonic-gate 	ulwp_t *self = curthread;
22770Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
22780Sstevel@tonic-gate 
22790Sstevel@tonic-gate 	if (mp->mutex_ownerpid != udp->pid)
22800Sstevel@tonic-gate 		return (0);
22810Sstevel@tonic-gate 
22820Sstevel@tonic-gate 	if (!MUTEX_OWNED(mp, self))
22830Sstevel@tonic-gate 		return (0);
22840Sstevel@tonic-gate 
22850Sstevel@tonic-gate 	if (mp->mutex_ownerpid != udp->pid)
22860Sstevel@tonic-gate 		return (0);
22870Sstevel@tonic-gate 
22880Sstevel@tonic-gate 	if (!MUTEX_OWNED(mp, self))
22890Sstevel@tonic-gate 		return (0);
22900Sstevel@tonic-gate 
22910Sstevel@tonic-gate 	if (mp->mutex_ownerpid != udp->pid)
22920Sstevel@tonic-gate 		return (0);
22930Sstevel@tonic-gate 
22940Sstevel@tonic-gate 	return (1);
22950Sstevel@tonic-gate }
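
/*
 * Editor's worked illustration of the claim above: label the five
 * tests P1 T1 P2 T2 P3.  A single interruption can fall into only one
 * of the four gaps between them, so at least one of the overlapping
 * triples P1-T1-P2 and P2-T2-P3 executes uninterrupted, which is the
 * window-narrowing guarantee the comment relies on.
 */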
22960Sstevel@tonic-gate 
22970Sstevel@tonic-gate /*
22980Sstevel@tonic-gate  * Some crufty old programs define their own version of _mutex_held()
22990Sstevel@tonic-gate  * to be simply return(1).  This breaks internal libc logic, so we
23000Sstevel@tonic-gate  * define a private version for exclusive use by libc, mutex_is_held(),
23010Sstevel@tonic-gate  * and also a new public function, __mutex_held(), to be used in new
23020Sstevel@tonic-gate  * code to circumvent these crufty old programs.
23030Sstevel@tonic-gate  */
23040Sstevel@tonic-gate #pragma weak mutex_held = mutex_is_held
23050Sstevel@tonic-gate #pragma weak _mutex_held = mutex_is_held
23060Sstevel@tonic-gate #pragma weak __mutex_held = mutex_is_held
23070Sstevel@tonic-gate int
23080Sstevel@tonic-gate mutex_is_held(mutex_t *mp)
23090Sstevel@tonic-gate {
23100Sstevel@tonic-gate 	if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST))
23110Sstevel@tonic-gate 		return (shared_mutex_held(mp));
23120Sstevel@tonic-gate 	return (MUTEX_OWNED(mp, curthread));
23130Sstevel@tonic-gate }
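/*
 * For illustration only (hypothetical application code, never libc):
 * the kind of interposing definition the comment above warns about.
 * Linked into a program, it overrides the weak _mutex_held() symbol
 * and makes every held-lock assertion in that program vacuous.  libc
 * is immune because it calls mutex_is_held() internally; new code
 * should call __mutex_held() instead.
 */
#if 0
int
_mutex_held(mutex_t *mp)
{
	return (1);		/* "trust me, it's held" */
}
#endif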
23140Sstevel@tonic-gate 
23150Sstevel@tonic-gate #pragma weak _private_mutex_destroy = __mutex_destroy
23160Sstevel@tonic-gate #pragma weak mutex_destroy = __mutex_destroy
23170Sstevel@tonic-gate #pragma weak _mutex_destroy = __mutex_destroy
23180Sstevel@tonic-gate #pragma weak pthread_mutex_destroy = __mutex_destroy
23190Sstevel@tonic-gate #pragma weak _pthread_mutex_destroy = __mutex_destroy
23200Sstevel@tonic-gate int
23210Sstevel@tonic-gate __mutex_destroy(mutex_t *mp)
23220Sstevel@tonic-gate {
23230Sstevel@tonic-gate 	mp->mutex_magic = 0;
23240Sstevel@tonic-gate 	mp->mutex_flag &= ~LOCK_INITED;
23250Sstevel@tonic-gate 	tdb_sync_obj_deregister(mp);
23260Sstevel@tonic-gate 	return (0);
23270Sstevel@tonic-gate }
23280Sstevel@tonic-gate 
23290Sstevel@tonic-gate /*
23300Sstevel@tonic-gate  * Spin locks are separate from ordinary mutexes,
23310Sstevel@tonic-gate  * but we use the same data structure for them.
23320Sstevel@tonic-gate  */
23330Sstevel@tonic-gate 
23340Sstevel@tonic-gate #pragma weak pthread_spin_init = _pthread_spin_init
23350Sstevel@tonic-gate int
23360Sstevel@tonic-gate _pthread_spin_init(pthread_spinlock_t *lock, int pshared)
23370Sstevel@tonic-gate {
23380Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
23390Sstevel@tonic-gate 
23400Sstevel@tonic-gate 	(void) _memset(mp, 0, sizeof (*mp));
23410Sstevel@tonic-gate 	if (pshared == PTHREAD_PROCESS_SHARED)
23420Sstevel@tonic-gate 		mp->mutex_type = USYNC_PROCESS;
23430Sstevel@tonic-gate 	else
23440Sstevel@tonic-gate 		mp->mutex_type = USYNC_THREAD;
23450Sstevel@tonic-gate 	mp->mutex_flag = LOCK_INITED;
23460Sstevel@tonic-gate 	mp->mutex_magic = MUTEX_MAGIC;
23470Sstevel@tonic-gate 	return (0);
23480Sstevel@tonic-gate }
23490Sstevel@tonic-gate 
23500Sstevel@tonic-gate #pragma weak pthread_spin_destroy = _pthread_spin_destroy
23510Sstevel@tonic-gate int
23520Sstevel@tonic-gate _pthread_spin_destroy(pthread_spinlock_t *lock)
23530Sstevel@tonic-gate {
23540Sstevel@tonic-gate 	(void) _memset(lock, 0, sizeof (*lock));
23550Sstevel@tonic-gate 	return (0);
23560Sstevel@tonic-gate }
23570Sstevel@tonic-gate 
23580Sstevel@tonic-gate #pragma weak pthread_spin_trylock = _pthread_spin_trylock
23590Sstevel@tonic-gate int
23600Sstevel@tonic-gate _pthread_spin_trylock(pthread_spinlock_t *lock)
23610Sstevel@tonic-gate {
23620Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
23630Sstevel@tonic-gate 	ulwp_t *self = curthread;
23640Sstevel@tonic-gate 	int error = 0;
23650Sstevel@tonic-gate 
23660Sstevel@tonic-gate 	no_preempt(self);
23670Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) != 0)
23680Sstevel@tonic-gate 		error = EBUSY;
23690Sstevel@tonic-gate 	else {
23700Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
23710Sstevel@tonic-gate 		if (mp->mutex_type == USYNC_PROCESS)
23720Sstevel@tonic-gate 			mp->mutex_ownerpid = self->ul_uberdata->pid;
23730Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
23740Sstevel@tonic-gate 	}
23750Sstevel@tonic-gate 	preempt(self);
23760Sstevel@tonic-gate 	return (error);
23770Sstevel@tonic-gate }
23780Sstevel@tonic-gate 
23790Sstevel@tonic-gate #pragma weak pthread_spin_lock = _pthread_spin_lock
23800Sstevel@tonic-gate int
23810Sstevel@tonic-gate _pthread_spin_lock(pthread_spinlock_t *lock)
23820Sstevel@tonic-gate {
23830Sstevel@tonic-gate 	volatile uint8_t *lockp =
23840Sstevel@tonic-gate 		(volatile uint8_t *)&((mutex_t *)lock)->mutex_lockw;
23850Sstevel@tonic-gate 
23860Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
23870Sstevel@tonic-gate 	/*
23880Sstevel@tonic-gate 	 * We don't care whether the owner is running on a processor.
23890Sstevel@tonic-gate 	 * We just spin because that's what this interface requires.
23900Sstevel@tonic-gate 	 */
23910Sstevel@tonic-gate 	for (;;) {
23920Sstevel@tonic-gate 		if (*lockp == 0) {	/* lock byte appears to be clear */
23930Sstevel@tonic-gate 			if (_pthread_spin_trylock(lock) == 0)
23940Sstevel@tonic-gate 				return (0);
23950Sstevel@tonic-gate 		}
23960Sstevel@tonic-gate 		SMT_PAUSE();
23970Sstevel@tonic-gate 	}
23980Sstevel@tonic-gate }
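/*
 * The loop above is the classic test-and-test-and-set pattern: spin
 * on an ordinary load until the lock byte appears clear, and only
 * then pay for the atomic grab.  A rough portable sketch of the same
 * idea using C11 atomics (hypothetical; libc predates <stdatomic.h>
 * and uses set_lock_byte() and SMT_PAUSE() instead):
 */
#if 0
#include <stdatomic.h>

static void
ttas_lock(volatile atomic_uchar *lk)
{
	for (;;) {
		/* cheap read: wait until the lock appears clear */
		if (atomic_load_explicit(lk, memory_order_relaxed) == 0 &&
		    atomic_exchange_explicit(lk, 1,
		    memory_order_acquire) == 0)
			return;
		/* the moral equivalent of SMT_PAUSE() goes here */
	}
}
#endif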
23990Sstevel@tonic-gate 
24000Sstevel@tonic-gate #pragma weak pthread_spin_unlock = _pthread_spin_unlock
24010Sstevel@tonic-gate int
24020Sstevel@tonic-gate _pthread_spin_unlock(pthread_spinlock_t *lock)
24030Sstevel@tonic-gate {
24040Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
24050Sstevel@tonic-gate 	ulwp_t *self = curthread;
24060Sstevel@tonic-gate 
24070Sstevel@tonic-gate 	no_preempt(self);
24080Sstevel@tonic-gate 	mp->mutex_owner = 0;
24090Sstevel@tonic-gate 	mp->mutex_ownerpid = 0;
24100Sstevel@tonic-gate 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
2411*4570Sraf 	(void) atomic_swap_32(&mp->mutex_lockword, 0);
24120Sstevel@tonic-gate 	preempt(self);
24130Sstevel@tonic-gate 	return (0);
24140Sstevel@tonic-gate }
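/*
 * A minimal lifecycle sketch for the spin lock interfaces above,
 * written against the standard pthreads API (hypothetical code):
 */
#if 0
static pthread_spinlock_t slk;
static long counter;

static void
spin_sketch(void)
{
	(void) pthread_spin_init(&slk, PTHREAD_PROCESS_PRIVATE);
	(void) pthread_spin_lock(&slk);		/* spins, never sleeps */
	counter++;				/* keep the section short */
	(void) pthread_spin_unlock(&slk);
	(void) pthread_spin_destroy(&slk);
}
#endif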
24150Sstevel@tonic-gate 
24160Sstevel@tonic-gate #pragma weak cond_init = _cond_init
24170Sstevel@tonic-gate /* ARGSUSED2 */
24180Sstevel@tonic-gate int
24190Sstevel@tonic-gate _cond_init(cond_t *cvp, int type, void *arg)
24200Sstevel@tonic-gate {
24210Sstevel@tonic-gate 	if (type != USYNC_THREAD && type != USYNC_PROCESS)
24220Sstevel@tonic-gate 		return (EINVAL);
24230Sstevel@tonic-gate 	(void) _memset(cvp, 0, sizeof (*cvp));
24240Sstevel@tonic-gate 	cvp->cond_type = (uint16_t)type;
24250Sstevel@tonic-gate 	cvp->cond_magic = COND_MAGIC;
24260Sstevel@tonic-gate 	return (0);
24270Sstevel@tonic-gate }
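/*
 * Usage sketch (hypothetical fragment): initializing a process-shared
 * condition variable in MAP_SHARED memory.  A USYNC_PROCESS condvar is
 * what sends cond_wait_common(), below, down the kernel path.
 */
#if 0
	cond_t *cvp = &shared_region->cv;	/* hypothetical mapping */

	if (cond_init(cvp, USYNC_PROCESS, NULL) != 0)
		abort();
#endif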
24280Sstevel@tonic-gate 
24290Sstevel@tonic-gate /*
24300Sstevel@tonic-gate  * cond_sleep_queue(): utility function for cond_wait_queue().
24310Sstevel@tonic-gate  *
24320Sstevel@tonic-gate  * Go to sleep on a condvar sleep queue, expect to be waked up
24330Sstevel@tonic-gate  * by someone calling cond_signal() or cond_broadcast() or due
24340Sstevel@tonic-gate  * to receiving a UNIX signal or being cancelled, or simply
24350Sstevel@tonic-gate  * due to a spurious wakeup (like someone calling forkall()).
24360Sstevel@tonic-gate  *
24370Sstevel@tonic-gate  * The associated mutex is *not* reacquired before returning.
24380Sstevel@tonic-gate  * That must be done by the caller of cond_sleep_queue().
24390Sstevel@tonic-gate  */
24400Sstevel@tonic-gate int
24410Sstevel@tonic-gate cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
24420Sstevel@tonic-gate {
24430Sstevel@tonic-gate 	ulwp_t *self = curthread;
24440Sstevel@tonic-gate 	queue_head_t *qp;
24450Sstevel@tonic-gate 	queue_head_t *mqp;
24460Sstevel@tonic-gate 	lwpid_t lwpid;
24470Sstevel@tonic-gate 	int signalled;
24480Sstevel@tonic-gate 	int error;
24490Sstevel@tonic-gate 
24500Sstevel@tonic-gate 	/*
24510Sstevel@tonic-gate 	 * Put ourself on the CV sleep queue, unlock the mutex, then
24520Sstevel@tonic-gate 	 * park ourself and unpark a candidate lwp to grab the mutex.
24530Sstevel@tonic-gate 	 * We must go onto the CV sleep queue before dropping the
24540Sstevel@tonic-gate 	 * mutex in order to guarantee atomicity of the operation.
24550Sstevel@tonic-gate 	 */
24560Sstevel@tonic-gate 	self->ul_sp = stkptr();
24570Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
24580Sstevel@tonic-gate 	enqueue(qp, self, cvp, CV);
24590Sstevel@tonic-gate 	cvp->cond_waiters_user = 1;
24600Sstevel@tonic-gate 	self->ul_cvmutex = mp;
24610Sstevel@tonic-gate 	self->ul_cv_wake = (tsp != NULL);
24620Sstevel@tonic-gate 	self->ul_signalled = 0;
24630Sstevel@tonic-gate 	lwpid = mutex_unlock_queue(mp);
24640Sstevel@tonic-gate 	for (;;) {
24650Sstevel@tonic-gate 		set_parking_flag(self, 1);
24660Sstevel@tonic-gate 		queue_unlock(qp);
24670Sstevel@tonic-gate 		if (lwpid != 0) {
24680Sstevel@tonic-gate 			lwpid = preempt_unpark(self, lwpid);
24690Sstevel@tonic-gate 			preempt(self);
24700Sstevel@tonic-gate 		}
24710Sstevel@tonic-gate 		/*
24720Sstevel@tonic-gate 		 * We may have a deferred signal present,
24730Sstevel@tonic-gate 		 * in which case we should return EINTR.
24740Sstevel@tonic-gate 		 * Also, we may have received a SIGCANCEL; if so
24750Sstevel@tonic-gate 		 * and we are cancelable we should return EINTR.
24760Sstevel@tonic-gate 		 * We force an immediate EINTR return from
24770Sstevel@tonic-gate 		 * __lwp_park() by turning our parking flag off.
24780Sstevel@tonic-gate 		 */
24790Sstevel@tonic-gate 		if (self->ul_cursig != 0 ||
24800Sstevel@tonic-gate 		    (self->ul_cancelable && self->ul_cancel_pending))
24810Sstevel@tonic-gate 			set_parking_flag(self, 0);
24820Sstevel@tonic-gate 		/*
24830Sstevel@tonic-gate 		 * __lwp_park() will return the residual time in tsp
24840Sstevel@tonic-gate 		 * if we are unparked before the timeout expires.
24850Sstevel@tonic-gate 		 */
24860Sstevel@tonic-gate 		error = __lwp_park(tsp, lwpid);
24870Sstevel@tonic-gate 		set_parking_flag(self, 0);
24880Sstevel@tonic-gate 		lwpid = 0;	/* unpark the other lwp only once */
24890Sstevel@tonic-gate 		/*
24900Sstevel@tonic-gate 		 * We were waked up by cond_signal(), cond_broadcast(),
24910Sstevel@tonic-gate 		 * by an interrupt or timeout (EINTR or ETIME),
24920Sstevel@tonic-gate 		 * or we may just have gotten a spurious wakeup.
24930Sstevel@tonic-gate 		 */
24940Sstevel@tonic-gate 		qp = queue_lock(cvp, CV);
24950Sstevel@tonic-gate 		mqp = queue_lock(mp, MX);
24960Sstevel@tonic-gate 		if (self->ul_sleepq == NULL)
24970Sstevel@tonic-gate 			break;
24980Sstevel@tonic-gate 		/*
24990Sstevel@tonic-gate 		 * We are on either the condvar sleep queue or the
25001893Sraf 		 * mutex sleep queue.  Break out of the sleep if we
25011893Sraf 		 * were interrupted or we timed out (EINTR or ETIME).
25020Sstevel@tonic-gate 		 * Else this is a spurious wakeup; continue the loop.
25030Sstevel@tonic-gate 		 */
25041893Sraf 		if (self->ul_sleepq == mqp) {		/* mutex queue */
25051893Sraf 			if (error) {
25061893Sraf 				mp->mutex_waiters = dequeue_self(mqp, mp);
25071893Sraf 				break;
25081893Sraf 			}
25091893Sraf 			tsp = NULL;	/* no more timeout */
25101893Sraf 		} else if (self->ul_sleepq == qp) {	/* condvar queue */
25110Sstevel@tonic-gate 			if (error) {
25120Sstevel@tonic-gate 				cvp->cond_waiters_user = dequeue_self(qp, cvp);
25130Sstevel@tonic-gate 				break;
25140Sstevel@tonic-gate 			}
25150Sstevel@tonic-gate 			/*
25160Sstevel@tonic-gate 			 * Else a spurious wakeup on the condvar queue.
25170Sstevel@tonic-gate 			 * __lwp_park() has already adjusted the timeout.
25180Sstevel@tonic-gate 			 */
25190Sstevel@tonic-gate 		} else {
25200Sstevel@tonic-gate 			thr_panic("cond_sleep_queue(): thread not on queue");
25210Sstevel@tonic-gate 		}
25220Sstevel@tonic-gate 		queue_unlock(mqp);
25230Sstevel@tonic-gate 	}
25240Sstevel@tonic-gate 
25250Sstevel@tonic-gate 	self->ul_sp = 0;
25260Sstevel@tonic-gate 	ASSERT(self->ul_cvmutex == NULL && self->ul_cv_wake == 0);
25270Sstevel@tonic-gate 	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
25280Sstevel@tonic-gate 	    self->ul_wchan == NULL);
25290Sstevel@tonic-gate 
25300Sstevel@tonic-gate 	signalled = self->ul_signalled;
25310Sstevel@tonic-gate 	self->ul_signalled = 0;
25320Sstevel@tonic-gate 	queue_unlock(qp);
25330Sstevel@tonic-gate 	queue_unlock(mqp);
25340Sstevel@tonic-gate 
25350Sstevel@tonic-gate 	/*
25360Sstevel@tonic-gate 	 * If we were concurrently cond_signal()d and we also received
25370Sstevel@tonic-gate 	 * a UNIX signal, were cancelled, or timed out, then re-issue
25380Sstevel@tonic-gate 	 * the cond_signal() so that it is not silently consumed.
25390Sstevel@tonic-gate 	 */
25400Sstevel@tonic-gate 	if (error && signalled)
25410Sstevel@tonic-gate 		(void) cond_signal_internal(cvp);
25420Sstevel@tonic-gate 
25430Sstevel@tonic-gate 	return (error);
25440Sstevel@tonic-gate }
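/*
 * Because cond_sleep_queue() can return for reasons other than
 * cond_signal()/cond_broadcast() (a UNIX signal, a timeout, forkall()),
 * callers of the condvar interfaces must re-test their predicate in a
 * loop.  The canonical pattern (hypothetical application code):
 */
#if 0
	(void) pthread_mutex_lock(&mp);
	while (!predicate)		/* tolerates spurious wakeups */
		(void) pthread_cond_wait(&cvp, &mp);
	/* predicate is true and the mutex is held here */
	(void) pthread_mutex_unlock(&mp);
#endif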
25450Sstevel@tonic-gate 
25460Sstevel@tonic-gate int
25470Sstevel@tonic-gate cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp,
25480Sstevel@tonic-gate 	tdb_mutex_stats_t *msp)
25490Sstevel@tonic-gate {
25500Sstevel@tonic-gate 	ulwp_t *self = curthread;
25510Sstevel@tonic-gate 	int error;
25520Sstevel@tonic-gate 
25530Sstevel@tonic-gate 	/*
25540Sstevel@tonic-gate 	 * The old thread library was programmed to defer signals
25550Sstevel@tonic-gate 	 * while in cond_wait() so that the associated mutex would
25560Sstevel@tonic-gate 	 * be guaranteed to be held when the application signal
25570Sstevel@tonic-gate 	 * handler was invoked.
25580Sstevel@tonic-gate 	 *
25590Sstevel@tonic-gate 	 * We do not behave this way by default; the state of the
25600Sstevel@tonic-gate 	 * associated mutex in the signal handler is undefined.
25610Sstevel@tonic-gate 	 *
25620Sstevel@tonic-gate 	 * To accommodate applications that depend on the old
25630Sstevel@tonic-gate 	 * behavior, the _THREAD_COND_WAIT_DEFER environment
25640Sstevel@tonic-gate 	 * variable can be set to 1 and we will behave in the
25650Sstevel@tonic-gate 	 * old way with respect to cond_wait().
25660Sstevel@tonic-gate 	 */
25670Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
25680Sstevel@tonic-gate 		sigoff(self);
25690Sstevel@tonic-gate 
25700Sstevel@tonic-gate 	error = cond_sleep_queue(cvp, mp, tsp);
25710Sstevel@tonic-gate 
25720Sstevel@tonic-gate 	/*
25730Sstevel@tonic-gate 	 * Reacquire the mutex.
25740Sstevel@tonic-gate 	 */
25750Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) == 0) {
25760Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
25770Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
25780Sstevel@tonic-gate 	} else if (mutex_trylock_adaptive(mp) != 0) {
25790Sstevel@tonic-gate 		(void) mutex_lock_queue(self, msp, mp, NULL);
25800Sstevel@tonic-gate 	}
25810Sstevel@tonic-gate 
25820Sstevel@tonic-gate 	if (msp)
25830Sstevel@tonic-gate 		record_begin_hold(msp);
25840Sstevel@tonic-gate 
25850Sstevel@tonic-gate 	/*
25860Sstevel@tonic-gate 	 * Take any deferred signal now, after we have reacquired the mutex.
25870Sstevel@tonic-gate 	 */
25880Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
25890Sstevel@tonic-gate 		sigon(self);
25900Sstevel@tonic-gate 
25910Sstevel@tonic-gate 	return (error);
25920Sstevel@tonic-gate }
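/*
 * An illustration of the dependence described above (hypothetical
 * legacy code): this handler touches data protected by the mutex
 * passed to cond_wait() without taking the lock.  That is safe only
 * under the old deferred-signal behavior, that is, only when the
 * process runs with _THREAD_COND_WAIT_DEFER=1 in its environment.
 */
#if 0
static volatile int shared_count;	/* nominally protected by mp */

static void
legacy_handler(int sig)
{
	shared_count++;		/* assumes the interrupted cond_wait()
				   still holds mp; undefined by default */
}
#endif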
25930Sstevel@tonic-gate 
25940Sstevel@tonic-gate /*
25950Sstevel@tonic-gate  * cond_sleep_kernel(): utility function for cond_wait_kernel().
25960Sstevel@tonic-gate  * See the comment ahead of cond_sleep_queue(), above.
25970Sstevel@tonic-gate  */
25980Sstevel@tonic-gate int
25990Sstevel@tonic-gate cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
26000Sstevel@tonic-gate {
26010Sstevel@tonic-gate 	int mtype = mp->mutex_type;
26020Sstevel@tonic-gate 	ulwp_t *self = curthread;
26030Sstevel@tonic-gate 	int error;
26040Sstevel@tonic-gate 
26050Sstevel@tonic-gate 	if (mtype & PTHREAD_PRIO_PROTECT) {
26060Sstevel@tonic-gate 		if (_ceil_mylist_del(mp))
26070Sstevel@tonic-gate 			_ceil_prio_waive();
26080Sstevel@tonic-gate 	}
26090Sstevel@tonic-gate 
26100Sstevel@tonic-gate 	self->ul_sp = stkptr();
26110Sstevel@tonic-gate 	self->ul_wchan = cvp;
26120Sstevel@tonic-gate 	mp->mutex_owner = 0;
26130Sstevel@tonic-gate 	mp->mutex_ownerpid = 0;
26140Sstevel@tonic-gate 	if (mtype & PTHREAD_PRIO_INHERIT)
26150Sstevel@tonic-gate 		mp->mutex_lockw = LOCKCLEAR;
26160Sstevel@tonic-gate 	/*
26170Sstevel@tonic-gate 	 * ___lwp_cond_wait() returns immediately with EINTR if
26180Sstevel@tonic-gate 	 * set_parking_flag(self,0) is called on this lwp before it
26190Sstevel@tonic-gate 	 * goes to sleep in the kernel.  sigacthandler() calls this
26200Sstevel@tonic-gate 	 * when a deferred signal is noted.  This assures that we don't
26210Sstevel@tonic-gate 	 * get stuck in ___lwp_cond_wait() with all signals blocked
26220Sstevel@tonic-gate 	 * due to taking a deferred signal before going to sleep.
26230Sstevel@tonic-gate 	 */
26240Sstevel@tonic-gate 	set_parking_flag(self, 1);
26250Sstevel@tonic-gate 	if (self->ul_cursig != 0 ||
26260Sstevel@tonic-gate 	    (self->ul_cancelable && self->ul_cancel_pending))
26270Sstevel@tonic-gate 		set_parking_flag(self, 0);
26280Sstevel@tonic-gate 	error = ___lwp_cond_wait(cvp, mp, tsp, 1);
26290Sstevel@tonic-gate 	set_parking_flag(self, 0);
26300Sstevel@tonic-gate 	self->ul_sp = 0;
26310Sstevel@tonic-gate 	self->ul_wchan = NULL;
26320Sstevel@tonic-gate 	return (error);
26330Sstevel@tonic-gate }
26340Sstevel@tonic-gate 
26350Sstevel@tonic-gate int
26360Sstevel@tonic-gate cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
26370Sstevel@tonic-gate {
26380Sstevel@tonic-gate 	ulwp_t *self = curthread;
26390Sstevel@tonic-gate 	int error;
26400Sstevel@tonic-gate 	int merror;
26410Sstevel@tonic-gate 
26420Sstevel@tonic-gate 	/*
26430Sstevel@tonic-gate 	 * See the large comment in cond_wait_queue(), above.
26440Sstevel@tonic-gate 	 */
26450Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
26460Sstevel@tonic-gate 		sigoff(self);
26470Sstevel@tonic-gate 
26480Sstevel@tonic-gate 	error = cond_sleep_kernel(cvp, mp, tsp);
26490Sstevel@tonic-gate 
26500Sstevel@tonic-gate 	/*
26510Sstevel@tonic-gate 	 * Override the return code from ___lwp_cond_wait()
26520Sstevel@tonic-gate 	 * with any non-zero return code from mutex_lock().
26530Sstevel@tonic-gate 	 * This addresses robust lock failures in particular;
26540Sstevel@tonic-gate 	 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
26550Sstevel@tonic-gate 	 * errors in order to take corrective action.
26560Sstevel@tonic-gate 	 */
26570Sstevel@tonic-gate 	if ((merror = _private_mutex_lock(mp)) != 0)
26580Sstevel@tonic-gate 		error = merror;
26590Sstevel@tonic-gate 
26600Sstevel@tonic-gate 	/*
26610Sstevel@tonic-gate 	 * Take any deferred signal now, after we have reacquired the mutex.
26620Sstevel@tonic-gate 	 */
26630Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
26640Sstevel@tonic-gate 		sigon(self);
26650Sstevel@tonic-gate 
26660Sstevel@tonic-gate 	return (error);
26670Sstevel@tonic-gate }
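/*
 * Why the mutex_lock() error must not be lost: a robust-mutex caller
 * has to see EOWNERDEAD to repair shared state before continuing.
 * Hypothetical fragment, in standard pthreads spelling (contemporary
 * Solaris spells it pthread_mutex_consistent_np()):
 */
#if 0
	int rc = pthread_cond_wait(&cvp, &mp);

	if (rc == EOWNERDEAD) {
		repair_shared_state();			/* hypothetical */
		(void) pthread_mutex_consistent(&mp);	/* mark repaired */
	} else if (rc == ENOTRECOVERABLE) {
		give_up();				/* hypothetical */
	}
#endif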
26680Sstevel@tonic-gate 
26690Sstevel@tonic-gate /*
26700Sstevel@tonic-gate  * Common code for _cond_wait() and _cond_timedwait()
26710Sstevel@tonic-gate  */
26720Sstevel@tonic-gate int
26730Sstevel@tonic-gate cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
26740Sstevel@tonic-gate {
26750Sstevel@tonic-gate 	int mtype = mp->mutex_type;
26760Sstevel@tonic-gate 	hrtime_t begin_sleep = 0;
26770Sstevel@tonic-gate 	ulwp_t *self = curthread;
26780Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
26790Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
26800Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
26810Sstevel@tonic-gate 	uint8_t rcount;
26820Sstevel@tonic-gate 	int error = 0;
26830Sstevel@tonic-gate 
26840Sstevel@tonic-gate 	/*
26850Sstevel@tonic-gate 	 * The SUSv3 POSIX spec for pthread_cond_timedwait() states:
26860Sstevel@tonic-gate 	 *	Except in the case of [ETIMEDOUT], all these error checks
26870Sstevel@tonic-gate 	 *	shall act as if they were performed immediately at the
26880Sstevel@tonic-gate 	 *	beginning of processing for the function and shall cause
26890Sstevel@tonic-gate 	 *	an error return, in effect, prior to modifying the state
26900Sstevel@tonic-gate 	 *	of the mutex specified by mutex or the condition variable
26910Sstevel@tonic-gate 	 *	specified by cond.
26920Sstevel@tonic-gate 	 * Therefore, we must return EINVAL now if the timeout is invalid.
26930Sstevel@tonic-gate 	 */
26940Sstevel@tonic-gate 	if (tsp != NULL &&
26950Sstevel@tonic-gate 	    (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
26960Sstevel@tonic-gate 		return (EINVAL);
26970Sstevel@tonic-gate 
26980Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
26990Sstevel@tonic-gate 		self->ul_sp = stkptr();
27000Sstevel@tonic-gate 		self->ul_wchan = cvp;
27010Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
27020Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = cvp;
27030Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
27040Sstevel@tonic-gate 		self->ul_sp = 0;
27050Sstevel@tonic-gate 	}
27060Sstevel@tonic-gate 	if (csp) {
27070Sstevel@tonic-gate 		if (tsp)
27080Sstevel@tonic-gate 			tdb_incr(csp->cond_timedwait);
27090Sstevel@tonic-gate 		else
27100Sstevel@tonic-gate 			tdb_incr(csp->cond_wait);
27110Sstevel@tonic-gate 	}
27120Sstevel@tonic-gate 	if (msp)
27130Sstevel@tonic-gate 		begin_sleep = record_hold_time(msp);
27140Sstevel@tonic-gate 	else if (csp)
27150Sstevel@tonic-gate 		begin_sleep = gethrtime();
27160Sstevel@tonic-gate 
27170Sstevel@tonic-gate 	if (self->ul_error_detection) {
27180Sstevel@tonic-gate 		if (!mutex_is_held(mp))
27190Sstevel@tonic-gate 			lock_error(mp, "cond_wait", cvp, NULL);
27200Sstevel@tonic-gate 		if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
27210Sstevel@tonic-gate 			lock_error(mp, "recursive mutex in cond_wait",
27220Sstevel@tonic-gate 				cvp, NULL);
27230Sstevel@tonic-gate 		if (cvp->cond_type & USYNC_PROCESS) {
27240Sstevel@tonic-gate 			if (!(mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)))
27250Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
27260Sstevel@tonic-gate 					"condvar process-shared, "
27270Sstevel@tonic-gate 					"mutex process-private");
27280Sstevel@tonic-gate 		} else {
27290Sstevel@tonic-gate 			if (mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST))
27300Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
27310Sstevel@tonic-gate 					"condvar process-private, "
27320Sstevel@tonic-gate 					"mutex process-shared");
27330Sstevel@tonic-gate 		}
27340Sstevel@tonic-gate 	}
27350Sstevel@tonic-gate 
27360Sstevel@tonic-gate 	/*
27370Sstevel@tonic-gate 	 * We deal with recursive mutexes by completely
27380Sstevel@tonic-gate 	 * dropping the lock and restoring the recursion
27390Sstevel@tonic-gate 	 * count after waking up.  This is arguably wrong,
27400Sstevel@tonic-gate 	 * but it obeys the principle of least astonishment.
27410Sstevel@tonic-gate 	 */
27420Sstevel@tonic-gate 	rcount = mp->mutex_rcount;
27430Sstevel@tonic-gate 	mp->mutex_rcount = 0;
27440Sstevel@tonic-gate 	if ((mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST |
27450Sstevel@tonic-gate 	    PTHREAD_PRIO_INHERIT | PTHREAD_PRIO_PROTECT)) |
27460Sstevel@tonic-gate 	    (cvp->cond_type & USYNC_PROCESS))
27470Sstevel@tonic-gate 		error = cond_wait_kernel(cvp, mp, tsp);
27480Sstevel@tonic-gate 	else
27490Sstevel@tonic-gate 		error = cond_wait_queue(cvp, mp, tsp, msp);
27500Sstevel@tonic-gate 	mp->mutex_rcount = rcount;
27510Sstevel@tonic-gate 
27520Sstevel@tonic-gate 	if (csp) {
27530Sstevel@tonic-gate 		hrtime_t lapse = gethrtime() - begin_sleep;
27540Sstevel@tonic-gate 		if (tsp == NULL)
27550Sstevel@tonic-gate 			csp->cond_wait_sleep_time += lapse;
27560Sstevel@tonic-gate 		else {
27570Sstevel@tonic-gate 			csp->cond_timedwait_sleep_time += lapse;
27580Sstevel@tonic-gate 			if (error == ETIME)
27590Sstevel@tonic-gate 				tdb_incr(csp->cond_timedwait_timeout);
27600Sstevel@tonic-gate 		}
27610Sstevel@tonic-gate 	}
27620Sstevel@tonic-gate 	return (error);
27630Sstevel@tonic-gate }
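/*
 * A consequence of the recursion handling above (hypothetical
 * fragment): even when held recursively, the mutex is released in
 * full for the duration of the wait, so another thread may acquire
 * it while we sleep; the recursion count reappears only after the
 * waiter has reacquired the lock.
 */
#if 0
	(void) mutex_lock(&rmp);	/* LOCK_RECURSIVE mutex, count 1 */
	(void) mutex_lock(&rmp);	/* count 2 */
	(void) cond_wait(&cvp, &rmp);	/* rmp fully dropped while asleep */
	/* on return, rmp is held again with its count restored to 2 */
	(void) mutex_unlock(&rmp);
	(void) mutex_unlock(&rmp);
#endif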
27640Sstevel@tonic-gate 
27650Sstevel@tonic-gate /*
27660Sstevel@tonic-gate  * cond_wait() is a cancellation point but _cond_wait() is not.
27670Sstevel@tonic-gate  * System libraries call the non-cancellation version.
27680Sstevel@tonic-gate  * It is expected that only applications call the cancellation version.
27690Sstevel@tonic-gate  */
27700Sstevel@tonic-gate int
27710Sstevel@tonic-gate _cond_wait(cond_t *cvp, mutex_t *mp)
27720Sstevel@tonic-gate {
27730Sstevel@tonic-gate 	ulwp_t *self = curthread;
27740Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
27750Sstevel@tonic-gate 	uberflags_t *gflags;
27760Sstevel@tonic-gate 
27770Sstevel@tonic-gate 	/*
27780Sstevel@tonic-gate 	 * Optimize the common case of USYNC_THREAD plus
27790Sstevel@tonic-gate 	 * no error detection, no lock statistics, and no event tracing.
27800Sstevel@tonic-gate 	 */
27810Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
27820Sstevel@tonic-gate 	    (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
27830Sstevel@tonic-gate 	    self->ul_td_events_enable |
27840Sstevel@tonic-gate 	    udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
27850Sstevel@tonic-gate 		return (cond_wait_queue(cvp, mp, NULL, NULL));
27860Sstevel@tonic-gate 
27870Sstevel@tonic-gate 	/*
27880Sstevel@tonic-gate 	 * Else do it the long way.
27890Sstevel@tonic-gate 	 */
27900Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, NULL));
27910Sstevel@tonic-gate }
27920Sstevel@tonic-gate 
27930Sstevel@tonic-gate int
27940Sstevel@tonic-gate cond_wait(cond_t *cvp, mutex_t *mp)
27950Sstevel@tonic-gate {
27960Sstevel@tonic-gate 	int error;
27970Sstevel@tonic-gate 
27980Sstevel@tonic-gate 	_cancelon();
27990Sstevel@tonic-gate 	error = _cond_wait(cvp, mp);
28000Sstevel@tonic-gate 	if (error == EINTR)
28010Sstevel@tonic-gate 		_canceloff();
28020Sstevel@tonic-gate 	else
28030Sstevel@tonic-gate 		_canceloff_nocancel();
28040Sstevel@tonic-gate 	return (error);
28050Sstevel@tonic-gate }
28060Sstevel@tonic-gate 
28070Sstevel@tonic-gate #pragma weak pthread_cond_wait = _pthread_cond_wait
28080Sstevel@tonic-gate int
28090Sstevel@tonic-gate _pthread_cond_wait(cond_t *cvp, mutex_t *mp)
28100Sstevel@tonic-gate {
28110Sstevel@tonic-gate 	int error;
28120Sstevel@tonic-gate 
28130Sstevel@tonic-gate 	error = cond_wait(cvp, mp);
28140Sstevel@tonic-gate 	return ((error == EINTR)? 0 : error);
28150Sstevel@tonic-gate }
28160Sstevel@tonic-gate 
28170Sstevel@tonic-gate /*
28180Sstevel@tonic-gate  * cond_timedwait() is a cancellation point but _cond_timedwait() is not.
28190Sstevel@tonic-gate  * System libraries call the non-cancellation version.
28200Sstevel@tonic-gate  * It is expected that only applications call the cancellation version.
28210Sstevel@tonic-gate  */
28220Sstevel@tonic-gate int
28230Sstevel@tonic-gate _cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
28240Sstevel@tonic-gate {
28250Sstevel@tonic-gate 	clockid_t clock_id = cvp->cond_clockid;
28260Sstevel@tonic-gate 	timespec_t reltime;
28270Sstevel@tonic-gate 	int error;
28280Sstevel@tonic-gate 
28290Sstevel@tonic-gate 	if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
28300Sstevel@tonic-gate 		clock_id = CLOCK_REALTIME;
28310Sstevel@tonic-gate 	abstime_to_reltime(clock_id, abstime, &reltime);
28320Sstevel@tonic-gate 	error = cond_wait_common(cvp, mp, &reltime);
28330Sstevel@tonic-gate 	if (error == ETIME && clock_id == CLOCK_HIGHRES) {
28340Sstevel@tonic-gate 		/*
28350Sstevel@tonic-gate 		 * Don't return ETIME if we didn't really get a timeout.
28360Sstevel@tonic-gate 		 * This can happen if we return because someone resets
28370Sstevel@tonic-gate 		 * the system clock.  Just return zero in this case,
28380Sstevel@tonic-gate 		 * giving a spurious wakeup but not a timeout.
28390Sstevel@tonic-gate 		 */
28400Sstevel@tonic-gate 		if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
28410Sstevel@tonic-gate 		    abstime->tv_nsec > gethrtime())
28420Sstevel@tonic-gate 			error = 0;
28430Sstevel@tonic-gate 	}
28440Sstevel@tonic-gate 	return (error);
28450Sstevel@tonic-gate }
28460Sstevel@tonic-gate 
28470Sstevel@tonic-gate int
28480Sstevel@tonic-gate cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
28490Sstevel@tonic-gate {
28500Sstevel@tonic-gate 	int error;
28510Sstevel@tonic-gate 
28520Sstevel@tonic-gate 	_cancelon();
28530Sstevel@tonic-gate 	error = _cond_timedwait(cvp, mp, abstime);
28540Sstevel@tonic-gate 	if (error == EINTR)
28550Sstevel@tonic-gate 		_canceloff();
28560Sstevel@tonic-gate 	else
28570Sstevel@tonic-gate 		_canceloff_nocancel();
28580Sstevel@tonic-gate 	return (error);
28590Sstevel@tonic-gate }
28600Sstevel@tonic-gate 
28610Sstevel@tonic-gate #pragma weak pthread_cond_timedwait = _pthread_cond_timedwait
28620Sstevel@tonic-gate int
28630Sstevel@tonic-gate _pthread_cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
28640Sstevel@tonic-gate {
28650Sstevel@tonic-gate 	int error;
28660Sstevel@tonic-gate 
28670Sstevel@tonic-gate 	error = cond_timedwait(cvp, mp, abstime);
28680Sstevel@tonic-gate 	if (error == ETIME)
28690Sstevel@tonic-gate 		error = ETIMEDOUT;
28700Sstevel@tonic-gate 	else if (error == EINTR)
28710Sstevel@tonic-gate 		error = 0;
28720Sstevel@tonic-gate 	return (error);
28730Sstevel@tonic-gate }
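/*
 * Usage sketch for the absolute-time interface above (hypothetical
 * application code): build the deadline from the condvar's clock
 * (CLOCK_REALTIME by default) and loop on the predicate, treating
 * ETIMEDOUT as the only terminal condition.
 */
#if 0
	timespec_t abstime;
	int rc = 0;

	(void) clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 5;			/* deadline: now + 5 sec */
	(void) pthread_mutex_lock(&mp);
	while (!predicate && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&cvp, &mp, &abstime);
	(void) pthread_mutex_unlock(&mp);
#endif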
28740Sstevel@tonic-gate 
28750Sstevel@tonic-gate /*
28760Sstevel@tonic-gate  * cond_reltimedwait() is a cancellation point but _cond_reltimedwait()
28770Sstevel@tonic-gate  * is not.  System libraries call the non-cancellation version.
28780Sstevel@tonic-gate  * It is expected that only applications call the cancellation version.
28790Sstevel@tonic-gate  */
28800Sstevel@tonic-gate int
28810Sstevel@tonic-gate _cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
28820Sstevel@tonic-gate {
28830Sstevel@tonic-gate 	timespec_t tslocal = *reltime;
28840Sstevel@tonic-gate 
28850Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, &tslocal));
28860Sstevel@tonic-gate }
28870Sstevel@tonic-gate 
28880Sstevel@tonic-gate #pragma weak cond_reltimedwait = _cond_reltimedwait_cancel
28890Sstevel@tonic-gate int
28900Sstevel@tonic-gate _cond_reltimedwait_cancel(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
28910Sstevel@tonic-gate {
28920Sstevel@tonic-gate 	int error;
28930Sstevel@tonic-gate 
28940Sstevel@tonic-gate 	_cancelon();
28950Sstevel@tonic-gate 	error = _cond_reltimedwait(cvp, mp, reltime);
28960Sstevel@tonic-gate 	if (error == EINTR)
28970Sstevel@tonic-gate 		_canceloff();
28980Sstevel@tonic-gate 	else
28990Sstevel@tonic-gate 		_canceloff_nocancel();
29000Sstevel@tonic-gate 	return (error);
29010Sstevel@tonic-gate }
29020Sstevel@tonic-gate 
29030Sstevel@tonic-gate #pragma weak pthread_cond_reltimedwait_np = _pthread_cond_reltimedwait_np
29040Sstevel@tonic-gate int
29050Sstevel@tonic-gate _pthread_cond_reltimedwait_np(cond_t *cvp, mutex_t *mp,
29060Sstevel@tonic-gate 	const timespec_t *reltime)
29070Sstevel@tonic-gate {
29080Sstevel@tonic-gate 	int error;
29090Sstevel@tonic-gate 
29100Sstevel@tonic-gate 	error = _cond_reltimedwait_cancel(cvp, mp, reltime);
29110Sstevel@tonic-gate 	if (error == ETIME)
29120Sstevel@tonic-gate 		error = ETIMEDOUT;
29130Sstevel@tonic-gate 	else if (error == EINTR)
29140Sstevel@tonic-gate 		error = 0;
29150Sstevel@tonic-gate 	return (error);
29160Sstevel@tonic-gate }
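/*
 * The _np interface above takes a relative timeout, avoiding the
 * clock_gettime() arithmetic entirely.  Hypothetical fragment; note
 * that this interface is Solaris-specific, not POSIX, and that the
 * interval restarts on every call:
 */
#if 0
	timespec_t reltime = { 0, 500000000 };	/* 500 ms */

	(void) pthread_mutex_lock(&mp);
	while (!predicate &&
	    pthread_cond_reltimedwait_np(&cvp, &mp, &reltime) != ETIMEDOUT)
		continue;
	(void) pthread_mutex_unlock(&mp);
#endif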
29170Sstevel@tonic-gate 
29180Sstevel@tonic-gate #pragma weak pthread_cond_signal = cond_signal_internal
29190Sstevel@tonic-gate #pragma weak _pthread_cond_signal = cond_signal_internal
29200Sstevel@tonic-gate #pragma weak cond_signal = cond_signal_internal
29210Sstevel@tonic-gate #pragma weak _cond_signal = cond_signal_internal
29220Sstevel@tonic-gate int
29230Sstevel@tonic-gate cond_signal_internal(cond_t *cvp)
29240Sstevel@tonic-gate {
29250Sstevel@tonic-gate 	ulwp_t *self = curthread;
29260Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
29270Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
29280Sstevel@tonic-gate 	int error = 0;
29290Sstevel@tonic-gate 	queue_head_t *qp;
29300Sstevel@tonic-gate 	mutex_t *mp;
29310Sstevel@tonic-gate 	queue_head_t *mqp;
29320Sstevel@tonic-gate 	ulwp_t **ulwpp;
29330Sstevel@tonic-gate 	ulwp_t *ulwp;
29340Sstevel@tonic-gate 	ulwp_t *prev = NULL;
29350Sstevel@tonic-gate 	ulwp_t *next;
29360Sstevel@tonic-gate 	ulwp_t **suspp = NULL;
29370Sstevel@tonic-gate 	ulwp_t *susprev;
29380Sstevel@tonic-gate 
29390Sstevel@tonic-gate 	if (csp)
29400Sstevel@tonic-gate 		tdb_incr(csp->cond_signal);
29410Sstevel@tonic-gate 
29420Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
29430Sstevel@tonic-gate 		error = __lwp_cond_signal(cvp);
29440Sstevel@tonic-gate 
29450Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
29460Sstevel@tonic-gate 		return (error);
29470Sstevel@tonic-gate 
29480Sstevel@tonic-gate 	/*
29490Sstevel@tonic-gate 	 * Move someone from the condvar sleep queue to the mutex sleep
29500Sstevel@tonic-gate 	 * queue for the mutex that he will acquire on being waked up.
29510Sstevel@tonic-gate 	 * We can do this only if we own the mutex he will acquire.
29520Sstevel@tonic-gate 	 * If we do not own the mutex, or if his ul_cv_wake flag
29530Sstevel@tonic-gate 	 * is set, just dequeue and unpark him.
29540Sstevel@tonic-gate 	 */
29550Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
29560Sstevel@tonic-gate 	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
29570Sstevel@tonic-gate 	    prev = ulwp, ulwpp = &ulwp->ul_link) {
29580Sstevel@tonic-gate 		if (ulwp->ul_wchan == cvp) {
29590Sstevel@tonic-gate 			if (!ulwp->ul_stop)
29600Sstevel@tonic-gate 				break;
29610Sstevel@tonic-gate 			/*
29620Sstevel@tonic-gate 			 * Try not to dequeue a suspended thread.
29630Sstevel@tonic-gate 			 * This mimics the old libthread's behavior.
29640Sstevel@tonic-gate 			 */
29650Sstevel@tonic-gate 			if (suspp == NULL) {
29660Sstevel@tonic-gate 				suspp = ulwpp;
29670Sstevel@tonic-gate 				susprev = prev;
29680Sstevel@tonic-gate 			}
29690Sstevel@tonic-gate 		}
29700Sstevel@tonic-gate 	}
29710Sstevel@tonic-gate 	if (ulwp == NULL && suspp != NULL) {
29720Sstevel@tonic-gate 		ulwp = *(ulwpp = suspp);
29730Sstevel@tonic-gate 		prev = susprev;
29740Sstevel@tonic-gate 		suspp = NULL;
29750Sstevel@tonic-gate 	}
29760Sstevel@tonic-gate 	if (ulwp == NULL) {	/* no one on the sleep queue */
29770Sstevel@tonic-gate 		cvp->cond_waiters_user = 0;
29780Sstevel@tonic-gate 		queue_unlock(qp);
29790Sstevel@tonic-gate 		return (error);
29800Sstevel@tonic-gate 	}
29810Sstevel@tonic-gate 	/*
29820Sstevel@tonic-gate 	 * Scan the remainder of the CV queue for another waiter.
29830Sstevel@tonic-gate 	 */
29840Sstevel@tonic-gate 	if (suspp != NULL) {
29850Sstevel@tonic-gate 		next = *suspp;
29860Sstevel@tonic-gate 	} else {
29870Sstevel@tonic-gate 		for (next = ulwp->ul_link; next != NULL; next = next->ul_link)
29880Sstevel@tonic-gate 			if (next->ul_wchan == cvp)
29890Sstevel@tonic-gate 				break;
29900Sstevel@tonic-gate 	}
29910Sstevel@tonic-gate 	if (next == NULL)
29920Sstevel@tonic-gate 		cvp->cond_waiters_user = 0;
29930Sstevel@tonic-gate 
29940Sstevel@tonic-gate 	/*
29950Sstevel@tonic-gate 	 * Inform the thread that he was the recipient of a cond_signal().
29960Sstevel@tonic-gate 	 * This lets him deal with cond_signal() and, concurrently,
29970Sstevel@tonic-gate 	 * one or more of a cancellation, a UNIX signal, or a timeout.
29980Sstevel@tonic-gate 	 * These latter conditions must not consume a cond_signal().
29990Sstevel@tonic-gate 	 */
30000Sstevel@tonic-gate 	ulwp->ul_signalled = 1;
30010Sstevel@tonic-gate 
30020Sstevel@tonic-gate 	/*
30030Sstevel@tonic-gate 	 * Dequeue the waiter but leave his ul_sleepq non-NULL
30040Sstevel@tonic-gate 	 * while we move him to the mutex queue so that he can
30050Sstevel@tonic-gate 	 * deal properly with spurious wakeups.
30060Sstevel@tonic-gate 	 */
30070Sstevel@tonic-gate 	*ulwpp = ulwp->ul_link;
30080Sstevel@tonic-gate 	if (qp->qh_tail == ulwp)
30090Sstevel@tonic-gate 		qp->qh_tail = prev;
30100Sstevel@tonic-gate 	qp->qh_qlen--;
30110Sstevel@tonic-gate 	ulwp->ul_link = NULL;
30120Sstevel@tonic-gate 
30130Sstevel@tonic-gate 	mp = ulwp->ul_cvmutex;		/* the mutex he will acquire */
30140Sstevel@tonic-gate 	ulwp->ul_cvmutex = NULL;
30150Sstevel@tonic-gate 	ASSERT(mp != NULL);
30160Sstevel@tonic-gate 
30170Sstevel@tonic-gate 	if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
30180Sstevel@tonic-gate 		lwpid_t lwpid = ulwp->ul_lwpid;
30190Sstevel@tonic-gate 
30200Sstevel@tonic-gate 		no_preempt(self);
30210Sstevel@tonic-gate 		ulwp->ul_sleepq = NULL;
30220Sstevel@tonic-gate 		ulwp->ul_wchan = NULL;
30230Sstevel@tonic-gate 		ulwp->ul_cv_wake = 0;
30240Sstevel@tonic-gate 		queue_unlock(qp);
30250Sstevel@tonic-gate 		(void) __lwp_unpark(lwpid);
30260Sstevel@tonic-gate 		preempt(self);
30270Sstevel@tonic-gate 	} else {
30280Sstevel@tonic-gate 		mqp = queue_lock(mp, MX);
30290Sstevel@tonic-gate 		enqueue(mqp, ulwp, mp, MX);
30300Sstevel@tonic-gate 		mp->mutex_waiters = 1;
30310Sstevel@tonic-gate 		queue_unlock(mqp);
30320Sstevel@tonic-gate 		queue_unlock(qp);
30330Sstevel@tonic-gate 	}
30340Sstevel@tonic-gate 
30350Sstevel@tonic-gate 	return (error);
30360Sstevel@tonic-gate }
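/*
 * The requeue optimization above applies only when the signaller owns
 * the mutex that the waiter will reacquire.  Hence the classic advice
 * to signal while holding the lock (hypothetical fragment): the waiter
 * is moved directly to the mutex sleep queue instead of being waked up
 * merely to go back to sleep contending for the mutex.
 */
#if 0
	(void) pthread_mutex_lock(&mp);
	predicate = 1;
	(void) pthread_cond_signal(&cvp);	/* waiter is requeued, */
	(void) pthread_mutex_unlock(&mp);	/* not prematurely run */
#endif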
30370Sstevel@tonic-gate 
3038*4570Sraf /*
3039*4570Sraf  * Utility function called from cond_broadcast() and rw_queue_release()
3040*4570Sraf  * to (re)allocate a big buffer to hold the lwpids of all the threads
3041*4570Sraf  * to be set running after they are removed from their sleep queues.
3042*4570Sraf  * Since we are holding a queue lock, we cannot call any function
3043*4570Sraf  * that might acquire a lock.  mmap(), munmap() and lwp_unpark_all()
3044*4570Sraf  * are simple system calls and are safe in this regard.
3045*4570Sraf  */
3046*4570Sraf lwpid_t *
3047*4570Sraf alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr)
3048*4570Sraf {
3049*4570Sraf 	/*
3050*4570Sraf 	 * Allocate NEWLWPS ids on the first overflow.
3051*4570Sraf 	 * Double the allocation each time after that.
3052*4570Sraf 	 */
3053*4570Sraf 	int nlwpid = *nlwpid_ptr;
3054*4570Sraf 	int maxlwps = *maxlwps_ptr;
3055*4570Sraf 	int first_allocation;
3056*4570Sraf 	int newlwps;
3057*4570Sraf 	void *vaddr;
3058*4570Sraf 
3059*4570Sraf 	ASSERT(nlwpid == maxlwps);
3060*4570Sraf 
3061*4570Sraf 	first_allocation = (maxlwps == MAXLWPS);
3062*4570Sraf 	newlwps = first_allocation? NEWLWPS : 2 * maxlwps;
3063*4570Sraf 	vaddr = _private_mmap(NULL, newlwps * sizeof (lwpid_t),
3064*4570Sraf 	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
3065*4570Sraf 
3066*4570Sraf 	if (vaddr == MAP_FAILED) {
3067*4570Sraf 		/*
3068*4570Sraf 		 * Let's hope this never happens.
3069*4570Sraf 		 * If it does, then we have a terrible
3070*4570Sraf 		 * thundering herd on our hands.
3071*4570Sraf 		 */
3072*4570Sraf 		(void) __lwp_unpark_all(lwpid, nlwpid);
3073*4570Sraf 		*nlwpid_ptr = 0;
3074*4570Sraf 	} else {
3075*4570Sraf 		(void) _memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t));
3076*4570Sraf 		if (!first_allocation)
3077*4570Sraf 			(void) _private_munmap(lwpid,
3078*4570Sraf 			    maxlwps * sizeof (lwpid_t));
3079*4570Sraf 		lwpid = vaddr;
3080*4570Sraf 		*maxlwps_ptr = newlwps;
3081*4570Sraf 	}
3082*4570Sraf 
3083*4570Sraf 	return (lwpid);
3084*4570Sraf }
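/*
 * Sketch of the calling convention for alloc_lwpids(), mirroring its
 * use in cond_broadcast_internal() below: start with an on-stack
 * buffer, switch to mmap()ed storage on overflow, unpark everyone
 * after dropping the queue lock, then free any grown buffer.
 */
#if 0
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;
	int nlwpid = 0;
	int maxlwps = MAXLWPS;

	/* ... while holding a queue lock ... */
	if (nlwpid == maxlwps)
		lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
	lwpid[nlwpid++] = some_lwpid;		/* hypothetical lwpid */
	/* ... after dropping the queue lock ... */
	(void) __lwp_unpark_all(lwpid, nlwpid);
	if (lwpid != buffer)
		(void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t));
#endif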
30850Sstevel@tonic-gate 
30860Sstevel@tonic-gate #pragma weak pthread_cond_broadcast = cond_broadcast_internal
30870Sstevel@tonic-gate #pragma weak _pthread_cond_broadcast = cond_broadcast_internal
30880Sstevel@tonic-gate #pragma weak cond_broadcast = cond_broadcast_internal
30890Sstevel@tonic-gate #pragma weak _cond_broadcast = cond_broadcast_internal
30900Sstevel@tonic-gate int
30910Sstevel@tonic-gate cond_broadcast_internal(cond_t *cvp)
30920Sstevel@tonic-gate {
30930Sstevel@tonic-gate 	ulwp_t *self = curthread;
30940Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
30950Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
30960Sstevel@tonic-gate 	int error = 0;
30970Sstevel@tonic-gate 	queue_head_t *qp;
30980Sstevel@tonic-gate 	mutex_t *mp;
30990Sstevel@tonic-gate 	mutex_t *mp_cache = NULL;
3100*4570Sraf 	queue_head_t *mqp = NULL;
31010Sstevel@tonic-gate 	ulwp_t **ulwpp;
31020Sstevel@tonic-gate 	ulwp_t *ulwp;
31030Sstevel@tonic-gate 	ulwp_t *prev = NULL;
3104*4570Sraf 	int nlwpid = 0;
3105*4570Sraf 	int maxlwps = MAXLWPS;
31060Sstevel@tonic-gate 	lwpid_t buffer[MAXLWPS];
31070Sstevel@tonic-gate 	lwpid_t *lwpid = buffer;
31080Sstevel@tonic-gate 
31090Sstevel@tonic-gate 	if (csp)
31100Sstevel@tonic-gate 		tdb_incr(csp->cond_broadcast);
31110Sstevel@tonic-gate 
31120Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
31130Sstevel@tonic-gate 		error = __lwp_cond_broadcast(cvp);
31140Sstevel@tonic-gate 
31150Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
31160Sstevel@tonic-gate 		return (error);
31170Sstevel@tonic-gate 
31180Sstevel@tonic-gate 	/*
31190Sstevel@tonic-gate 	 * Move everyone from the condvar sleep queue to the mutex sleep
31200Sstevel@tonic-gate 	 * queue for the mutex that they will acquire on being waked up.
31210Sstevel@tonic-gate 	 * We can do this only if we own the mutex they will acquire.
31220Sstevel@tonic-gate 	 * If we do not own the mutex, or if their ul_cv_wake flag
31230Sstevel@tonic-gate 	 * is set, just dequeue and unpark them.
31240Sstevel@tonic-gate 	 *
31250Sstevel@tonic-gate 	 * We keep track of lwpids that are to be unparked in lwpid[].
31260Sstevel@tonic-gate 	 * __lwp_unpark_all() is called to unpark all of them after
31270Sstevel@tonic-gate 	 * they have been removed from the sleep queue and the sleep
31280Sstevel@tonic-gate 	 * queue lock has been dropped.  If we run out of space in our
31290Sstevel@tonic-gate 	 * on-stack buffer, we need to allocate more but we can't call
31300Sstevel@tonic-gate 	 * lmalloc() because we are holding a queue lock when the overflow
31310Sstevel@tonic-gate 	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
3132*4570Sraf 	 * either because the application may have allocated a small
3133*4570Sraf 	 * stack and we don't want to overrun the stack.  So we call
3134*4570Sraf 	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
31350Sstevel@tonic-gate 	 * system call directly since that path acquires no locks.
31360Sstevel@tonic-gate 	 */
31370Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
31380Sstevel@tonic-gate 	cvp->cond_waiters_user = 0;
31390Sstevel@tonic-gate 	ulwpp = &qp->qh_head;
31400Sstevel@tonic-gate 	while ((ulwp = *ulwpp) != NULL) {
31410Sstevel@tonic-gate 		if (ulwp->ul_wchan != cvp) {
31420Sstevel@tonic-gate 			prev = ulwp;
31430Sstevel@tonic-gate 			ulwpp = &ulwp->ul_link;
31440Sstevel@tonic-gate 			continue;
31450Sstevel@tonic-gate 		}
31460Sstevel@tonic-gate 		*ulwpp = ulwp->ul_link;
31470Sstevel@tonic-gate 		if (qp->qh_tail == ulwp)
31480Sstevel@tonic-gate 			qp->qh_tail = prev;
31490Sstevel@tonic-gate 		qp->qh_qlen--;
31500Sstevel@tonic-gate 		ulwp->ul_link = NULL;
31510Sstevel@tonic-gate 		mp = ulwp->ul_cvmutex;		/* his mutex */
31520Sstevel@tonic-gate 		ulwp->ul_cvmutex = NULL;
31530Sstevel@tonic-gate 		ASSERT(mp != NULL);
31540Sstevel@tonic-gate 		if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
31550Sstevel@tonic-gate 			ulwp->ul_sleepq = NULL;
31560Sstevel@tonic-gate 			ulwp->ul_wchan = NULL;
31570Sstevel@tonic-gate 			ulwp->ul_cv_wake = 0;
3158*4570Sraf 			if (nlwpid == maxlwps)
3159*4570Sraf 				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
31600Sstevel@tonic-gate 			lwpid[nlwpid++] = ulwp->ul_lwpid;
31610Sstevel@tonic-gate 		} else {
31620Sstevel@tonic-gate 			if (mp != mp_cache) {
31630Sstevel@tonic-gate 				mp_cache = mp;
3164*4570Sraf 				if (mqp != NULL)
3165*4570Sraf 					queue_unlock(mqp);
3166*4570Sraf 				mqp = queue_lock(mp, MX);
31670Sstevel@tonic-gate 			}
31680Sstevel@tonic-gate 			enqueue(mqp, ulwp, mp, MX);
31690Sstevel@tonic-gate 			mp->mutex_waiters = 1;
31700Sstevel@tonic-gate 		}
31710Sstevel@tonic-gate 	}
3172*4570Sraf 	if (mqp != NULL)
3173*4570Sraf 		queue_unlock(mqp);
3174*4570Sraf 	if (nlwpid == 0) {
3175*4570Sraf 		queue_unlock(qp);
3176*4570Sraf 	} else {
3177*4570Sraf 		no_preempt(self);
3178*4570Sraf 		queue_unlock(qp);
31790Sstevel@tonic-gate 		if (nlwpid == 1)
31800Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid[0]);
31810Sstevel@tonic-gate 		else
31820Sstevel@tonic-gate 			(void) __lwp_unpark_all(lwpid, nlwpid);
3183*4570Sraf 		preempt(self);
31840Sstevel@tonic-gate 	}
31850Sstevel@tonic-gate 	if (lwpid != buffer)
31860Sstevel@tonic-gate 		(void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t));
31870Sstevel@tonic-gate 	return (error);
31880Sstevel@tonic-gate }
31890Sstevel@tonic-gate 
31900Sstevel@tonic-gate #pragma weak pthread_cond_destroy = _cond_destroy
31910Sstevel@tonic-gate #pragma weak _pthread_cond_destroy = _cond_destroy
31920Sstevel@tonic-gate #pragma weak cond_destroy = _cond_destroy
31930Sstevel@tonic-gate int
31940Sstevel@tonic-gate _cond_destroy(cond_t *cvp)
31950Sstevel@tonic-gate {
31960Sstevel@tonic-gate 	cvp->cond_magic = 0;
31970Sstevel@tonic-gate 	tdb_sync_obj_deregister(cvp);
31980Sstevel@tonic-gate 	return (0);
31990Sstevel@tonic-gate }
32000Sstevel@tonic-gate 
32010Sstevel@tonic-gate #if defined(THREAD_DEBUG)
32020Sstevel@tonic-gate void
32030Sstevel@tonic-gate assert_no_libc_locks_held(void)
32040Sstevel@tonic-gate {
32050Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
32060Sstevel@tonic-gate }
32070Sstevel@tonic-gate #endif
32080Sstevel@tonic-gate 
32090Sstevel@tonic-gate /* protected by link_lock */
32100Sstevel@tonic-gate uint64_t spin_lock_spin;
32110Sstevel@tonic-gate uint64_t spin_lock_spin2;
32120Sstevel@tonic-gate uint64_t spin_lock_sleep;
32130Sstevel@tonic-gate uint64_t spin_lock_wakeup;
32140Sstevel@tonic-gate 
32150Sstevel@tonic-gate /*
32160Sstevel@tonic-gate  * Record spin lock statistics.
32170Sstevel@tonic-gate  * Called by a thread exiting itself in thrp_exit().
32180Sstevel@tonic-gate  * Also called via atexit() from the thread calling
32190Sstevel@tonic-gate  * exit() to do all the other threads as well.
32200Sstevel@tonic-gate  */
32210Sstevel@tonic-gate void
32220Sstevel@tonic-gate record_spin_locks(ulwp_t *ulwp)
32230Sstevel@tonic-gate {
32240Sstevel@tonic-gate 	spin_lock_spin += ulwp->ul_spin_lock_spin;
32250Sstevel@tonic-gate 	spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
32260Sstevel@tonic-gate 	spin_lock_sleep += ulwp->ul_spin_lock_sleep;
32270Sstevel@tonic-gate 	spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
32280Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin = 0;
32290Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin2 = 0;
32300Sstevel@tonic-gate 	ulwp->ul_spin_lock_sleep = 0;
32310Sstevel@tonic-gate 	ulwp->ul_spin_lock_wakeup = 0;
32320Sstevel@tonic-gate }
32330Sstevel@tonic-gate 
32340Sstevel@tonic-gate /*
32350Sstevel@tonic-gate  * atexit function:  dump the queue statistics to stderr.
32360Sstevel@tonic-gate  */
32371219Sraf #if !defined(__lint)
32381219Sraf #define	fprintf	_fprintf
32391219Sraf #endif
32400Sstevel@tonic-gate #include <stdio.h>
32410Sstevel@tonic-gate void
32420Sstevel@tonic-gate dump_queue_statistics(void)
32430Sstevel@tonic-gate {
32440Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
32450Sstevel@tonic-gate 	queue_head_t *qp;
32460Sstevel@tonic-gate 	int qn;
32470Sstevel@tonic-gate 	uint64_t spin_lock_total = 0;
32480Sstevel@tonic-gate 
32490Sstevel@tonic-gate 	if (udp->queue_head == NULL || thread_queue_dump == 0)
32500Sstevel@tonic-gate 		return;
32510Sstevel@tonic-gate 
32520Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
32530Sstevel@tonic-gate 	    fprintf(stderr, "queue#   lockcount    max qlen\n") < 0)
32540Sstevel@tonic-gate 		return;
32550Sstevel@tonic-gate 	for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
32560Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
32570Sstevel@tonic-gate 			continue;
32580Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
32590Sstevel@tonic-gate 		if (fprintf(stderr, "%5d %12llu%12u\n", qn,
32600Sstevel@tonic-gate 			(u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0)
32610Sstevel@tonic-gate 				return;
32620Sstevel@tonic-gate 	}
32630Sstevel@tonic-gate 
32640Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
32650Sstevel@tonic-gate 	    fprintf(stderr, "queue#   lockcount    max qlen\n") < 0)
32660Sstevel@tonic-gate 		return;
32670Sstevel@tonic-gate 	for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
32680Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
32690Sstevel@tonic-gate 			continue;
32700Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
32710Sstevel@tonic-gate 		if (fprintf(stderr, "%5d %12llu%12u\n", qn,
32720Sstevel@tonic-gate 			(u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0)
32730Sstevel@tonic-gate 				return;
32740Sstevel@tonic-gate 	}
32750Sstevel@tonic-gate 
32760Sstevel@tonic-gate 	(void) fprintf(stderr, "\n  spin_lock_total  = %10llu\n",
32770Sstevel@tonic-gate 		(u_longlong_t)spin_lock_total);
32780Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin   = %10llu\n",
32790Sstevel@tonic-gate 		(u_longlong_t)spin_lock_spin);
32800Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin2  = %10llu\n",
32810Sstevel@tonic-gate 		(u_longlong_t)spin_lock_spin2);
32820Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_sleep  = %10llu\n",
32830Sstevel@tonic-gate 		(u_longlong_t)spin_lock_sleep);
32840Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_wakeup = %10llu\n",
32850Sstevel@tonic-gate 		(u_longlong_t)spin_lock_wakeup);
32860Sstevel@tonic-gate }
3287