/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/sdt.h>

#include "lint.h"
#include "thr_uberdata.h"

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a PTHREAD_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);

/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}
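
/*
 * Illustrative sketch (not part of the library): how a lock/unlock path
 * might bracket a hold with the statistics functions above.  MUTEX_STATS()
 * is assumed here to map a mutex to its tdb_mutex_stats_t and to return
 * NULL when statistics gathering is disabled.
 */
#if 0
static void
example_stats_bracket(mutex_t *mp, uberdata_t *udp)
{
	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);

	if (msp)
		record_begin_hold(msp);		/* lock acquired; start timing */
	/* ... critical section ... */
	if (msp)
		(void) record_hold_time(msp);	/* accumulate the hold time */
}
#endif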

/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin counts of 1000 and 500 are experimentally determined.
 * On sun4u machines with any number of processors they could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variables:
 *	_THREAD_ADAPTIVE_SPIN=count
 *	_THREAD_RELEASE_SPIN=count
 * can be used to override and set the counts in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_release_spin = 500;
int	thread_queue_verify = 0;
static	int	ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 * There is no release spin concept for queue locks.
 */
int	thread_queue_spin = 1000;
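
/*
 * Illustrative sketch (not part of this file): the environment overrides
 * described above are applied during libc initialization elsewhere.  A
 * parser for one of these variables might clamp its value as below; the
 * helper name and clamping style are assumptions for illustration only.
 */
#if 0
#include <stdlib.h>

static int
example_spin_count(const char *name, int defval)
{
	const char *s = getenv(name);
	long val;

	if (s == NULL)
		return (defval);
	val = strtol(s, NULL, 0);
	if (val < 0)			/* clamp to the documented range */
		val = 0;
	else if (val > 1000000)
		val = 1000000;
	return ((int)val);
}
/* e.g., thread_adaptive_spin = example_spin_count("_THREAD_ADAPTIVE_SPIN", 1000); */
#endif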

/*
 * Use the otherwise-unused 'mutex_ownerpid' field of a USYNC_THREAD
 * mutex as a count of adaptive spins in progress.
 */
#define	mutex_spinners	mutex_ownerpid

void
_mutex_set_typeattr(mutex_t *mp, int attr)
{
	mp->mutex_type |= (uint8_t)attr;
}

/*
 * 'type' can be one of USYNC_THREAD or USYNC_PROCESS, possibly
 * augmented by the flags LOCK_RECURSIVE and/or LOCK_ERRORCHECK,
 * or it can be USYNC_PROCESS_ROBUST with no extra flags.
 */
#pragma weak _private_mutex_init = __mutex_init
#pragma weak mutex_init = __mutex_init
#pragma weak _mutex_init = __mutex_init
/* ARGSUSED2 */
int
__mutex_init(mutex_t *mp, int type, void *arg)
{
	int error;

	switch (type & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) {
	case USYNC_THREAD:
	case USYNC_PROCESS:
		(void) _memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		error = 0;
		break;
	case USYNC_PROCESS_ROBUST:
		if (type & (LOCK_RECURSIVE|LOCK_ERRORCHECK))
			error = EINVAL;
		else
			error = ___lwp_mutex_init(mp, type);
		break;
	default:
		error = EINVAL;
		break;
	}
	if (error == 0)
		mp->mutex_magic = MUTEX_MAGIC;
	return (error);
}
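
/*
 * Illustrative sketch (not part of the library): valid and invalid type
 * arguments for __mutex_init(), following the switch statement above.
 */
#if 0
static void
example_mutex_init_types(void)
{
	mutex_t m1, m2, m3;

	/* process-private, recursive and error-checking: accepted */
	(void) mutex_init(&m1, USYNC_THREAD|LOCK_RECURSIVE|LOCK_ERRORCHECK,
	    NULL);
	/* process-shared: accepted */
	(void) mutex_init(&m2, USYNC_PROCESS, NULL);
	/* robust with extra flags: rejected with EINVAL */
	(void) mutex_init(&m3, USYNC_PROCESS_ROBUST|LOCK_RECURSIVE, NULL);
}
#endif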

/*
 * Delete mp from the list of ceil mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	mcpp = &self->ul_mxchain;
	while ((*mcpp)->mxchain_mx != mp)
		mcpp = &(*mcpp)->mxchain_next;
	mcp = *mcpp;
	*mcpp = mcp->mxchain_next;
	lfree(mcp, sizeof (*mcp));
	return (mcpp == &self->ul_mxchain);
}

/*
 * Add mp to the head of the list of ceil mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}

/*
 * Inherit priority from ceiling.  The inheritance impacts the effective
 * priority, not the assigned priority.  See _thread_setschedparam_main().
 */
void
_ceil_prio_inherit(int ceil)
{
	ulwp_t *self = curthread;
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	param.sched_priority = ceil;
	if (_thread_setschedparam_main(self->ul_lwpid,
	    self->ul_policy, &param, PRIO_INHERIT)) {
		/*
		 * Panic, since it is unclear what error code to return.
		 * If we ever do return the error codes from the routine
		 * called above, update the man page...
		 */
		thr_panic("_thread_setschedparam_main() fails");
	}
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	struct sched_param param;

	(void) _memset(&param, 0, sizeof (param));
	if (self->ul_mxchain == NULL) {
		/*
		 * No ceil locks held.  Zero the epri, revert back to ul_pri.
		 * Since the thread's hash lock is not held, we cannot just
		 * read ul_pri here; that is done in the called routine...
		 */
		param.sched_priority = self->ul_pri;	/* ignored */
		if (_thread_setschedparam_main(self->ul_lwpid,
		    self->ul_policy, &param, PRIO_DISINHERIT))
			thr_panic("_thread_setschedparam_main() fails");
	} else {
		/*
		 * Set priority to that of the mutex at the head
		 * of the ceilmutex chain.
		 */
		param.sched_priority =
		    self->ul_mxchain->mxchain_mx->mutex_ceiling;
		if (_thread_setschedparam_main(self->ul_lwpid,
		    self->ul_policy, &param, PRIO_INHERIT))
			thr_panic("_thread_setschedparam_main() fails");
	}
}
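
/*
 * Illustrative sketch (not part of the library): how the four routines
 * above combine to implement the priority-ceiling protocol on a
 * lock/unlock path.  The exact call sites live in the mutex lock/unlock
 * code; this ordering is an assumption drawn from the comments above.
 */
#if 0
static void
example_ceiling_protocol(mutex_t *mp)
{
	/* lock path: remember the lock, then raise effective priority */
	if (_ceil_mylist_add(mp) == 0)
		_ceil_prio_inherit(mp->mutex_ceiling);

	/* ... critical section at ceiling priority ... */

	/* unlock path: forget the lock; if the chain head changed, re-derive */
	if (_ceil_mylist_del(mp))
		_ceil_prio_waive();	/* next chain head, or back to ul_pri */
}
#endif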

/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
	ulwp_t *self = curthread;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Spin for a while, attempting to acquire the lock.
	 */
	if (self->ul_spin_lock_spin != UINT_MAX)
		self->ul_spin_lock_spin++;
	if (mutex_queuelock_adaptive(mp) == 0 ||
	    set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Try harder if we were previously at a no-preemption level.
	 */
	if (self->ul_preempt > 1) {
		if (self->ul_spin_lock_spin2 != UINT_MAX)
			self->ul_spin_lock_spin2++;
		if (mutex_queuelock_adaptive(mp) == 0 ||
		    set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			return;
		}
	}
	/*
	 * Give up and block in the kernel for the mutex.
	 */
	if (self->ul_spin_lock_sleep != UINT_MAX)
		self->ul_spin_lock_sleep++;
	(void) ___lwp_mutex_timedlock(mp, NULL);
	mp->mutex_owner = (uintptr_t)self;
}

void
spin_lock_clear(mutex_t *mp)
{
	ulwp_t *self = curthread;

	mp->mutex_owner = 0;
	if (swap32(&mp->mutex_lockword, 0) & WAITERMASK) {
		(void) ___lwp_mutex_wakeup(mp);
		if (self->ul_spin_lock_wakeup != UINT_MAX)
			self->ul_spin_lock_wakeup++;
	}
	preempt(self);
}

/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	void *data;
	int i;

	/*
	 * No locks are needed; we call here only when single-threaded.
	 */
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	if ((data = _private_mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread queue_head table");
	udp->queue_head = (queue_head_t *)data;
	for (i = 0; i < 2 * QHASHSIZE; i++)
		udp->queue_head[i].qh_lock.mutex_magic = MUTEX_MAGIC;
}
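
/*
 * Illustrative sketch (not part of the library): the table allocated
 * above holds 2 * QHASHSIZE queue heads, the first QHASHSIZE used for
 * mutex (MX) queues and the second QHASHSIZE for condvar (CV) queues,
 * as QVERIFY() below relies upon.  The hash function shown here is an
 * assumption; the real QUEUE_HASH() macro is defined elsewhere in libc.
 */
#if 0
#define	EXAMPLE_QUEUE_HASH(wchan, type)					\
	(((uint_t)((uintptr_t)(wchan) >> 3) & (QHASHSIZE - 1)) +	\
	(((type) == CV)? QHASHSIZE : 0))
#endif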

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	ulwp_t *ulwp;
	ulwp_t *prev;
	uint_t index;
	uint32_t cnt = 0;
	char qtype;
	void *wchan;

	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
	ASSERT((qp->qh_head != NULL && qp->qh_tail != NULL) ||
		(qp->qh_head == NULL && qp->qh_tail == NULL));
	if (!thread_queue_verify)
		return;
	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
	for (prev = NULL, ulwp = qp->qh_head; ulwp != NULL;
	    prev = ulwp, ulwp = ulwp->ul_link, cnt++) {
		ASSERT(ulwp->ul_qtype == qtype);
		ASSERT(ulwp->ul_wchan != NULL);
		ASSERT(ulwp->ul_sleepq == qp);
		wchan = ulwp->ul_wchan;
		index = QUEUE_HASH(wchan, qtype);
		ASSERT(&udp->queue_head[index] == qp);
	}
	ASSERT(qp->qh_tail == prev);
	ASSERT(qp->qh_qlen == cnt);
}

#else	/* THREAD_DEBUG */

#define	QVERIFY(qp)

#endif	/* THREAD_DEBUG */

/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;

	ASSERT(qtype == MX || qtype == CV);

	/*
	 * It is possible that we could be called while still single-threaded.
	 * If so, we call queue_alloc() to allocate the queue_head[] array.
	 */
	if ((qp = udp->queue_head) == NULL) {
		queue_alloc();
		qp = udp->queue_head;
	}
	qp += QUEUE_HASH(wchan, qtype);
	spin_lock_set(&qp->qh_lock);
	/*
	 * At one acquisition per nanosecond, qh_lockcount will wrap after
	 * 512 years.  Were we to care about this, we could peg the value
	 * at UINT64_MAX.
	 */
	qp->qh_lockcount++;
	QVERIFY(qp);
	return (qp);
}

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
	QVERIFY(qp);
	spin_lock_clear(&qp->qh_lock);
}

/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.  We do this by making writers appear to have a half
 * point higher priority for purposes of priority comparisons below.
 */
#define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)

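/*
 * Illustrative arithmetic (not part of the library): CMP_PRIO() doubles
 * the real priority and adds ul_writer (0 or 1), so a writer sorts ahead
 * of same-priority readers without outranking any strictly higher
 * priority thread.
 */
#if 0
#include <assert.h>
static void
example_cmp_prio(void)
{
	/* writer and reader at the same real priority of 10 */
	assert(((10 << 1) + 1) > ((10 << 1) + 0));	/* writer goes first */
	/* a reader at real priority 11 still beats the writer */
	assert(((11 << 1) + 0) > ((10 << 1) + 1));
}
#endif
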
void
enqueue(queue_head_t *qp, ulwp_t *ulwp, void *wchan, int qtype)
{
	ulwp_t **ulwpp;
	ulwp_t *next;
	int pri = CMP_PRIO(ulwp);
	int force_fifo = (qtype & FIFOQ);
	int do_fifo;

	qtype &= ~FIFOQ;
	ASSERT(qtype == MX || qtype == CV);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(ulwp->ul_sleepq != qp);

	/*
	 * LIFO queue ordering is unfair and can lead to starvation,
	 * but it gives better performance for heavily contended locks.
	 * We use thread_queue_fifo (range is 0..8) to determine
	 * the frequency of FIFO vs LIFO queuing:
	 *	0 : every 256th time	(almost always LIFO)
	 *	1 : every 128th time
	 *	2 : every 64th  time
	 *	3 : every 32nd  time
	 *	4 : every 16th  time	(the default value, mostly LIFO)
	 *	5 : every 8th   time
	 *	6 : every 4th   time
	 *	7 : every 2nd   time
	 *	8 : every time		(never LIFO, always FIFO)
	 * Note that there is always some degree of FIFO ordering.
	 * This breaks livelock conditions that occur in applications
	 * that are written assuming (incorrectly) that threads acquire
	 * locks fairly, that is, in roughly round-robin order.
	 * In any event, the queue is maintained in priority order.
	 *
	 * If we are given the FIFOQ flag in qtype, fifo queueing is forced.
	 * SUSV3 requires this for semaphores.
	 */
	do_fifo = (force_fifo ||
		((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0);

	if (qp->qh_head == NULL) {
		/*
		 * The queue is empty.  LIFO/FIFO doesn't matter.
		 */
		ASSERT(qp->qh_tail == NULL);
		ulwpp = &qp->qh_head;
	} else if (do_fifo) {
		/*
		 * Enqueue after the last thread whose priority is greater
		 * than or equal to the priority of the thread being queued.
		 * Attempt first to go directly onto the tail of the queue.
		 */
		if (pri <= CMP_PRIO(qp->qh_tail))
			ulwpp = &qp->qh_tail->ul_link;
		else {
			for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL;
			    ulwpp = &next->ul_link)
				if (pri > CMP_PRIO(next))
					break;
		}
	} else {
		/*
		 * Enqueue before the first thread whose priority is less
		 * than or equal to the priority of the thread being queued.
		 * Hopefully we can go directly onto the head of the queue.
		 */
		for (ulwpp = &qp->qh_head; (next = *ulwpp) != NULL;
		    ulwpp = &next->ul_link)
			if (pri >= CMP_PRIO(next))
				break;
	}
	if ((ulwp->ul_link = *ulwpp) == NULL)
		qp->qh_tail = ulwp;
	*ulwpp = ulwp;

	ulwp->ul_sleepq = qp;
	ulwp->ul_wchan = wchan;
	ulwp->ul_qtype = qtype;
	if (qp->qh_qmax < ++qp->qh_qlen)
		qp->qh_qmax = qp->qh_qlen;
}
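
/*
 * Illustrative arithmetic (not part of the library): with the default
 * ul_queue_fifo of 4, the do_fifo test above reduces to
 *	((++qh_qcnt << 4) & 0xff) == 0
 * which is true exactly when qh_qcnt is a multiple of 16, so one enqueue
 * in every 16 is FIFO and the rest are LIFO.  At the extremes, a shift
 * of 0 leaves all eight low bits in play (every 256th enqueue is FIFO)
 * and a shift of 8 clears them all (every enqueue is FIFO), matching the
 * table in the comment above.
 */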

/*
 * Return a pointer to the queue slot of the
 * highest priority thread on the queue.
 * On return, prevp, if not NULL, will contain a pointer
 * to the thread's predecessor on the queue.
 */
static ulwp_t **
queue_slot(queue_head_t *qp, void *wchan, int *more, ulwp_t **prevp)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev = NULL;
	ulwp_t **suspp = NULL;
	ulwp_t *susprev;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

	/*
	 * Find a waiter on the sleep queue.
	 */
	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan) {
			if (!ulwp->ul_stop)
				break;
			/*
			 * Try not to return a suspended thread.
			 * This mimics the old libthread's behavior.
			 */
			if (suspp == NULL) {
				suspp = ulwpp;
				susprev = prev;
			}
		}
	}

	if (ulwp == NULL && suspp != NULL) {
		ulwp = *(ulwpp = suspp);
		prev = susprev;
		suspp = NULL;
	}
	if (ulwp == NULL) {
		if (more != NULL)
			*more = 0;
		return (NULL);
	}

	if (prevp != NULL)
		*prevp = prev;
	if (more == NULL)
		return (ulwpp);

	/*
	 * Scan the remainder of the queue for another waiter.
	 */
	if (suspp != NULL) {
		*more = 1;
		return (ulwpp);
	}
	for (ulwp = ulwp->ul_link; ulwp != NULL; ulwp = ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan) {
			*more = 1;
			return (ulwpp);
		}
	}

	*more = 0;
	return (ulwpp);
}

ulwp_t *
dequeue(queue_head_t *qp, void *wchan, int *more)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;

	if ((ulwpp = queue_slot(qp, wchan, more, &prev)) == NULL)
		return (NULL);

	/*
	 * Dequeue the waiter.
	 */
	ulwp = *ulwpp;
	*ulwpp = ulwp->ul_link;
	ulwp->ul_link = NULL;
	if (qp->qh_tail == ulwp)
		qp->qh_tail = prev;
	qp->qh_qlen--;
	ulwp->ul_sleepq = NULL;
	ulwp->ul_wchan = NULL;

	return (ulwp);
}

/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp, void *wchan)
{
	ulwp_t **ulwpp;

	if ((ulwpp = queue_slot(qp, wchan, NULL, NULL)) == NULL)
		return (NULL);
	return (*ulwpp);
}

uint8_t
dequeue_self(queue_head_t *qp, void *wchan)
{
	ulwp_t *self = curthread;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev = NULL;
	int found = 0;
	int more = 0;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

	/* find self on the sleep queue */
	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp == self) {
			/* dequeue ourself */
			*ulwpp = self->ul_link;
			if (qp->qh_tail == self)
				qp->qh_tail = prev;
			qp->qh_qlen--;
			ASSERT(self->ul_wchan == wchan);
			self->ul_cvmutex = NULL;
			self->ul_sleepq = NULL;
			self->ul_wchan = NULL;
			self->ul_cv_wake = 0;
			self->ul_link = NULL;
			found = 1;
			break;
		}
		if (ulwp->ul_wchan == wchan)
			more = 1;
	}

	if (!found)
		thr_panic("dequeue_self(): curthread not found on queue");

	if (more)
		return (1);

	/* scan the remainder of the queue for another waiter */
	for (ulwp = *ulwpp; ulwp != NULL; ulwp = ulwp->ul_link) {
		if (ulwp->ul_wchan == wchan)
			return (1);
	}

	return (0);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourselves off our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;

	/*
	 * Calling enter_critical()/exit_critical() here would lead
	 * to recursion.  Just manipulate self->ul_critical directly.
	 */
	self->ul_critical++;
	self->ul_writer = 0;
	while (self->ul_sleepq != NULL) {
		qp = queue_lock(self->ul_wchan, self->ul_qtype);
		/*
		 * We may have been moved from a CV queue to a
		 * mutex queue while we were attempting queue_lock().
		 * If so, just loop around and try again.
		 * dequeue_self() clears self->ul_sleepq.
		 */
		if (qp == self->ul_sleepq)
			(void) dequeue_self(qp, self->ul_wchan);
		queue_unlock(qp);
	}
	self->ul_critical--;
}
/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	hrtime_t begin_sleep;
	int error;

	self->ul_sp = stkptr();
	self->ul_wchan = mp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	for (;;) {
		if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0) {
			DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
			break;
		}

		if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
				    0, 0);
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			break;
		}
	}
	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;
	self->ul_wchan = NULL;
	self->ul_sp = 0;

	return (error);
}

/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error;

	for (;;) {
		if ((error = ___lwp_mutex_trylock(mp)) != 0) {
			if (error != EBUSY) {
				DTRACE_PROBE2(plockstat, mutex__error, mp,
				    error);
			}
			break;
		}

		if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
				    0, 0);
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			break;
		}
	}

	return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	sc_shared_t *tmp;

	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
	    !self->ul_vfork &&			/* not a child of vfork() */
	    !self->ul_schedctl_called) {	/* haven't been called before */
		enter_critical(self);
		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
			self->ul_schedctl = scp = tmp;
		exit_critical(self);
	}
	/*
	 * Unless the call to setup_schedctl() is surrounded
	 * by enter_critical()/exit_critical(), the address
	 * we are returning could be invalid due to a forkall()
	 * having occurred in another thread.
	 */
	return (scp);
}

/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = _schedctl_init
#pragma weak _schedctl_lookup = _schedctl_init
#pragma weak schedctl_init = _schedctl_init
schedctl_t *
_schedctl_init(void)
{
	volatile sc_shared_t *scp = setup_schedctl();
	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}
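
/*
 * Illustrative sketch (not part of the library): typical application use
 * of the schedctl interfaces exported here.  schedctl_start() and
 * schedctl_stop() are the documented companions of schedctl_init() that
 * manipulate the preemption-control block returned above; this pairing
 * is an assumption drawn from the schedctl_init(3C) interface.
 */
#if 0
#include <schedctl.h>

static void
example_schedctl_use(void)
{
	schedctl_t *sc = schedctl_init();

	if (sc != NULL) {
		schedctl_start(sc);	/* hint: hold off preemption */
		/* ... short critical section ... */
		schedctl_stop(sc);	/* re-enable, yielding if asked to */
	}
}
#endif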

#pragma weak schedctl_exit = _schedctl_exit
void
_schedctl_exit(void)
{
}

/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *volatile *ptr;

	if (self->ul_vfork)
		return (NULL);
	if (*(ptr = &self->ul_schedctl) == NULL)
		(void) setup_schedctl();
	return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_preempt++ == 0) {
		enter_critical(self);
		if ((scp = self->ul_schedctl) != NULL ||
		    (scp = setup_schedctl()) != NULL) {
			/*
			 * Save the pre-existing preempt value.
			 */
			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
			scp->sc_preemptctl.sc_nopreempt = 1;
		}
	}
}

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	ASSERT(self->ul_preempt > 0);
	if (--self->ul_preempt == 0) {
		if ((scp = self->ul_schedctl) != NULL) {
			/*
			 * Restore the pre-existing preempt value.
			 */
			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
			if (scp->sc_preemptctl.sc_yield &&
			    scp->sc_preemptctl.sc_nopreempt == 0) {
				lwp_yield();
				if (scp->sc_preemptctl.sc_yield) {
					/*
					 * Shouldn't happen.  This is either
					 * a race condition or the thread
					 * just entered the real-time class.
					 */
					lwp_yield();
					scp->sc_preemptctl.sc_yield = 0;
				}
			}
		}
		exit_critical(self);
	}
}
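
/*
 * Illustrative sketch (not part of the library): no_preempt()/preempt()
 * pairs may nest; only the outermost pair touches sc_nopreempt, because
 * of the ul_preempt counter maintained above.
 */
#if 0
static void
example_nested_no_preempt(void)
{
	ulwp_t *self = curthread;

	no_preempt(self);	/* ul_preempt 0 -> 1: sets sc_nopreempt */
	no_preempt(self);	/* ul_preempt 1 -> 2: no further effect */
	/* ... */
	preempt(self);		/* ul_preempt 2 -> 1: no effect yet */
	preempt(self);		/* ul_preempt 1 -> 0: restores, may yield */
}
#endif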

/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
	volatile sc_shared_t *scp = self->ul_schedctl;

	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
	    (self->ul_curplease && self->ul_critical == 1)) {
		(void) __lwp_unpark(lwpid);
		lwpid = 0;
	}
	return (lwpid);
}

/*
 * Spin for a while, trying to grab the lock.  We know that we
 * failed set_lock_byte(&mp->mutex_lockw) once before coming here.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
int
mutex_trylock_adaptive(mutex_t *mp)
{
	ulwp_t *self = curthread;
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	int count, max = self->ul_adaptive_spin;

	ASSERT(!(mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)));

	if (max == 0 || (mp->mutex_spinners >= self->ul_max_spinners))
		return (EBUSY);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;

	DTRACE_PROBE1(plockstat, mutex__spin, mp);

	/*
	 * This spin loop is unfair to lwps that have already dropped into
	 * the kernel to sleep.  They will starve on a highly-contended mutex.
	 * This is just too bad.  The adaptive spin algorithm is intended
	 * to allow programs with highly-contended locks (that is, broken
	 * programs) to execute with reasonable speed despite their contention.
	 * Being fair would reduce the speed of such programs and well-written
	 * programs will not suffer in any case.
	 */
	enter_critical(self);		/* protects ul_schedctl */
	incr32(&mp->mutex_spinners);
	for (count = 0; count < max; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			decr32(&mp->mutex_spinners);
			exit_critical(self);
			DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
			return (0);
		}
		SMT_PAUSE();
		/*
		 * Stop spinning if the mutex owner is not running on
		 * a processor; it will not drop the lock any time soon
		 * and we would just be wasting time to keep spinning.
		 *
		 * Note that we are looking at another thread (ulwp_t)
		 * without ensuring that the other thread does not exit.
		 * The scheme relies on ulwp_t structures never being
		 * deallocated by the library (the library employs a free
		 * list of ulwp_t structs that are reused when new threads
		 * are created) and on schedctl shared memory never being
		 * deallocated once created via __schedctl().
		 *
		 * Thus, the worst that can happen when the spinning thread
		 * looks at the owner's schedctl data is that it is looking
		 * at some other thread's schedctl data.  This almost never
		 * happens and is benign when it does.
		 */
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}
	decr32(&mp->mutex_spinners);
	exit_critical(self);

	DTRACE_PROBE2(plockstat, mutex__spun, 0, count);

	return (EBUSY);
}

/*
 * Same as mutex_trylock_adaptive(), except specifically for queue locks.
 * The owner field is not set here; the caller (spin_lock_set()) sets it.
 */
int
mutex_queuelock_adaptive(mutex_t *mp)
{
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	int count = curthread->ul_queue_spin;

	ASSERT(mp->mutex_type == USYNC_THREAD);

	if (count == 0)
		return (EBUSY);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;
	while (--count >= 0) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0)
			return (0);
		SMT_PAUSE();
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}

	return (EBUSY);
}

/*
 * Like mutex_trylock_adaptive(), but for process-shared mutexes.
 * Spin for a while, trying to grab the lock.  We know that we
 * failed set_lock_byte(&mp->mutex_lockw) once before coming here.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread
 * and mutex_ownerpid set to the current pid.
 */
int
mutex_trylock_process(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int count;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	volatile int32_t *pidp;
	pid_t pid, newpid;
	uint64_t owner, newowner;

	if ((count = ncpus) == 0)
		count = ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	count = (count > 1)? self->ul_adaptive_spin : 0;

	ASSERT((mp->mutex_type & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) ==
		USYNC_PROCESS);

	if (count == 0)
		return (EBUSY);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;
	pidp = (volatile int32_t *)&mp->mutex_ownerpid;
	owner = *ownerp;
	pid = *pidp;
	/*
	 * This is a process-shared mutex.
	 * We cannot know if the owner is running on a processor.
	 * We just spin and hope that it is on a processor.
	 */
	while (--count >= 0) {
		if (*lockp == 0) {
			enter_critical(self);
			if (set_lock_byte(lockp) == 0) {
				*ownerp = (uintptr_t)self;
				*pidp = udp->pid;
				exit_critical(self);
				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
				    0, 0);
				return (0);
			}
			exit_critical(self);
		} else if ((newowner = *ownerp) == owner &&
		    (newpid = *pidp) == pid) {
			SMT_PAUSE();
			continue;
		}
		/*
		 * The owner of the lock changed; start the count over again.
		 * This may be too aggressive; it needs testing.
		 */
		owner = newowner;
		pid = newpid;
		count = self->ul_adaptive_spin;
	}

	return (EBUSY);
}

/*
 * Mutex wakeup code for releasing a USYNC_THREAD mutex.
 * Returns the lwpid of the thread that was dequeued, if any.
 * The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
 * to wake up the specified lwp.
 */
lwpid_t
mutex_wakeup(mutex_t *mp)
{
	lwpid_t lwpid = 0;
	queue_head_t *qp;
	ulwp_t *ulwp;
	int more;

	/*
	 * Dequeue a waiter from the sleep queue.  Don't touch the mutex
	 * waiters bit if no one was found on the queue because the mutex
	 * might have been deallocated or reallocated for another purpose.
	 */
	qp = queue_lock(mp, MX);
	if ((ulwp = dequeue(qp, mp, &more)) != NULL) {
		lwpid = ulwp->ul_lwpid;
		mp->mutex_waiters = (more? 1 : 0);
	}
	queue_unlock(qp);
	return (lwpid);
}
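
/*
 * Illustrative sketch (not part of the library): the contract stated
 * above means a caller of mutex_wakeup() (or of mutex_unlock_queue(),
 * below, which returns the same lwpid) must finish the handoff itself.
 * The exact call site lives in the mutex unlock code; this pattern is
 * an assumption drawn from the comments here.
 */
#if 0
static void
example_wakeup_caller(mutex_t *mp)
{
	lwpid_t lwpid;

	if ((lwpid = mutex_unlock_queue(mp)) != 0) {
		(void) __lwp_unpark(lwpid);	/* wake the dequeued lwp */
		preempt(curthread);		/* drop the no_preempt level */
	}
}
#endif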
11440Sstevel@tonic-gate 
11450Sstevel@tonic-gate /*
11460Sstevel@tonic-gate  * Spin for a while, testing to see if the lock has been grabbed.
11470Sstevel@tonic-gate  * If this fails, call mutex_wakeup() to release a waiter.
11480Sstevel@tonic-gate  */
11490Sstevel@tonic-gate lwpid_t
11500Sstevel@tonic-gate mutex_unlock_queue(mutex_t *mp)
11510Sstevel@tonic-gate {
11520Sstevel@tonic-gate 	ulwp_t *self = curthread;
11530Sstevel@tonic-gate 	uint32_t *lockw = &mp->mutex_lockword;
11540Sstevel@tonic-gate 	lwpid_t lwpid;
11550Sstevel@tonic-gate 	volatile uint8_t *lockp;
11560Sstevel@tonic-gate 	volatile uint32_t *spinp;
11570Sstevel@tonic-gate 	int count;
11580Sstevel@tonic-gate 
11590Sstevel@tonic-gate 	/*
11600Sstevel@tonic-gate 	 * We use the swap primitive to clear the lock, but we must
11610Sstevel@tonic-gate 	 * atomically retain the waiters bit for the remainder of this
11620Sstevel@tonic-gate 	 * code to work.  We first check to see if the waiters bit is
11630Sstevel@tonic-gate 	 * set and if so clear the lock by swapping in a word containing
11640Sstevel@tonic-gate 	 * only the waiters bit.  This could produce a false positive test
11650Sstevel@tonic-gate 	 * for whether there are waiters that need to be waked up, but
11660Sstevel@tonic-gate 	 * this just causes an extra call to mutex_wakeup() to do nothing.
11670Sstevel@tonic-gate 	 * The opposite case is more delicate:  If there are no waiters,
11680Sstevel@tonic-gate 	 * we swap in a zero lock byte and a zero waiters bit.  The result
11690Sstevel@tonic-gate 	 * of the swap could indicate that there really was a waiter so in
11700Sstevel@tonic-gate 	 * this case we go directly to mutex_wakeup() without performing
11710Sstevel@tonic-gate 	 * any of the adaptive code because the waiter bit has been cleared
11720Sstevel@tonic-gate 	 * and the adaptive code is unreliable in this case.
11730Sstevel@tonic-gate 	 */
11740Sstevel@tonic-gate 	if (!(*lockw & WAITERMASK)) {	/* no waiter exists right now */
11750Sstevel@tonic-gate 		mp->mutex_owner = 0;
11760Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
11770Sstevel@tonic-gate 		if (!(swap32(lockw, 0) & WAITERMASK))	/* still no waiters */
11780Sstevel@tonic-gate 			return (0);
11790Sstevel@tonic-gate 		no_preempt(self);	/* ensure a prompt wakeup */
11800Sstevel@tonic-gate 		lwpid = mutex_wakeup(mp);
11810Sstevel@tonic-gate 	} else {
11820Sstevel@tonic-gate 		no_preempt(self);	/* ensure a prompt wakeup */
11830Sstevel@tonic-gate 		lockp = (volatile uint8_t *)&mp->mutex_lockw;
11840Sstevel@tonic-gate 		spinp = (volatile uint32_t *)&mp->mutex_spinners;
11850Sstevel@tonic-gate 		mp->mutex_owner = 0;
11860Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
11870Sstevel@tonic-gate 		(void) swap32(lockw, WAITER);	/* clear lock, retain waiter */
11880Sstevel@tonic-gate 
11890Sstevel@tonic-gate 		/*
11900Sstevel@tonic-gate 		 * We spin here fewer times than mutex_trylock_adaptive().
11910Sstevel@tonic-gate 		 * We are trying to balance two conflicting goals:
11920Sstevel@tonic-gate 		 * 1. Avoid waking up anyone if a spinning thread
11930Sstevel@tonic-gate 		 *    grabs the lock.
11940Sstevel@tonic-gate 		 * 2. Wake up a sleeping thread promptly to get on
11950Sstevel@tonic-gate 		 *    with useful work.
11960Sstevel@tonic-gate 		 * We don't spin at all if there is no acquiring spinner
11970Sstevel@tonic-gate 		 * (mp->mutex_spinners is non-zero if there are spinners).
11980Sstevel@tonic-gate 		 */
11990Sstevel@tonic-gate 		for (count = self->ul_release_spin;
12000Sstevel@tonic-gate 		    *spinp && count > 0; count--) {
12010Sstevel@tonic-gate 			/*
12020Sstevel@tonic-gate 			 * There is a waiter that we will have to wake
12030Sstevel@tonic-gate 			 * up unless someone else grabs the lock while
12040Sstevel@tonic-gate 			 * we are busy spinning.  Like the spin loop in
12050Sstevel@tonic-gate 			 * mutex_trylock_adaptive(), this spin loop is
12060Sstevel@tonic-gate 			 * unfair to lwps that have already dropped into
12070Sstevel@tonic-gate 			 * the kernel to sleep.  They will starve on a
12080Sstevel@tonic-gate 			 * highly-contended mutex.  Too bad.
12090Sstevel@tonic-gate 			 */
12100Sstevel@tonic-gate 			if (*lockp != 0) {	/* somebody grabbed the lock */
12110Sstevel@tonic-gate 				preempt(self);
12120Sstevel@tonic-gate 				return (0);
12130Sstevel@tonic-gate 			}
12140Sstevel@tonic-gate 			SMT_PAUSE();
12150Sstevel@tonic-gate 		}
12160Sstevel@tonic-gate 
12170Sstevel@tonic-gate 		/*
12180Sstevel@tonic-gate 		 * No one grabbed the lock.
12190Sstevel@tonic-gate 		 * Wake up some lwp that is waiting for it.
12200Sstevel@tonic-gate 		 */
12210Sstevel@tonic-gate 		mp->mutex_waiters = 0;
12220Sstevel@tonic-gate 		lwpid = mutex_wakeup(mp);
12230Sstevel@tonic-gate 	}
12240Sstevel@tonic-gate 
12250Sstevel@tonic-gate 	if (lwpid == 0)
12260Sstevel@tonic-gate 		preempt(self);
12270Sstevel@tonic-gate 	return (lwpid);
12280Sstevel@tonic-gate }
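
/*
 * Illustrative sketch (not part of the original source) of the release
 * protocol described above, with owner bookkeeping, preemption control,
 * DTrace probes and the spin phase omitted.  Compiled out.
 */
#if 0
static lwpid_t
unlock_protocol_sketch(mutex_t *mp)
{
	if (!(mp->mutex_lockword & WAITERMASK)) {
		/* clear the lock byte and the waiters bit together */
		if (!(swap32(&mp->mutex_lockword, 0) & WAITERMASK))
			return (0);		/* still no waiters: done */
		return (mutex_wakeup(mp));	/* a waiter slipped in */
	}
	/* clear the lock byte but deliberately keep the waiters bit */
	(void) swap32(&mp->mutex_lockword, WAITER);
	mp->mutex_waiters = 0;		/* must be cleared before waking */
	return (mutex_wakeup(mp));
}
#endif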
12290Sstevel@tonic-gate 
12300Sstevel@tonic-gate /*
12310Sstevel@tonic-gate  * Like mutex_unlock_queue(), but for process-shared mutexes.
12320Sstevel@tonic-gate  * We tested the waiters field before calling here and it was non-zero.
12330Sstevel@tonic-gate  */
12340Sstevel@tonic-gate void
12350Sstevel@tonic-gate mutex_unlock_process(mutex_t *mp)
12360Sstevel@tonic-gate {
12370Sstevel@tonic-gate 	ulwp_t *self = curthread;
12380Sstevel@tonic-gate 	int count;
12390Sstevel@tonic-gate 	volatile uint8_t *lockp;
12400Sstevel@tonic-gate 
12410Sstevel@tonic-gate 	/*
12420Sstevel@tonic-gate 	 * See the comments in mutex_unlock_queue(), above.
12430Sstevel@tonic-gate 	 */
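	/*
	 * Editorial note: this spin pays off only on a multiprocessor;
	 * with one CPU online no spinning acquirer can run while we do,
	 * so the count is forced to zero and a waiter is woken at once.
	 */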
12440Sstevel@tonic-gate 	if ((count = ncpus) == 0)
12450Sstevel@tonic-gate 		count = ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
12460Sstevel@tonic-gate 	count = (count > 1)? self->ul_release_spin : 0;
12470Sstevel@tonic-gate 	no_preempt(self);
12480Sstevel@tonic-gate 	mp->mutex_owner = 0;
12490Sstevel@tonic-gate 	mp->mutex_ownerpid = 0;
12500Sstevel@tonic-gate 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
12510Sstevel@tonic-gate 	if (count == 0) {
12520Sstevel@tonic-gate 		/* clear lock, test waiter */
12530Sstevel@tonic-gate 		if (!(swap32(&mp->mutex_lockword, 0) & WAITERMASK)) {
12540Sstevel@tonic-gate 			/* no waiters now */
12550Sstevel@tonic-gate 			preempt(self);
12560Sstevel@tonic-gate 			return;
12570Sstevel@tonic-gate 		}
12580Sstevel@tonic-gate 	} else {
12590Sstevel@tonic-gate 		/* clear lock, retain waiter */
12600Sstevel@tonic-gate 		(void) swap32(&mp->mutex_lockword, WAITER);
12610Sstevel@tonic-gate 		lockp = (volatile uint8_t *)&mp->mutex_lockw;
12620Sstevel@tonic-gate 		while (--count >= 0) {
12630Sstevel@tonic-gate 			if (*lockp != 0) {
12640Sstevel@tonic-gate 				/* somebody grabbed the lock */
12650Sstevel@tonic-gate 				preempt(self);
12660Sstevel@tonic-gate 				return;
12670Sstevel@tonic-gate 			}
12680Sstevel@tonic-gate 			SMT_PAUSE();
12690Sstevel@tonic-gate 		}
12700Sstevel@tonic-gate 		/*
12710Sstevel@tonic-gate 		 * We must clear the waiters field before going
12720Sstevel@tonic-gate 		 * to the kernel, else it could remain set forever.
12730Sstevel@tonic-gate 		 */
12740Sstevel@tonic-gate 		mp->mutex_waiters = 0;
12750Sstevel@tonic-gate 	}
12760Sstevel@tonic-gate 	(void) ___lwp_mutex_wakeup(mp);
12770Sstevel@tonic-gate 	preempt(self);
12780Sstevel@tonic-gate }
12790Sstevel@tonic-gate 
12800Sstevel@tonic-gate /*
12810Sstevel@tonic-gate  * Return the real priority of a thread.
12820Sstevel@tonic-gate  */
12830Sstevel@tonic-gate int
12840Sstevel@tonic-gate real_priority(ulwp_t *ulwp)
12850Sstevel@tonic-gate {
12860Sstevel@tonic-gate 	if (ulwp->ul_epri == 0)
12870Sstevel@tonic-gate 		return (ulwp->ul_mappedpri? ulwp->ul_mappedpri : ulwp->ul_pri);
12880Sstevel@tonic-gate 	return (ulwp->ul_emappedpri? ulwp->ul_emappedpri : ulwp->ul_epri);
12890Sstevel@tonic-gate }
12900Sstevel@tonic-gate 
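/*
 * stall() never returns: it parks forever in the kernel on the
 * permanently-held stall_mutex.  It is called below when acquiring a
 * PTHREAD_PRIO_INHERIT mutex ends in an unrecoverable error.
 */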
12910Sstevel@tonic-gate void
12920Sstevel@tonic-gate stall(void)
12930Sstevel@tonic-gate {
12940Sstevel@tonic-gate 	for (;;)
12950Sstevel@tonic-gate 		(void) mutex_lock_kernel(&stall_mutex, NULL, NULL);
12960Sstevel@tonic-gate }
12970Sstevel@tonic-gate 
12980Sstevel@tonic-gate /*
12990Sstevel@tonic-gate  * Acquire a USYNC_THREAD mutex via user-level sleep queues.
13000Sstevel@tonic-gate  * We failed a set_lock_byte(&mp->mutex_lockw) attempt before coming here.
13010Sstevel@tonic-gate  * Returns with mutex_owner set correctly.
13020Sstevel@tonic-gate  */
13030Sstevel@tonic-gate int
13040Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp,
13050Sstevel@tonic-gate 	timespec_t *tsp)
13060Sstevel@tonic-gate {
13070Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
13080Sstevel@tonic-gate 	queue_head_t *qp;
13090Sstevel@tonic-gate 	hrtime_t begin_sleep;
13100Sstevel@tonic-gate 	int error = 0;
13110Sstevel@tonic-gate 
13120Sstevel@tonic-gate 	self->ul_sp = stkptr();
13130Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
13140Sstevel@tonic-gate 		self->ul_wchan = mp;
13150Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
13160Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = mp;
13170Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
13180Sstevel@tonic-gate 	}
13190Sstevel@tonic-gate 	if (msp) {
13200Sstevel@tonic-gate 		tdb_incr(msp->mutex_sleep);
13210Sstevel@tonic-gate 		begin_sleep = gethrtime();
13220Sstevel@tonic-gate 	}
13230Sstevel@tonic-gate 
13240Sstevel@tonic-gate 	DTRACE_PROBE1(plockstat, mutex__block, mp);
13250Sstevel@tonic-gate 
13260Sstevel@tonic-gate 	/*
13270Sstevel@tonic-gate 	 * Put ourselves on the sleep queue and, while we are
13280Sstevel@tonic-gate 	 * unable to grab the lock, park in the kernel.
13290Sstevel@tonic-gate 	 * Take ourselves off the sleep queue after we acquire the lock.
13300Sstevel@tonic-gate 	 * The waiter bit can be set/cleared only while holding the queue lock.
13310Sstevel@tonic-gate 	 */
13320Sstevel@tonic-gate 	qp = queue_lock(mp, MX);
13330Sstevel@tonic-gate 	enqueue(qp, self, mp, MX);
13340Sstevel@tonic-gate 	mp->mutex_waiters = 1;
13350Sstevel@tonic-gate 	for (;;) {
13360Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
13370Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
13380Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
13390Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
13400Sstevel@tonic-gate 			mp->mutex_waiters = dequeue_self(qp, mp);
13410Sstevel@tonic-gate 			break;
13420Sstevel@tonic-gate 		}
13430Sstevel@tonic-gate 		set_parking_flag(self, 1);
13440Sstevel@tonic-gate 		queue_unlock(qp);
13450Sstevel@tonic-gate 		/*
13460Sstevel@tonic-gate 		 * __lwp_park() returns the residual time in tsp when we are
13470Sstevel@tonic-gate 		 * unparked early, so retries consume, rather than restart, the timeout.
13480Sstevel@tonic-gate 		 */
13490Sstevel@tonic-gate 		if ((error = __lwp_park(tsp, 0)) == EINTR)
13500Sstevel@tonic-gate 			error = 0;
13510Sstevel@tonic-gate 		set_parking_flag(self, 0);
13520Sstevel@tonic-gate 		/*
13530Sstevel@tonic-gate 		 * We could have taken a signal or suspended ourselves.
13540Sstevel@tonic-gate 		 * If we did, then we removed ourselves from the queue.
13550Sstevel@tonic-gate 		 * Someone else may have removed us from the queue
13560Sstevel@tonic-gate 		 * as a consequence of mutex_unlock().  We may have
13570Sstevel@tonic-gate 		 * gotten a timeout from __lwp_park().  Or we may still
13580Sstevel@tonic-gate 		 * be on the queue and this is just a spurious wakeup.
13590Sstevel@tonic-gate 		 */
13600Sstevel@tonic-gate 		qp = queue_lock(mp, MX);
13610Sstevel@tonic-gate 		if (self->ul_sleepq == NULL) {
13620Sstevel@tonic-gate 			if (error) {
13630Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
13640Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__error, mp,
13650Sstevel@tonic-gate 				    error);
13660Sstevel@tonic-gate 				break;
13670Sstevel@tonic-gate 			}
13680Sstevel@tonic-gate 			if (set_lock_byte(&mp->mutex_lockw) == 0) {
13690Sstevel@tonic-gate 				mp->mutex_owner = (uintptr_t)self;
13700Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
13710Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
13720Sstevel@tonic-gate 				    0, 0);
13730Sstevel@tonic-gate 				break;
13740Sstevel@tonic-gate 			}
13750Sstevel@tonic-gate 			enqueue(qp, self, mp, MX);
13760Sstevel@tonic-gate 			mp->mutex_waiters = 1;
13770Sstevel@tonic-gate 		}
13780Sstevel@tonic-gate 		ASSERT(self->ul_sleepq == qp &&
13790Sstevel@tonic-gate 		    self->ul_qtype == MX &&
13800Sstevel@tonic-gate 		    self->ul_wchan == mp);
13810Sstevel@tonic-gate 		if (error) {
13820Sstevel@tonic-gate 			mp->mutex_waiters = dequeue_self(qp, mp);
13830Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
13840Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
13850Sstevel@tonic-gate 			break;
13860Sstevel@tonic-gate 		}
13870Sstevel@tonic-gate 	}
13880Sstevel@tonic-gate 
13890Sstevel@tonic-gate 	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
13900Sstevel@tonic-gate 	    self->ul_wchan == NULL);
13910Sstevel@tonic-gate 	self->ul_sp = 0;
13920Sstevel@tonic-gate 
13930Sstevel@tonic-gate 	queue_unlock(qp);
13940Sstevel@tonic-gate 	if (msp)
13950Sstevel@tonic-gate 		msp->mutex_sleep_time += gethrtime() - begin_sleep;
13960Sstevel@tonic-gate 
13970Sstevel@tonic-gate 	ASSERT(error == 0 || error == EINVAL || error == ETIME);
13980Sstevel@tonic-gate 	return (error);
13990Sstevel@tonic-gate }
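
/*
 * Editorial note on the loop above: the waiters bit is set whenever we
 * are (re)enqueued and is recomputed from the queue when we dequeue
 * ourselves, always under the queue lock; we leave holding the mutex
 * unless error is EINVAL or ETIME, as the final ASSERT records.
 */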
14000Sstevel@tonic-gate 
14010Sstevel@tonic-gate /*
14020Sstevel@tonic-gate  * Returns with mutex_owner set correctly.
14030Sstevel@tonic-gate  */
14040Sstevel@tonic-gate int
14050Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try)
14060Sstevel@tonic-gate {
14070Sstevel@tonic-gate 	ulwp_t *self = curthread;
14080Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
14090Sstevel@tonic-gate 	int mtype = mp->mutex_type;
14100Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
14110Sstevel@tonic-gate 	int error = 0;
14120Sstevel@tonic-gate 
14130Sstevel@tonic-gate 	ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);
14140Sstevel@tonic-gate 
14150Sstevel@tonic-gate 	if (!self->ul_schedctl_called)
14160Sstevel@tonic-gate 		(void) setup_schedctl();
14170Sstevel@tonic-gate 
14180Sstevel@tonic-gate 	if (msp && try == MUTEX_TRY)
14190Sstevel@tonic-gate 		tdb_incr(msp->mutex_try);
14200Sstevel@tonic-gate 
14210Sstevel@tonic-gate 	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_is_held(mp)) {
14220Sstevel@tonic-gate 		if (mtype & LOCK_RECURSIVE) {
14230Sstevel@tonic-gate 			if (mp->mutex_rcount == RECURSION_MAX) {
14240Sstevel@tonic-gate 				error = EAGAIN;
14250Sstevel@tonic-gate 			} else {
14260Sstevel@tonic-gate 				mp->mutex_rcount++;
14270Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
14280Sstevel@tonic-gate 				    1, 0);
14290Sstevel@tonic-gate 				return (0);
14300Sstevel@tonic-gate 			}
14310Sstevel@tonic-gate 		} else if (try == MUTEX_TRY) {
14320Sstevel@tonic-gate 			return (EBUSY);
14330Sstevel@tonic-gate 		} else {
14340Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
14350Sstevel@tonic-gate 			return (EDEADLK);
14360Sstevel@tonic-gate 		}
14370Sstevel@tonic-gate 	}
14380Sstevel@tonic-gate 
14390Sstevel@tonic-gate 	if (self->ul_error_detection && try == MUTEX_LOCK &&
14400Sstevel@tonic-gate 	    tsp == NULL && mutex_is_held(mp))
14410Sstevel@tonic-gate 		lock_error(mp, "mutex_lock", NULL, NULL);
14420Sstevel@tonic-gate 
14430Sstevel@tonic-gate 	if (mtype &
14440Sstevel@tonic-gate 	    (USYNC_PROCESS_ROBUST|PTHREAD_PRIO_INHERIT|PTHREAD_PRIO_PROTECT)) {
14450Sstevel@tonic-gate 		uint8_t ceil;
14460Sstevel@tonic-gate 		int myprio;
14470Sstevel@tonic-gate 
14480Sstevel@tonic-gate 		if (mtype & PTHREAD_PRIO_PROTECT) {
14490Sstevel@tonic-gate 			ceil = mp->mutex_ceiling;
14500Sstevel@tonic-gate 			ASSERT(_validate_rt_prio(SCHED_FIFO, ceil) == 0);
14510Sstevel@tonic-gate 			myprio = real_priority(self);
14520Sstevel@tonic-gate 			if (myprio > ceil) {
14530Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__error, mp,
14540Sstevel@tonic-gate 				    EINVAL);
14550Sstevel@tonic-gate 				return (EINVAL);
14560Sstevel@tonic-gate 			}
14570Sstevel@tonic-gate 			if ((error = _ceil_mylist_add(mp)) != 0) {
14580Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__error, mp,
14590Sstevel@tonic-gate 				    error);
14600Sstevel@tonic-gate 				return (error);
14610Sstevel@tonic-gate 			}
14620Sstevel@tonic-gate 			if (myprio < ceil)
14630Sstevel@tonic-gate 				_ceil_prio_inherit(ceil);
14640Sstevel@tonic-gate 		}
14650Sstevel@tonic-gate 
14660Sstevel@tonic-gate 		if (mtype & PTHREAD_PRIO_INHERIT) {
14670Sstevel@tonic-gate 			/* go straight to the kernel */
14680Sstevel@tonic-gate 			if (try == MUTEX_TRY)
14690Sstevel@tonic-gate 				error = mutex_trylock_kernel(mp);
14700Sstevel@tonic-gate 			else	/* MUTEX_LOCK */
14710Sstevel@tonic-gate 				error = mutex_lock_kernel(mp, tsp, msp);
14720Sstevel@tonic-gate 			/*
14730Sstevel@tonic-gate 			 * The kernel never sets or clears the lock byte
14740Sstevel@tonic-gate 			 * for PTHREAD_PRIO_INHERIT mutexes.
14750Sstevel@tonic-gate 			 * Set it here for debugging consistency.
14760Sstevel@tonic-gate 			 */
14770Sstevel@tonic-gate 			switch (error) {
14780Sstevel@tonic-gate 			case 0:
14790Sstevel@tonic-gate 			case EOWNERDEAD:
14800Sstevel@tonic-gate 				mp->mutex_lockw = LOCKSET;
14810Sstevel@tonic-gate 				break;
14820Sstevel@tonic-gate 			}
14830Sstevel@tonic-gate 		} else if (mtype & USYNC_PROCESS_ROBUST) {
14840Sstevel@tonic-gate 			/* go straight to the kernel */
14850Sstevel@tonic-gate 			if (try == MUTEX_TRY)
14860Sstevel@tonic-gate 				error = mutex_trylock_kernel(mp);
14870Sstevel@tonic-gate 			else	/* MUTEX_LOCK */
14880Sstevel@tonic-gate 				error = mutex_lock_kernel(mp, tsp, msp);
14890Sstevel@tonic-gate 		} else {	/* PTHREAD_PRIO_PROTECT */
14900Sstevel@tonic-gate 			/*
14910Sstevel@tonic-gate 			 * Try once at user level before going to the kernel.
14920Sstevel@tonic-gate 			 * If this is a process shared mutex then protect
14930Sstevel@tonic-gate 			 * against forkall() while setting mp->mutex_ownerpid.
14940Sstevel@tonic-gate 			 */
14950Sstevel@tonic-gate 			if (mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)) {
14960Sstevel@tonic-gate 				enter_critical(self);
14970Sstevel@tonic-gate 				if (set_lock_byte(&mp->mutex_lockw) == 0) {
14980Sstevel@tonic-gate 					mp->mutex_owner = (uintptr_t)self;
14990Sstevel@tonic-gate 					mp->mutex_ownerpid = udp->pid;
15000Sstevel@tonic-gate 					exit_critical(self);
15010Sstevel@tonic-gate 					DTRACE_PROBE3(plockstat,
15020Sstevel@tonic-gate 					    mutex__acquire, mp, 0, 0);
15030Sstevel@tonic-gate 				} else {
15040Sstevel@tonic-gate 					exit_critical(self);
15050Sstevel@tonic-gate 					error = EBUSY;
15060Sstevel@tonic-gate 				}
15070Sstevel@tonic-gate 			} else {
15080Sstevel@tonic-gate 				if (set_lock_byte(&mp->mutex_lockw) == 0) {
15090Sstevel@tonic-gate 					mp->mutex_owner = (uintptr_t)self;
15100Sstevel@tonic-gate 					DTRACE_PROBE3(plockstat,
15110Sstevel@tonic-gate 					    mutex__acquire, mp, 0, 0);
15120Sstevel@tonic-gate 				} else {
15130Sstevel@tonic-gate 					error = EBUSY;
15140Sstevel@tonic-gate 				}
15150Sstevel@tonic-gate 			}
15160Sstevel@tonic-gate 			if (error && try == MUTEX_LOCK)
15170Sstevel@tonic-gate 				error = mutex_lock_kernel(mp, tsp, msp);
15180Sstevel@tonic-gate 		}
15190Sstevel@tonic-gate 
15200Sstevel@tonic-gate 		if (error) {
15210Sstevel@tonic-gate 			if (mtype & PTHREAD_PRIO_INHERIT) {
15220Sstevel@tonic-gate 				switch (error) {
15230Sstevel@tonic-gate 				case EOWNERDEAD:
15240Sstevel@tonic-gate 				case ENOTRECOVERABLE:
15250Sstevel@tonic-gate 					if (mtype & PTHREAD_MUTEX_ROBUST_NP)
15260Sstevel@tonic-gate 						break;
15270Sstevel@tonic-gate 					if (error == EOWNERDEAD) {
15280Sstevel@tonic-gate 						/*
15290Sstevel@tonic-gate 						 * We own the mutex; unlock it.
15300Sstevel@tonic-gate 						 * It becomes ENOTRECOVERABLE.
15310Sstevel@tonic-gate 						 * All waiters are woken up.
15320Sstevel@tonic-gate 						 */
15330Sstevel@tonic-gate 						mp->mutex_owner = 0;
15340Sstevel@tonic-gate 						mp->mutex_ownerpid = 0;
15350Sstevel@tonic-gate 						DTRACE_PROBE2(plockstat,
15360Sstevel@tonic-gate 						    mutex__release, mp, 0);
15370Sstevel@tonic-gate 						mp->mutex_lockw = LOCKCLEAR;
15380Sstevel@tonic-gate 						(void) ___lwp_mutex_unlock(mp);
15390Sstevel@tonic-gate 					}
15400Sstevel@tonic-gate 					/* FALLTHROUGH */
15410Sstevel@tonic-gate 				case EDEADLK:
15420Sstevel@tonic-gate 					if (try == MUTEX_LOCK)
15430Sstevel@tonic-gate 						stall();
15440Sstevel@tonic-gate 					error = EBUSY;
15450Sstevel@tonic-gate 					break;
15460Sstevel@tonic-gate 				}
15470Sstevel@tonic-gate 			}
15480Sstevel@tonic-gate 			if ((mtype & PTHREAD_PRIO_PROTECT) &&
15490Sstevel@tonic-gate 			    error != EOWNERDEAD) {
15500Sstevel@tonic-gate 				(void) _ceil_mylist_del(mp);
15510Sstevel@tonic-gate 				if (myprio < ceil)
15520Sstevel@tonic-gate 					_ceil_prio_waive();
15530Sstevel@tonic-gate 			}
15540Sstevel@tonic-gate 		}
15550Sstevel@tonic-gate 	} else if (mtype & USYNC_PROCESS) {
15560Sstevel@tonic-gate 		/*
15570Sstevel@tonic-gate 		 * This is a process shared mutex.  Protect against
15580Sstevel@tonic-gate 		 * forkall() while setting mp->mutex_ownerpid.
15590Sstevel@tonic-gate 		 */
15600Sstevel@tonic-gate 		enter_critical(self);
15610Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
15620Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
15630Sstevel@tonic-gate 			mp->mutex_ownerpid = udp->pid;
15640Sstevel@tonic-gate 			exit_critical(self);
15650Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
15660Sstevel@tonic-gate 		} else {
15670Sstevel@tonic-gate 			/* try a little harder */
15680Sstevel@tonic-gate 			exit_critical(self);
15690Sstevel@tonic-gate 			error = mutex_trylock_process(mp);
15700Sstevel@tonic-gate 		}
15710Sstevel@tonic-gate 		if (error && try == MUTEX_LOCK)
15720Sstevel@tonic-gate 			error = mutex_lock_kernel(mp, tsp, msp);
15730Sstevel@tonic-gate 	} else {	/* USYNC_THREAD */
15740Sstevel@tonic-gate 		/* try once */
15750Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
15760Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
15770Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
15780Sstevel@tonic-gate 		} else {
15790Sstevel@tonic-gate 			/* try a little harder if we don't own the mutex */
15800Sstevel@tonic-gate 			error = EBUSY;
15810Sstevel@tonic-gate 			if (MUTEX_OWNER(mp) != self)
15820Sstevel@tonic-gate 				error = mutex_trylock_adaptive(mp);
15830Sstevel@tonic-gate 			if (error && try == MUTEX_LOCK)		/* go park */
15840Sstevel@tonic-gate 				error = mutex_lock_queue(self, msp, mp, tsp);
15850Sstevel@tonic-gate 		}
15860Sstevel@tonic-gate 	}
15870Sstevel@tonic-gate 
15880Sstevel@tonic-gate 	switch (error) {
15890Sstevel@tonic-gate 	case EOWNERDEAD:
15900Sstevel@tonic-gate 	case ELOCKUNMAPPED:
15910Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
15920Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
15930Sstevel@tonic-gate 		/* FALLTHROUGH */
15940Sstevel@tonic-gate 	case 0:
15950Sstevel@tonic-gate 		if (msp)
15960Sstevel@tonic-gate 			record_begin_hold(msp);
15970Sstevel@tonic-gate 		break;
15980Sstevel@tonic-gate 	default:
15990Sstevel@tonic-gate 		if (try == MUTEX_TRY) {
16000Sstevel@tonic-gate 			if (msp)
16010Sstevel@tonic-gate 				tdb_incr(msp->mutex_try_fail);
16020Sstevel@tonic-gate 			if (__td_event_report(self, TD_LOCK_TRY, udp)) {
16030Sstevel@tonic-gate 				self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
16040Sstevel@tonic-gate 				tdb_event(TD_LOCK_TRY, udp);
16050Sstevel@tonic-gate 			}
16060Sstevel@tonic-gate 		}
16070Sstevel@tonic-gate 		break;
16080Sstevel@tonic-gate 	}
16090Sstevel@tonic-gate 
16100Sstevel@tonic-gate 	return (error);
16110Sstevel@tonic-gate }
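
/*
 * Editorial summary of the dispatch in mutex_lock_internal():
 *
 *	PTHREAD_PRIO_INHERIT	straight to the kernel (the lock byte is
 *				set here only for debugging consistency)
 *	USYNC_PROCESS_ROBUST	straight to the kernel
 *	PTHREAD_PRIO_PROTECT	one user-level try, then the kernel
 *	USYNC_PROCESS		user-level fast path, then kernel sleep
 *	USYNC_THREAD		adaptive spin, then user-level sleep queue
 */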
16120Sstevel@tonic-gate 
16130Sstevel@tonic-gate int
16140Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try)
16150Sstevel@tonic-gate {
16160Sstevel@tonic-gate 	ulwp_t *self = curthread;
16170Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
16180Sstevel@tonic-gate 
16190Sstevel@tonic-gate 	/*
16200Sstevel@tonic-gate 	 * We know that USYNC_PROCESS is set in mtype and that
16210Sstevel@tonic-gate 	 * zero, one, or both of the flags LOCK_RECURSIVE and
16220Sstevel@tonic-gate 	 * LOCK_ERRORCHECK are set, and that no other flags are set.
16230Sstevel@tonic-gate 	 */
16240Sstevel@tonic-gate 	enter_critical(self);
16250Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) == 0) {
16260Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
16270Sstevel@tonic-gate 		mp->mutex_ownerpid = udp->pid;
16280Sstevel@tonic-gate 		exit_critical(self);
16290Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
16300Sstevel@tonic-gate 		return (0);
16310Sstevel@tonic-gate 	}
16320Sstevel@tonic-gate 	exit_critical(self);
16330Sstevel@tonic-gate 
16340Sstevel@tonic-gate 	if ((mtype & ~USYNC_PROCESS) && shared_mutex_held(mp)) {
16350Sstevel@tonic-gate 		if (mtype & LOCK_RECURSIVE) {
16360Sstevel@tonic-gate 			if (mp->mutex_rcount == RECURSION_MAX)
16370Sstevel@tonic-gate 				return (EAGAIN);
16380Sstevel@tonic-gate 			mp->mutex_rcount++;
16390Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0);
16400Sstevel@tonic-gate 			return (0);
16410Sstevel@tonic-gate 		}
16420Sstevel@tonic-gate 		if (try == MUTEX_LOCK) {
16430Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
16440Sstevel@tonic-gate 			return (EDEADLK);
16450Sstevel@tonic-gate 		}
16460Sstevel@tonic-gate 		return (EBUSY);
16470Sstevel@tonic-gate 	}
16480Sstevel@tonic-gate 
16490Sstevel@tonic-gate 	/* try a little harder if we don't own the mutex */
16500Sstevel@tonic-gate 	if (!shared_mutex_held(mp) && mutex_trylock_process(mp) == 0)
16510Sstevel@tonic-gate 		return (0);
16520Sstevel@tonic-gate 
16530Sstevel@tonic-gate 	if (try == MUTEX_LOCK)
16540Sstevel@tonic-gate 		return (mutex_lock_kernel(mp, tsp, NULL));
16550Sstevel@tonic-gate 
16560Sstevel@tonic-gate 	if (__td_event_report(self, TD_LOCK_TRY, udp)) {
16570Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
16580Sstevel@tonic-gate 		tdb_event(TD_LOCK_TRY, udp);
16590Sstevel@tonic-gate 	}
16600Sstevel@tonic-gate 	return (EBUSY);
16610Sstevel@tonic-gate }
16620Sstevel@tonic-gate 
16630Sstevel@tonic-gate static int
16640Sstevel@tonic-gate slow_lock(ulwp_t *self, mutex_t *mp, timespec_t *tsp)
16650Sstevel@tonic-gate {
16660Sstevel@tonic-gate 	int error = 0;
16670Sstevel@tonic-gate 
16680Sstevel@tonic-gate 	if (MUTEX_OWNER(mp) == self || mutex_trylock_adaptive(mp) != 0)
16690Sstevel@tonic-gate 		error = mutex_lock_queue(self, NULL, mp, tsp);
16700Sstevel@tonic-gate 	return (error);
16710Sstevel@tonic-gate }
16720Sstevel@tonic-gate 
16730Sstevel@tonic-gate int
16740Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp)
16750Sstevel@tonic-gate {
16760Sstevel@tonic-gate 	ulwp_t *self = curthread;
16770Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
16780Sstevel@tonic-gate 	uberflags_t *gflags;
16790Sstevel@tonic-gate 	int mtype;
16800Sstevel@tonic-gate 
16810Sstevel@tonic-gate 	/*
16820Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
16830Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
16840Sstevel@tonic-gate 	 * no error detection, no lock statistics,
16850Sstevel@tonic-gate 	 * and the process has only a single thread.
16860Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
16870Sstevel@tonic-gate 	 */
16880Sstevel@tonic-gate 	if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
16890Sstevel@tonic-gate 	    udp->uberflags.uf_all) == 0) {
16900Sstevel@tonic-gate 		/*
16910Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
16920Sstevel@tonic-gate 		 */
16930Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
16940Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
16950Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
16960Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
16970Sstevel@tonic-gate 			return (0);
16980Sstevel@tonic-gate 		}
16990Sstevel@tonic-gate 		if (mtype && MUTEX_OWNER(mp) == self) {
17000Sstevel@tonic-gate 			/*
17010Sstevel@tonic-gate 			 * LOCK_RECURSIVE, LOCK_ERRORCHECK, or both.
17020Sstevel@tonic-gate 			 */
17030Sstevel@tonic-gate 			if (mtype & LOCK_RECURSIVE) {
17040Sstevel@tonic-gate 				if (mp->mutex_rcount == RECURSION_MAX)
17050Sstevel@tonic-gate 					return (EAGAIN);
17060Sstevel@tonic-gate 				mp->mutex_rcount++;
17070Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
17080Sstevel@tonic-gate 				    1, 0);
17090Sstevel@tonic-gate 				return (0);
17100Sstevel@tonic-gate 			}
17110Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
17120Sstevel@tonic-gate 			return (EDEADLK);	/* LOCK_ERRORCHECK */
17130Sstevel@tonic-gate 		}
17140Sstevel@tonic-gate 		/*
17150Sstevel@tonic-gate 		 * We have reached a deadlock, probably because the
17160Sstevel@tonic-gate 		 * process is executing non-async-signal-safe code in
17170Sstevel@tonic-gate 		 * a signal handler and is attempting to acquire a lock
17180Sstevel@tonic-gate 		 * that it already owns.  This is not surprising, given
17190Sstevel@tonic-gate 		 * bad programming practices over the years that have
17200Sstevel@tonic-gate 		 * resulted in applications calling printf() and such
17210Sstevel@tonic-gate 		 * in their signal handlers.  Unless the user has told
17220Sstevel@tonic-gate 		 * us that the signal handlers are safe by setting:
17230Sstevel@tonic-gate 		 *	export _THREAD_ASYNC_SAFE=1
17240Sstevel@tonic-gate 		 * we return EDEADLK rather than actually deadlocking.
17250Sstevel@tonic-gate 		 */
17260Sstevel@tonic-gate 		if (tsp == NULL &&
17270Sstevel@tonic-gate 		    MUTEX_OWNER(mp) == self && !self->ul_async_safe) {
17280Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
17290Sstevel@tonic-gate 			return (EDEADLK);
17300Sstevel@tonic-gate 		}
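		/*
		 * Illustrative pattern (not original code) of what is
		 * being caught here: a handler relocking a lock that the
		 * interrupted code already holds:
		 *
		 *	mutex_lock(&m);			-- main program
		 *	  ... signal arrives ...
		 *	  handler: mutex_lock(&m);	-- would deadlock;
		 *					-- returns EDEADLK
		 */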
17310Sstevel@tonic-gate 	}
17320Sstevel@tonic-gate 
17330Sstevel@tonic-gate 	/*
17340Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
17350Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
17360Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
17370Sstevel@tonic-gate 	 */
17380Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
17390Sstevel@tonic-gate 	    (gflags->uf_trs_ted |
17400Sstevel@tonic-gate 	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
17410Sstevel@tonic-gate 
17420Sstevel@tonic-gate 		if (mtype & USYNC_PROCESS)
17430Sstevel@tonic-gate 			return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK));
17440Sstevel@tonic-gate 
17450Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
17460Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
17470Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
17480Sstevel@tonic-gate 			return (0);
17490Sstevel@tonic-gate 		}
17500Sstevel@tonic-gate 
17510Sstevel@tonic-gate 		if (mtype && MUTEX_OWNER(mp) == self) {
17520Sstevel@tonic-gate 			if (mtype & LOCK_RECURSIVE) {
17530Sstevel@tonic-gate 				if (mp->mutex_rcount == RECURSION_MAX)
17540Sstevel@tonic-gate 					return (EAGAIN);
17550Sstevel@tonic-gate 				mp->mutex_rcount++;
17560Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
17570Sstevel@tonic-gate 				    1, 0);
17580Sstevel@tonic-gate 				return (0);
17590Sstevel@tonic-gate 			}
17600Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
17610Sstevel@tonic-gate 			return (EDEADLK);	/* LOCK_ERRORCHECK */
17620Sstevel@tonic-gate 		}
17630Sstevel@tonic-gate 
17640Sstevel@tonic-gate 		return (slow_lock(self, mp, tsp));
17650Sstevel@tonic-gate 	}
17660Sstevel@tonic-gate 
17670Sstevel@tonic-gate 	/* else do it the long way */
17680Sstevel@tonic-gate 	return (mutex_lock_internal(mp, tsp, MUTEX_LOCK));
17690Sstevel@tonic-gate }
17700Sstevel@tonic-gate 
17710Sstevel@tonic-gate #pragma weak _private_mutex_lock = __mutex_lock
17720Sstevel@tonic-gate #pragma weak mutex_lock = __mutex_lock
17730Sstevel@tonic-gate #pragma weak _mutex_lock = __mutex_lock
17740Sstevel@tonic-gate #pragma weak pthread_mutex_lock = __mutex_lock
17750Sstevel@tonic-gate #pragma weak _pthread_mutex_lock = __mutex_lock
17760Sstevel@tonic-gate int
17770Sstevel@tonic-gate __mutex_lock(mutex_t *mp)
17780Sstevel@tonic-gate {
17790Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
17800Sstevel@tonic-gate 	return (mutex_lock_impl(mp, NULL));
17810Sstevel@tonic-gate }
17820Sstevel@tonic-gate 
17830Sstevel@tonic-gate #pragma weak pthread_mutex_timedlock = _pthread_mutex_timedlock
17840Sstevel@tonic-gate int
17850Sstevel@tonic-gate _pthread_mutex_timedlock(mutex_t *mp, const timespec_t *abstime)
17860Sstevel@tonic-gate {
17870Sstevel@tonic-gate 	timespec_t tslocal;
17880Sstevel@tonic-gate 	int error;
17890Sstevel@tonic-gate 
17900Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
17910Sstevel@tonic-gate 	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
17920Sstevel@tonic-gate 	error = mutex_lock_impl(mp, &tslocal);
17930Sstevel@tonic-gate 	if (error == ETIME)
17940Sstevel@tonic-gate 		error = ETIMEDOUT;
17950Sstevel@tonic-gate 	return (error);
17960Sstevel@tonic-gate }
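
/*
 * Illustrative usage sketch (not part of the library).  The deadline
 * is absolute and measured against CLOCK_REALTIME, the clock that
 * abstime_to_reltime() converts from above.  All names are standard
 * POSIX; compiled out.
 */
#if 0
#include <pthread.h>
#include <time.h>

static int
lock_with_deadline(pthread_mutex_t *mp, time_t seconds)
{
	struct timespec abstime;

	(void) clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += seconds;	/* a deadline, not a duration */
	return (pthread_mutex_timedlock(mp, &abstime));	/* 0 or ETIMEDOUT */
}
#endif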
17970Sstevel@tonic-gate 
17980Sstevel@tonic-gate #pragma weak pthread_mutex_reltimedlock_np = _pthread_mutex_reltimedlock_np
17990Sstevel@tonic-gate int
18000Sstevel@tonic-gate _pthread_mutex_reltimedlock_np(mutex_t *mp, const timespec_t *reltime)
18010Sstevel@tonic-gate {
18020Sstevel@tonic-gate 	timespec_t tslocal;
18030Sstevel@tonic-gate 	int error;
18040Sstevel@tonic-gate 
18050Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
18060Sstevel@tonic-gate 	tslocal = *reltime;
18070Sstevel@tonic-gate 	error = mutex_lock_impl(mp, &tslocal);
18080Sstevel@tonic-gate 	if (error == ETIME)
18090Sstevel@tonic-gate 		error = ETIMEDOUT;
18100Sstevel@tonic-gate 	return (error);
18110Sstevel@tonic-gate }
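
/*
 * Editorial note: unlike the POSIX deadline interface above, this _np
 * variant takes a duration; e.g. a half-second bound is the relative
 * timespec { 0, 500000000 }.
 */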
18120Sstevel@tonic-gate 
18130Sstevel@tonic-gate static int
18140Sstevel@tonic-gate slow_trylock(mutex_t *mp, ulwp_t *self)
18150Sstevel@tonic-gate {
18160Sstevel@tonic-gate 	if (MUTEX_OWNER(mp) == self ||
18170Sstevel@tonic-gate 	    mutex_trylock_adaptive(mp) != 0) {
18180Sstevel@tonic-gate 		uberdata_t *udp = self->ul_uberdata;
18190Sstevel@tonic-gate 
18200Sstevel@tonic-gate 		if (__td_event_report(self, TD_LOCK_TRY, udp)) {
18210Sstevel@tonic-gate 			self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
18220Sstevel@tonic-gate 			tdb_event(TD_LOCK_TRY, udp);
18230Sstevel@tonic-gate 		}
18240Sstevel@tonic-gate 		return (EBUSY);
18250Sstevel@tonic-gate 	}
18260Sstevel@tonic-gate 	return (0);
18270Sstevel@tonic-gate }
18280Sstevel@tonic-gate 
18290Sstevel@tonic-gate #pragma weak _private_mutex_trylock = __mutex_trylock
18300Sstevel@tonic-gate #pragma weak mutex_trylock = __mutex_trylock
18310Sstevel@tonic-gate #pragma weak _mutex_trylock = __mutex_trylock
18320Sstevel@tonic-gate #pragma weak pthread_mutex_trylock = __mutex_trylock
18330Sstevel@tonic-gate #pragma weak _pthread_mutex_trylock = __mutex_trylock
18340Sstevel@tonic-gate int
18350Sstevel@tonic-gate __mutex_trylock(mutex_t *mp)
18360Sstevel@tonic-gate {
18370Sstevel@tonic-gate 	ulwp_t *self = curthread;
18380Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
18390Sstevel@tonic-gate 	uberflags_t *gflags;
18400Sstevel@tonic-gate 	int mtype;
18410Sstevel@tonic-gate 
18420Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
18430Sstevel@tonic-gate 	/*
18440Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
18450Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
18460Sstevel@tonic-gate 	 * no error detection, no lock statistics,
18470Sstevel@tonic-gate 	 * and the process has only a single thread.
18480Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
18490Sstevel@tonic-gate 	 */
18500Sstevel@tonic-gate 	if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
18510Sstevel@tonic-gate 	    udp->uberflags.uf_all) == 0) {
18520Sstevel@tonic-gate 		/*
18530Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
18540Sstevel@tonic-gate 		 */
18550Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
18560Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
18570Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
18580Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
18590Sstevel@tonic-gate 			return (0);
18600Sstevel@tonic-gate 		}
18610Sstevel@tonic-gate 		if (mtype && MUTEX_OWNER(mp) == self) {
18620Sstevel@tonic-gate 			if (mtype & LOCK_RECURSIVE) {
18630Sstevel@tonic-gate 				if (mp->mutex_rcount == RECURSION_MAX)
18640Sstevel@tonic-gate 					return (EAGAIN);
18650Sstevel@tonic-gate 				mp->mutex_rcount++;
18660Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
18670Sstevel@tonic-gate 				    1, 0);
18680Sstevel@tonic-gate 				return (0);
18690Sstevel@tonic-gate 			}
18700Sstevel@tonic-gate 			return (EDEADLK);	/* LOCK_ERRORCHECK */
18710Sstevel@tonic-gate 		}
18720Sstevel@tonic-gate 		return (EBUSY);
18730Sstevel@tonic-gate 	}
18740Sstevel@tonic-gate 
18750Sstevel@tonic-gate 	/*
18760Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
18770Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
18780Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
18790Sstevel@tonic-gate 	 */
18800Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
18810Sstevel@tonic-gate 	    (gflags->uf_trs_ted |
18820Sstevel@tonic-gate 	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
18830Sstevel@tonic-gate 
18840Sstevel@tonic-gate 		if (mtype & USYNC_PROCESS)
18850Sstevel@tonic-gate 			return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY));
18860Sstevel@tonic-gate 
18870Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
18880Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
18890Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
18900Sstevel@tonic-gate 			return (0);
18910Sstevel@tonic-gate 		}
18920Sstevel@tonic-gate 
18930Sstevel@tonic-gate 		if (mtype && MUTEX_OWNER(mp) == self) {
18940Sstevel@tonic-gate 			if (mtype & LOCK_RECURSIVE) {
18950Sstevel@tonic-gate 				if (mp->mutex_rcount == RECURSION_MAX)
18960Sstevel@tonic-gate 					return (EAGAIN);
18970Sstevel@tonic-gate 				mp->mutex_rcount++;
18980Sstevel@tonic-gate 				DTRACE_PROBE3(plockstat, mutex__acquire, mp,
18990Sstevel@tonic-gate 				    1, 0);
19000Sstevel@tonic-gate 				return (0);
19010Sstevel@tonic-gate 			}
19020Sstevel@tonic-gate 			return (EBUSY);		/* LOCK_ERRORCHECK */
19030Sstevel@tonic-gate 		}
19040Sstevel@tonic-gate 
19050Sstevel@tonic-gate 		return (slow_trylock(mp, self));
19060Sstevel@tonic-gate 	}
19070Sstevel@tonic-gate 
19080Sstevel@tonic-gate 	/* else do it the long way */
19090Sstevel@tonic-gate 	return (mutex_lock_internal(mp, NULL, MUTEX_TRY));
19100Sstevel@tonic-gate }
19110Sstevel@tonic-gate 
19120Sstevel@tonic-gate int
19130Sstevel@tonic-gate mutex_unlock_internal(mutex_t *mp)
19140Sstevel@tonic-gate {
19150Sstevel@tonic-gate 	ulwp_t *self = curthread;
19160Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
19170Sstevel@tonic-gate 	int mtype = mp->mutex_type;
19180Sstevel@tonic-gate 	tdb_mutex_stats_t *msp;
19190Sstevel@tonic-gate 	int error;
19200Sstevel@tonic-gate 	lwpid_t lwpid;
19210Sstevel@tonic-gate 
19220Sstevel@tonic-gate 	if ((mtype & LOCK_ERRORCHECK) && !mutex_is_held(mp))
19230Sstevel@tonic-gate 		return (EPERM);
19240Sstevel@tonic-gate 
19250Sstevel@tonic-gate 	if (self->ul_error_detection && !mutex_is_held(mp))
19260Sstevel@tonic-gate 		lock_error(mp, "mutex_unlock", NULL, NULL);
19270Sstevel@tonic-gate 
19280Sstevel@tonic-gate 	if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
19290Sstevel@tonic-gate 		mp->mutex_rcount--;
19300Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
19310Sstevel@tonic-gate 		return (0);
19320Sstevel@tonic-gate 	}
19330Sstevel@tonic-gate 
19340Sstevel@tonic-gate 	if ((msp = MUTEX_STATS(mp, udp)) != NULL)
19350Sstevel@tonic-gate 		(void) record_hold_time(msp);
19360Sstevel@tonic-gate 
19370Sstevel@tonic-gate 	if (mtype &
19380Sstevel@tonic-gate 	    (USYNC_PROCESS_ROBUST|PTHREAD_PRIO_INHERIT|PTHREAD_PRIO_PROTECT)) {
19390Sstevel@tonic-gate 		no_preempt(self);
19400Sstevel@tonic-gate 		mp->mutex_owner = 0;
19410Sstevel@tonic-gate 		mp->mutex_ownerpid = 0;
19420Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
19430Sstevel@tonic-gate 		if (mtype & PTHREAD_PRIO_INHERIT) {
19440Sstevel@tonic-gate 			mp->mutex_lockw = LOCKCLEAR;
19450Sstevel@tonic-gate 			error = ___lwp_mutex_unlock(mp);
19460Sstevel@tonic-gate 		} else if (mtype & USYNC_PROCESS_ROBUST) {
19470Sstevel@tonic-gate 			error = ___lwp_mutex_unlock(mp);
19480Sstevel@tonic-gate 		} else {
19490Sstevel@tonic-gate 			if (swap32(&mp->mutex_lockword, 0) & WAITERMASK)
19500Sstevel@tonic-gate 				(void) ___lwp_mutex_wakeup(mp);
19510Sstevel@tonic-gate 			error = 0;
19520Sstevel@tonic-gate 		}
19530Sstevel@tonic-gate 		if (mtype & PTHREAD_PRIO_PROTECT) {
19540Sstevel@tonic-gate 			if (_ceil_mylist_del(mp))
19550Sstevel@tonic-gate 				_ceil_prio_waive();
19560Sstevel@tonic-gate 		}
19570Sstevel@tonic-gate 		preempt(self);
19580Sstevel@tonic-gate 	} else if (mtype & USYNC_PROCESS) {
19590Sstevel@tonic-gate 		if (mp->mutex_lockword & WAITERMASK)
19600Sstevel@tonic-gate 			mutex_unlock_process(mp);
19610Sstevel@tonic-gate 		else {
19620Sstevel@tonic-gate 			mp->mutex_owner = 0;
19630Sstevel@tonic-gate 			mp->mutex_ownerpid = 0;
19640Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
19650Sstevel@tonic-gate 			if (swap32(&mp->mutex_lockword, 0) & WAITERMASK) {
19660Sstevel@tonic-gate 				no_preempt(self);
19670Sstevel@tonic-gate 				(void) ___lwp_mutex_wakeup(mp);
19680Sstevel@tonic-gate 				preempt(self);
19690Sstevel@tonic-gate 			}
19700Sstevel@tonic-gate 		}
19710Sstevel@tonic-gate 		error = 0;
19720Sstevel@tonic-gate 	} else {	/* USYNC_THREAD */
19730Sstevel@tonic-gate 		if ((lwpid = mutex_unlock_queue(mp)) != 0) {
19740Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid);
19750Sstevel@tonic-gate 			preempt(self);
19760Sstevel@tonic-gate 		}
19770Sstevel@tonic-gate 		error = 0;
19780Sstevel@tonic-gate 	}
19790Sstevel@tonic-gate 
19800Sstevel@tonic-gate 	return (error);
19810Sstevel@tonic-gate }
19820Sstevel@tonic-gate 
19830Sstevel@tonic-gate #pragma weak _private_mutex_unlock = __mutex_unlock
19840Sstevel@tonic-gate #pragma weak mutex_unlock = __mutex_unlock
19850Sstevel@tonic-gate #pragma weak _mutex_unlock = __mutex_unlock
19860Sstevel@tonic-gate #pragma weak pthread_mutex_unlock = __mutex_unlock
19870Sstevel@tonic-gate #pragma weak _pthread_mutex_unlock = __mutex_unlock
19880Sstevel@tonic-gate int
19890Sstevel@tonic-gate __mutex_unlock(mutex_t *mp)
19900Sstevel@tonic-gate {
19910Sstevel@tonic-gate 	ulwp_t *self = curthread;
19920Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
19930Sstevel@tonic-gate 	uberflags_t *gflags;
19940Sstevel@tonic-gate 	lwpid_t lwpid;
19950Sstevel@tonic-gate 	int mtype;
19960Sstevel@tonic-gate 	short el;
19970Sstevel@tonic-gate 
19980Sstevel@tonic-gate 	/*
19990Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
20000Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
20010Sstevel@tonic-gate 	 * no error detection, no lock statistics,
20020Sstevel@tonic-gate 	 * and the process has only a single thread.
20030Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
20040Sstevel@tonic-gate 	 */
20050Sstevel@tonic-gate 	if ((((mtype = mp->mutex_type) & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
20060Sstevel@tonic-gate 	    udp->uberflags.uf_all) == 0) {
20070Sstevel@tonic-gate 		if (mtype) {
20080Sstevel@tonic-gate 			/*
20090Sstevel@tonic-gate 			 * At this point we know that one or both of the
20100Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
20110Sstevel@tonic-gate 			 */
20120Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
20130Sstevel@tonic-gate 				return (EPERM);
20140Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
20150Sstevel@tonic-gate 				mp->mutex_rcount--;
20160Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
20170Sstevel@tonic-gate 				return (0);
20180Sstevel@tonic-gate 			}
20190Sstevel@tonic-gate 		}
20200Sstevel@tonic-gate 		/*
20210Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
20220Sstevel@tonic-gate 		 * Also, there can be no waiters.
20230Sstevel@tonic-gate 		 */
20240Sstevel@tonic-gate 		mp->mutex_owner = 0;
20250Sstevel@tonic-gate 		mp->mutex_lockword = 0;
20260Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
20270Sstevel@tonic-gate 		return (0);
20280Sstevel@tonic-gate 	}
20290Sstevel@tonic-gate 
20300Sstevel@tonic-gate 	/*
20310Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
20320Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
20330Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
20340Sstevel@tonic-gate 	 */
20350Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL) {
20360Sstevel@tonic-gate 		if (((el = gflags->uf_trs_ted) | mtype) == 0) {
20370Sstevel@tonic-gate fast_unlock:
20380Sstevel@tonic-gate 			if (!(mp->mutex_lockword & WAITERMASK)) {
20390Sstevel@tonic-gate 				/* no waiter exists right now */
20400Sstevel@tonic-gate 				mp->mutex_owner = 0;
20410Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
20420Sstevel@tonic-gate 				if (swap32(&mp->mutex_lockword, 0) &
20430Sstevel@tonic-gate 				    WAITERMASK) {
20440Sstevel@tonic-gate 					/* a waiter suddenly appeared */
20450Sstevel@tonic-gate 					no_preempt(self);
20460Sstevel@tonic-gate 					if ((lwpid = mutex_wakeup(mp)) != 0)
20470Sstevel@tonic-gate 						(void) __lwp_unpark(lwpid);
20480Sstevel@tonic-gate 					preempt(self);
20490Sstevel@tonic-gate 				}
20500Sstevel@tonic-gate 			} else if ((lwpid = mutex_unlock_queue(mp)) != 0) {
20510Sstevel@tonic-gate 				(void) __lwp_unpark(lwpid);
20520Sstevel@tonic-gate 				preempt(self);
20530Sstevel@tonic-gate 			}
20540Sstevel@tonic-gate 			return (0);
20550Sstevel@tonic-gate 		}
20560Sstevel@tonic-gate 		if (el)		/* error detection or lock statistics */
20570Sstevel@tonic-gate 			goto slow_unlock;
20580Sstevel@tonic-gate 		if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
20590Sstevel@tonic-gate 			/*
20600Sstevel@tonic-gate 			 * At this point we know that one or both of the
20610Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
20620Sstevel@tonic-gate 			 */
20630Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
20640Sstevel@tonic-gate 				return (EPERM);
20650Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
20660Sstevel@tonic-gate 				mp->mutex_rcount--;
20670Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
20680Sstevel@tonic-gate 				return (0);
20690Sstevel@tonic-gate 			}
20700Sstevel@tonic-gate 			goto fast_unlock;
20710Sstevel@tonic-gate 		}
20720Sstevel@tonic-gate 		if ((mtype &
20730Sstevel@tonic-gate 		    ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
20740Sstevel@tonic-gate 			/*
20750Sstevel@tonic-gate 			 * At this point we know that zero, one, or both of the
20760Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and
20770Sstevel@tonic-gate 			 * that the USYNC_PROCESS flag is set.
20780Sstevel@tonic-gate 			 */
20790Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp))
20800Sstevel@tonic-gate 				return (EPERM);
20810Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
20820Sstevel@tonic-gate 				mp->mutex_rcount--;
20830Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
20840Sstevel@tonic-gate 				return (0);
20850Sstevel@tonic-gate 			}
20860Sstevel@tonic-gate 			if (mp->mutex_lockword & WAITERMASK)
20870Sstevel@tonic-gate 				mutex_unlock_process(mp);
20880Sstevel@tonic-gate 			else {
20890Sstevel@tonic-gate 				mp->mutex_owner = 0;
20900Sstevel@tonic-gate 				mp->mutex_ownerpid = 0;
20910Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
20920Sstevel@tonic-gate 				if (swap32(&mp->mutex_lockword, 0) &
20930Sstevel@tonic-gate 				    WAITERMASK) {
20940Sstevel@tonic-gate 					no_preempt(self);
20950Sstevel@tonic-gate 					(void) ___lwp_mutex_wakeup(mp);
20960Sstevel@tonic-gate 					preempt(self);
20970Sstevel@tonic-gate 				}
20980Sstevel@tonic-gate 			}
20990Sstevel@tonic-gate 			return (0);
21000Sstevel@tonic-gate 		}
21010Sstevel@tonic-gate 	}
21020Sstevel@tonic-gate 
21030Sstevel@tonic-gate 	/* else do it the long way */
21040Sstevel@tonic-gate slow_unlock:
21050Sstevel@tonic-gate 	return (mutex_unlock_internal(mp));
21060Sstevel@tonic-gate }
21070Sstevel@tonic-gate 
21080Sstevel@tonic-gate /*
21090Sstevel@tonic-gate  * Internally to the library, almost all mutex lock/unlock actions
21100Sstevel@tonic-gate  * go through these lmutex_ functions, to protect critical regions.
21110Sstevel@tonic-gate  * We replicate a bit of code from __mutex_lock() and __mutex_unlock()
21120Sstevel@tonic-gate  * to make these functions faster since we know that the mutex type
21130Sstevel@tonic-gate  * of all internal locks is USYNC_THREAD.  We also know that internal
21140Sstevel@tonic-gate  * locking can never fail, so we panic if it does.
21150Sstevel@tonic-gate  */
21160Sstevel@tonic-gate void
21170Sstevel@tonic-gate lmutex_lock(mutex_t *mp)
21180Sstevel@tonic-gate {
21190Sstevel@tonic-gate 	ulwp_t *self = curthread;
21200Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
21210Sstevel@tonic-gate 
21220Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
21230Sstevel@tonic-gate 
21240Sstevel@tonic-gate 	enter_critical(self);
21250Sstevel@tonic-gate 	/*
21260Sstevel@tonic-gate 	 * Optimize the case of no lock statistics and only a single thread.
21270Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
21280Sstevel@tonic-gate 	 */
21290Sstevel@tonic-gate 	if (udp->uberflags.uf_all == 0) {
21300Sstevel@tonic-gate 		/*
21310Sstevel@tonic-gate 		 * Only one thread exists; the mutex must be free.
21320Sstevel@tonic-gate 		 */
21330Sstevel@tonic-gate 		ASSERT(mp->mutex_lockw == 0);
21340Sstevel@tonic-gate 		mp->mutex_lockw = LOCKSET;
21350Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
21360Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
21370Sstevel@tonic-gate 	} else {
21380Sstevel@tonic-gate 		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
21390Sstevel@tonic-gate 
21400Sstevel@tonic-gate 		if (!self->ul_schedctl_called)
21410Sstevel@tonic-gate 			(void) setup_schedctl();
21420Sstevel@tonic-gate 
21430Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
21440Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
21450Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
21460Sstevel@tonic-gate 		} else if (mutex_trylock_adaptive(mp) != 0) {
21470Sstevel@tonic-gate 			(void) mutex_lock_queue(self, msp, mp, NULL);
21480Sstevel@tonic-gate 		}
21490Sstevel@tonic-gate 
21500Sstevel@tonic-gate 		if (msp)
21510Sstevel@tonic-gate 			record_begin_hold(msp);
21520Sstevel@tonic-gate 	}
21530Sstevel@tonic-gate }
21540Sstevel@tonic-gate 
21550Sstevel@tonic-gate void
21560Sstevel@tonic-gate lmutex_unlock(mutex_t *mp)
21570Sstevel@tonic-gate {
21580Sstevel@tonic-gate 	ulwp_t *self = curthread;
21590Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
21600Sstevel@tonic-gate 
21610Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
21620Sstevel@tonic-gate 
21630Sstevel@tonic-gate 	/*
21640Sstevel@tonic-gate 	 * Optimize the case of no lock statistics and only a single thread.
21650Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
21660Sstevel@tonic-gate 	 */
21670Sstevel@tonic-gate 	if (udp->uberflags.uf_all == 0) {
21680Sstevel@tonic-gate 		/*
21690Sstevel@tonic-gate 		 * Only one thread exists so there can be no waiters.
21700Sstevel@tonic-gate 		 */
21710Sstevel@tonic-gate 		mp->mutex_owner = 0;
21720Sstevel@tonic-gate 		mp->mutex_lockword = 0;
21730Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
21740Sstevel@tonic-gate 	} else {
21750Sstevel@tonic-gate 		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
21760Sstevel@tonic-gate 		lwpid_t lwpid;
21770Sstevel@tonic-gate 
21780Sstevel@tonic-gate 		if (msp)
21790Sstevel@tonic-gate 			(void) record_hold_time(msp);
21800Sstevel@tonic-gate 		if ((lwpid = mutex_unlock_queue(mp)) != 0) {
21810Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid);
21820Sstevel@tonic-gate 			preempt(self);
21830Sstevel@tonic-gate 		}
21840Sstevel@tonic-gate 	}
21850Sstevel@tonic-gate 	exit_critical(self);
21860Sstevel@tonic-gate }
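
/*
 * Illustrative usage sketch (the lock name is hypothetical): internal
 * callers bracket a libc critical region with the pair:
 *
 *	lmutex_lock(&some_libc_internal_lock);
 *	... touch libc-private state ...
 *	lmutex_unlock(&some_libc_internal_lock);
 */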
21870Sstevel@tonic-gate 
21880Sstevel@tonic-gate static int
21890Sstevel@tonic-gate shared_mutex_held(mutex_t *mparg)
21900Sstevel@tonic-gate {
21910Sstevel@tonic-gate 	/*
21920Sstevel@tonic-gate 	 * There is an inherent data race in the current ownership design.
21930Sstevel@tonic-gate 	 * The mutex_owner and mutex_ownerpid fields cannot be set or tested
21940Sstevel@tonic-gate 	 * atomically as a pair. The original implementation tested each
21950Sstevel@tonic-gate 	 * field just once. This was exposed to trivial false positives in
21960Sstevel@tonic-gate 	 * the case of multiple multithreaded processes with thread addresses
21970Sstevel@tonic-gate 	 * in common. To close the window to an acceptable level we now use a
21980Sstevel@tonic-gate 	 * sequence of five tests: pid-thr-pid-thr-pid. This ensures that any
21990Sstevel@tonic-gate 	 * single interruption will still leave one uninterrupted sequence of
22000Sstevel@tonic-gate 	 * pid-thr-pid tests intact.
22010Sstevel@tonic-gate 	 *
22020Sstevel@tonic-gate 	 * It is assumed that all updates are always ordered thr-pid and that
22030Sstevel@tonic-gate 	 * we have TSO hardware.
22040Sstevel@tonic-gate 	 */
22050Sstevel@tonic-gate 	volatile mutex_t *mp = (volatile mutex_t *)mparg;
22060Sstevel@tonic-gate 	ulwp_t *self = curthread;
22070Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
22080Sstevel@tonic-gate 
22090Sstevel@tonic-gate 	if (mp->mutex_ownerpid != udp->pid)
22100Sstevel@tonic-gate 		return (0);
22110Sstevel@tonic-gate 
22120Sstevel@tonic-gate 	if (!MUTEX_OWNED(mp, self))
22130Sstevel@tonic-gate 		return (0);
22140Sstevel@tonic-gate 
22150Sstevel@tonic-gate 	if (mp->mutex_ownerpid != udp->pid)
22160Sstevel@tonic-gate 		return (0);
22170Sstevel@tonic-gate 
22180Sstevel@tonic-gate 	if (!MUTEX_OWNED(mp, self))
22190Sstevel@tonic-gate 		return (0);
22200Sstevel@tonic-gate 
22210Sstevel@tonic-gate 	if (mp->mutex_ownerpid != udp->pid)
22220Sstevel@tonic-gate 		return (0);
22230Sstevel@tonic-gate 
22240Sstevel@tonic-gate 	return (1);
22250Sstevel@tonic-gate }
22260Sstevel@tonic-gate 
22270Sstevel@tonic-gate /*
22280Sstevel@tonic-gate  * Some crufty old programs define their own version of _mutex_held()
22290Sstevel@tonic-gate  * to be simply return(1).  This breaks internal libc logic, so we
22300Sstevel@tonic-gate  * define a private version for exclusive use by libc, mutex_is_held(),
22310Sstevel@tonic-gate  * and also a new public function, __mutex_held(), to be used in new
22320Sstevel@tonic-gate  * code to circumvent these crufty old programs.
22330Sstevel@tonic-gate  */
22340Sstevel@tonic-gate #pragma weak mutex_held = mutex_is_held
22350Sstevel@tonic-gate #pragma weak _mutex_held = mutex_is_held
22360Sstevel@tonic-gate #pragma weak __mutex_held = mutex_is_held
22370Sstevel@tonic-gate int
22380Sstevel@tonic-gate mutex_is_held(mutex_t *mp)
22390Sstevel@tonic-gate {
22400Sstevel@tonic-gate 	if (mp->mutex_type & (USYNC_PROCESS | USYNC_PROCESS_ROBUST))
22410Sstevel@tonic-gate 		return (shared_mutex_held(mp));
22420Sstevel@tonic-gate 	return (MUTEX_OWNED(mp, curthread));
22430Sstevel@tonic-gate }
22440Sstevel@tonic-gate 
22450Sstevel@tonic-gate #pragma weak _private_mutex_destroy = __mutex_destroy
22460Sstevel@tonic-gate #pragma weak mutex_destroy = __mutex_destroy
22470Sstevel@tonic-gate #pragma weak _mutex_destroy = __mutex_destroy
22480Sstevel@tonic-gate #pragma weak pthread_mutex_destroy = __mutex_destroy
22490Sstevel@tonic-gate #pragma weak _pthread_mutex_destroy = __mutex_destroy
22500Sstevel@tonic-gate int
22510Sstevel@tonic-gate __mutex_destroy(mutex_t *mp)
22520Sstevel@tonic-gate {
22530Sstevel@tonic-gate 	mp->mutex_magic = 0;
22540Sstevel@tonic-gate 	mp->mutex_flag &= ~LOCK_INITED;
22550Sstevel@tonic-gate 	tdb_sync_obj_deregister(mp);
22560Sstevel@tonic-gate 	return (0);
22570Sstevel@tonic-gate }
22580Sstevel@tonic-gate 
22590Sstevel@tonic-gate /*
22600Sstevel@tonic-gate  * Spin locks are separate from ordinary mutexes,
22610Sstevel@tonic-gate  * but we use the same data structure for them.
22620Sstevel@tonic-gate  */
22630Sstevel@tonic-gate 
22640Sstevel@tonic-gate #pragma weak pthread_spin_init = _pthread_spin_init
22650Sstevel@tonic-gate int
22660Sstevel@tonic-gate _pthread_spin_init(pthread_spinlock_t *lock, int pshared)
22670Sstevel@tonic-gate {
22680Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
22690Sstevel@tonic-gate 
22700Sstevel@tonic-gate 	(void) _memset(mp, 0, sizeof (*mp));
22710Sstevel@tonic-gate 	if (pshared == PTHREAD_PROCESS_SHARED)
22720Sstevel@tonic-gate 		mp->mutex_type = USYNC_PROCESS;
22730Sstevel@tonic-gate 	else
22740Sstevel@tonic-gate 		mp->mutex_type = USYNC_THREAD;
22750Sstevel@tonic-gate 	mp->mutex_flag = LOCK_INITED;
22760Sstevel@tonic-gate 	mp->mutex_magic = MUTEX_MAGIC;
22770Sstevel@tonic-gate 	return (0);
22780Sstevel@tonic-gate }
22790Sstevel@tonic-gate 
22800Sstevel@tonic-gate #pragma weak pthread_spin_destroy = _pthread_spin_destroy
22810Sstevel@tonic-gate int
22820Sstevel@tonic-gate _pthread_spin_destroy(pthread_spinlock_t *lock)
22830Sstevel@tonic-gate {
22840Sstevel@tonic-gate 	(void) _memset(lock, 0, sizeof (*lock));
22850Sstevel@tonic-gate 	return (0);
22860Sstevel@tonic-gate }
22870Sstevel@tonic-gate 
22880Sstevel@tonic-gate #pragma weak pthread_spin_trylock = _pthread_spin_trylock
22890Sstevel@tonic-gate int
22900Sstevel@tonic-gate _pthread_spin_trylock(pthread_spinlock_t *lock)
22910Sstevel@tonic-gate {
22920Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
22930Sstevel@tonic-gate 	ulwp_t *self = curthread;
22940Sstevel@tonic-gate 	int error = 0;
22950Sstevel@tonic-gate 
22960Sstevel@tonic-gate 	no_preempt(self);
22970Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) != 0)
22980Sstevel@tonic-gate 		error = EBUSY;
22990Sstevel@tonic-gate 	else {
23000Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
23010Sstevel@tonic-gate 		if (mp->mutex_type == USYNC_PROCESS)
23020Sstevel@tonic-gate 			mp->mutex_ownerpid = self->ul_uberdata->pid;
23030Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
23040Sstevel@tonic-gate 	}
23050Sstevel@tonic-gate 	preempt(self);
23060Sstevel@tonic-gate 	return (error);
23070Sstevel@tonic-gate }
23080Sstevel@tonic-gate 
23090Sstevel@tonic-gate #pragma weak pthread_spin_lock = _pthread_spin_lock
23100Sstevel@tonic-gate int
23110Sstevel@tonic-gate _pthread_spin_lock(pthread_spinlock_t *lock)
23120Sstevel@tonic-gate {
23130Sstevel@tonic-gate 	volatile uint8_t *lockp =
23140Sstevel@tonic-gate 		(volatile uint8_t *)&((mutex_t *)lock)->mutex_lockw;
23150Sstevel@tonic-gate 
23160Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
23170Sstevel@tonic-gate 	/*
23180Sstevel@tonic-gate 	 * We don't care whether the owner is running on a processor.
23190Sstevel@tonic-gate 	 * We just spin because that's what this interface requires.
23200Sstevel@tonic-gate 	 */
23210Sstevel@tonic-gate 	for (;;) {
23220Sstevel@tonic-gate 		if (*lockp == 0) {	/* lock byte appears to be clear */
23230Sstevel@tonic-gate 			if (_pthread_spin_trylock(lock) == 0)
23240Sstevel@tonic-gate 				return (0);
23250Sstevel@tonic-gate 		}
23260Sstevel@tonic-gate 		SMT_PAUSE();
23270Sstevel@tonic-gate 	}
23280Sstevel@tonic-gate }
23290Sstevel@tonic-gate 
23300Sstevel@tonic-gate #pragma weak pthread_spin_unlock = _pthread_spin_unlock
23310Sstevel@tonic-gate int
23320Sstevel@tonic-gate _pthread_spin_unlock(pthread_spinlock_t *lock)
23330Sstevel@tonic-gate {
23340Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
23350Sstevel@tonic-gate 	ulwp_t *self = curthread;
23360Sstevel@tonic-gate 
23370Sstevel@tonic-gate 	no_preempt(self);
23380Sstevel@tonic-gate 	mp->mutex_owner = 0;
23390Sstevel@tonic-gate 	mp->mutex_ownerpid = 0;
23400Sstevel@tonic-gate 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
23410Sstevel@tonic-gate 	(void) swap32(&mp->mutex_lockword, 0);
23420Sstevel@tonic-gate 	preempt(self);
23430Sstevel@tonic-gate 	return (0);
23440Sstevel@tonic-gate }
23450Sstevel@tonic-gate 
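/*
 * Illustrative sketch, not part of libc: minimal application use of
 * the spin lock interfaces above.  Only the pthread_spin_*()
 * functions are real interfaces; the counter and its helpers are
 * hypothetical.
 */
#if 0	/* example only -- not compiled into libc */
#include <pthread.h>

static pthread_spinlock_t count_lock;
static int count;

static void
count_setup(void)
{
	/* pass PTHREAD_PROCESS_SHARED instead to share across processes */
	(void) pthread_spin_init(&count_lock, PTHREAD_PROCESS_PRIVATE);
}

static void
count_bump(void)
{
	/*
	 * Suitable only for very short critical sections:
	 * pthread_spin_lock() spins forever (see above); it never
	 * parks the thread in the kernel.
	 */
	(void) pthread_spin_lock(&count_lock);
	count++;
	(void) pthread_spin_unlock(&count_lock);
}
#endif
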
23460Sstevel@tonic-gate #pragma weak cond_init = _cond_init
23470Sstevel@tonic-gate /* ARGSUSED2 */
23480Sstevel@tonic-gate int
23490Sstevel@tonic-gate _cond_init(cond_t *cvp, int type, void *arg)
23500Sstevel@tonic-gate {
23510Sstevel@tonic-gate 	if (type != USYNC_THREAD && type != USYNC_PROCESS)
23520Sstevel@tonic-gate 		return (EINVAL);
23530Sstevel@tonic-gate 	(void) _memset(cvp, 0, sizeof (*cvp));
23540Sstevel@tonic-gate 	cvp->cond_type = (uint16_t)type;
23550Sstevel@tonic-gate 	cvp->cond_magic = COND_MAGIC;
23560Sstevel@tonic-gate 	return (0);
23570Sstevel@tonic-gate }
23580Sstevel@tonic-gate 
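/*
 * Illustrative sketch, not part of libc: initializing a condvar with
 * cond_init() above.  The wrapper name is hypothetical.
 */
#if 0	/* example only -- not compiled into libc */
#include <synch.h>

/*
 * For a condvar in memory shared between processes (for example,
 * obtained from mmap(MAP_SHARED)), use USYNC_PROCESS; USYNC_THREAD
 * makes it process-private.  The third argument is unused (see
 * ARGSUSED2 above).
 */
static int
shared_cv_setup(cond_t *cvp)
{
	return (cond_init(cvp, USYNC_PROCESS, NULL));
}
#endif
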
23590Sstevel@tonic-gate /*
23600Sstevel@tonic-gate  * cond_sleep_queue(): utility function for cond_wait_queue().
23610Sstevel@tonic-gate  *
23620Sstevel@tonic-gate  * Go to sleep on a condvar sleep queue, expect to be waked up
23630Sstevel@tonic-gate  * by someone calling cond_signal() or cond_broadcast() or due
23640Sstevel@tonic-gate  * to receiving a UNIX signal or being cancelled, or just simply
23650Sstevel@tonic-gate  * due to a spurious wakeup (like someone calling forkall()).
23660Sstevel@tonic-gate  *
23670Sstevel@tonic-gate  * The associated mutex is *not* reacquired before returning.
23680Sstevel@tonic-gate  * That must be done by the caller of cond_sleep_queue().
23690Sstevel@tonic-gate  */
23700Sstevel@tonic-gate int
23710Sstevel@tonic-gate cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
23720Sstevel@tonic-gate {
23730Sstevel@tonic-gate 	ulwp_t *self = curthread;
23740Sstevel@tonic-gate 	queue_head_t *qp;
23750Sstevel@tonic-gate 	queue_head_t *mqp;
23760Sstevel@tonic-gate 	lwpid_t lwpid;
23770Sstevel@tonic-gate 	int signalled;
23780Sstevel@tonic-gate 	int error;
23790Sstevel@tonic-gate 
23800Sstevel@tonic-gate 	/*
23810Sstevel@tonic-gate 	 * Put ourself on the CV sleep queue, unlock the mutex, then
23820Sstevel@tonic-gate 	 * park ourself and unpark a candidate lwp to grab the mutex.
23830Sstevel@tonic-gate 	 * We must go onto the CV sleep queue before dropping the
23840Sstevel@tonic-gate 	 * mutex in order to guarantee atomicity of the operation.
23850Sstevel@tonic-gate 	 */
23860Sstevel@tonic-gate 	self->ul_sp = stkptr();
23870Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
23880Sstevel@tonic-gate 	enqueue(qp, self, cvp, CV);
23890Sstevel@tonic-gate 	cvp->cond_waiters_user = 1;
23900Sstevel@tonic-gate 	self->ul_cvmutex = mp;
23910Sstevel@tonic-gate 	self->ul_cv_wake = (tsp != NULL);
23920Sstevel@tonic-gate 	self->ul_signalled = 0;
23930Sstevel@tonic-gate 	lwpid = mutex_unlock_queue(mp);
23940Sstevel@tonic-gate 	for (;;) {
23950Sstevel@tonic-gate 		set_parking_flag(self, 1);
23960Sstevel@tonic-gate 		queue_unlock(qp);
23970Sstevel@tonic-gate 		if (lwpid != 0) {
23980Sstevel@tonic-gate 			lwpid = preempt_unpark(self, lwpid);
23990Sstevel@tonic-gate 			preempt(self);
24000Sstevel@tonic-gate 		}
24010Sstevel@tonic-gate 		/*
24020Sstevel@tonic-gate 		 * We may have a deferred signal present,
24030Sstevel@tonic-gate 		 * in which case we should return EINTR.
24040Sstevel@tonic-gate 		 * Also, we may have received a SIGCANCEL; if so
24050Sstevel@tonic-gate 		 * and we are cancelable we should return EINTR.
24060Sstevel@tonic-gate 		 * We force an immediate EINTR return from
24070Sstevel@tonic-gate 		 * __lwp_park() by turning our parking flag off.
24080Sstevel@tonic-gate 		 */
24090Sstevel@tonic-gate 		if (self->ul_cursig != 0 ||
24100Sstevel@tonic-gate 		    (self->ul_cancelable && self->ul_cancel_pending))
24110Sstevel@tonic-gate 			set_parking_flag(self, 0);
24120Sstevel@tonic-gate 		/*
24130Sstevel@tonic-gate 		 * __lwp_park() will return the residual time in tsp
24140Sstevel@tonic-gate 		 * if we are unparked before the timeout expires.
24150Sstevel@tonic-gate 		 */
24160Sstevel@tonic-gate 		error = __lwp_park(tsp, lwpid);
24170Sstevel@tonic-gate 		set_parking_flag(self, 0);
24180Sstevel@tonic-gate 		lwpid = 0;	/* unpark the other lwp only once */
24190Sstevel@tonic-gate 		/*
24200Sstevel@tonic-gate 		 * We were waked up by cond_signal(), cond_broadcast(),
24210Sstevel@tonic-gate 		 * by an interrupt or timeout (EINTR or ETIME),
24220Sstevel@tonic-gate 		 * or we may just have gotten a spurious wakeup.
24230Sstevel@tonic-gate 		 */
24240Sstevel@tonic-gate 		qp = queue_lock(cvp, CV);
24250Sstevel@tonic-gate 		mqp = queue_lock(mp, MX);
24260Sstevel@tonic-gate 		if (self->ul_sleepq == NULL)
24270Sstevel@tonic-gate 			break;
24280Sstevel@tonic-gate 		/*
24290Sstevel@tonic-gate 		 * We are on either the condvar sleep queue or the
24300Sstevel@tonic-gate 		 * mutex sleep queue.  If we are on the mutex sleep
24310Sstevel@tonic-gate 		 * queue, continue sleeping.  If we are on the condvar
24320Sstevel@tonic-gate 		 * sleep queue, break out of the sleep if we were
24330Sstevel@tonic-gate 		 * interrupted or we timed out (EINTR or ETIME).
24340Sstevel@tonic-gate 		 * Else this is a spurious wakeup; continue the loop.
24350Sstevel@tonic-gate 		 */
24360Sstevel@tonic-gate 		if (self->ul_sleepq == mqp)		/* mutex queue */
24370Sstevel@tonic-gate 			tsp = NULL;
24380Sstevel@tonic-gate 		else if (self->ul_sleepq == qp) {	/* condvar queue */
24390Sstevel@tonic-gate 			if (error) {
24400Sstevel@tonic-gate 				cvp->cond_waiters_user = dequeue_self(qp, cvp);
24410Sstevel@tonic-gate 				break;
24420Sstevel@tonic-gate 			}
24430Sstevel@tonic-gate 			/*
24440Sstevel@tonic-gate 			 * Else a spurious wakeup on the condvar queue.
24450Sstevel@tonic-gate 			 * __lwp_park() has already adjusted the timeout.
24460Sstevel@tonic-gate 			 */
24470Sstevel@tonic-gate 		} else {
24480Sstevel@tonic-gate 			thr_panic("cond_sleep_queue(): thread not on queue");
24490Sstevel@tonic-gate 		}
24500Sstevel@tonic-gate 		queue_unlock(mqp);
24510Sstevel@tonic-gate 	}
24520Sstevel@tonic-gate 
24530Sstevel@tonic-gate 	self->ul_sp = 0;
24540Sstevel@tonic-gate 	ASSERT(self->ul_cvmutex == NULL && self->ul_cv_wake == 0);
24550Sstevel@tonic-gate 	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
24560Sstevel@tonic-gate 	    self->ul_wchan == NULL);
24570Sstevel@tonic-gate 
24580Sstevel@tonic-gate 	signalled = self->ul_signalled;
24590Sstevel@tonic-gate 	self->ul_signalled = 0;
24600Sstevel@tonic-gate 	queue_unlock(qp);
24610Sstevel@tonic-gate 	queue_unlock(mqp);
24620Sstevel@tonic-gate 
24630Sstevel@tonic-gate 	/*
24640Sstevel@tonic-gate 	 * If we were concurrently cond_signal()d and any of:
24650Sstevel@tonic-gate 	 * received a UNIX signal, were cancelled, or got a timeout,
24660Sstevel@tonic-gate 	 * then perform another cond_signal() to avoid consuming it.
24670Sstevel@tonic-gate 	 */
24680Sstevel@tonic-gate 	if (error && signalled)
24690Sstevel@tonic-gate 		(void) cond_signal_internal(cvp);
24700Sstevel@tonic-gate 
24710Sstevel@tonic-gate 	return (error);
24720Sstevel@tonic-gate }
24730Sstevel@tonic-gate 
24740Sstevel@tonic-gate int
24750Sstevel@tonic-gate cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp,
24760Sstevel@tonic-gate 	tdb_mutex_stats_t *msp)
24770Sstevel@tonic-gate {
24780Sstevel@tonic-gate 	ulwp_t *self = curthread;
24790Sstevel@tonic-gate 	int error;
24800Sstevel@tonic-gate 
24810Sstevel@tonic-gate 	/*
24820Sstevel@tonic-gate 	 * The old thread library was programmed to defer signals
24830Sstevel@tonic-gate 	 * while in cond_wait() so that the associated mutex would
24840Sstevel@tonic-gate 	 * be guaranteed to be held when the application signal
24850Sstevel@tonic-gate 	 * handler was invoked.
24860Sstevel@tonic-gate 	 *
24870Sstevel@tonic-gate 	 * We do not behave this way by default; the state of the
24880Sstevel@tonic-gate 	 * associated mutex in the signal handler is undefined.
24890Sstevel@tonic-gate 	 *
24900Sstevel@tonic-gate 	 * To accommodate applications that depend on the old
24910Sstevel@tonic-gate 	 * behavior, the _THREAD_COND_WAIT_DEFER environment
24920Sstevel@tonic-gate 	 * variable can be set to 1 and we will behave in the
24930Sstevel@tonic-gate 	 * old way with respect to cond_wait().
24940Sstevel@tonic-gate 	 */
24950Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
24960Sstevel@tonic-gate 		sigoff(self);
24970Sstevel@tonic-gate 
24980Sstevel@tonic-gate 	error = cond_sleep_queue(cvp, mp, tsp);
24990Sstevel@tonic-gate 
25000Sstevel@tonic-gate 	/*
25010Sstevel@tonic-gate 	 * Reacquire the mutex.
25020Sstevel@tonic-gate 	 */
25030Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) == 0) {
25040Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
25050Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
25060Sstevel@tonic-gate 	} else if (mutex_trylock_adaptive(mp) != 0) {
25070Sstevel@tonic-gate 		(void) mutex_lock_queue(self, msp, mp, NULL);
25080Sstevel@tonic-gate 	}
25090Sstevel@tonic-gate 
25100Sstevel@tonic-gate 	if (msp)
25110Sstevel@tonic-gate 		record_begin_hold(msp);
25120Sstevel@tonic-gate 
25130Sstevel@tonic-gate 	/*
25140Sstevel@tonic-gate 	 * Take any deferred signal now, after we have reacquired the mutex.
25150Sstevel@tonic-gate 	 */
25160Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
25170Sstevel@tonic-gate 		sigon(self);
25180Sstevel@tonic-gate 
25190Sstevel@tonic-gate 	return (error);
25200Sstevel@tonic-gate }
25210Sstevel@tonic-gate 
25220Sstevel@tonic-gate /*
25230Sstevel@tonic-gate  * cond_sleep_kernel(): utility function for cond_wait_kernel().
25240Sstevel@tonic-gate  * See the comment ahead of cond_sleep_queue(), above.
25250Sstevel@tonic-gate  */
25260Sstevel@tonic-gate int
25270Sstevel@tonic-gate cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
25280Sstevel@tonic-gate {
25290Sstevel@tonic-gate 	int mtype = mp->mutex_type;
25300Sstevel@tonic-gate 	ulwp_t *self = curthread;
25310Sstevel@tonic-gate 	int error;
25320Sstevel@tonic-gate 
25330Sstevel@tonic-gate 	if (mtype & PTHREAD_PRIO_PROTECT) {
25340Sstevel@tonic-gate 		if (_ceil_mylist_del(mp))
25350Sstevel@tonic-gate 			_ceil_prio_waive();
25360Sstevel@tonic-gate 	}
25370Sstevel@tonic-gate 
25380Sstevel@tonic-gate 	self->ul_sp = stkptr();
25390Sstevel@tonic-gate 	self->ul_wchan = cvp;
25400Sstevel@tonic-gate 	mp->mutex_owner = 0;
25410Sstevel@tonic-gate 	mp->mutex_ownerpid = 0;
25420Sstevel@tonic-gate 	if (mtype & PTHREAD_PRIO_INHERIT)
25430Sstevel@tonic-gate 		mp->mutex_lockw = LOCKCLEAR;
25440Sstevel@tonic-gate 	/*
25450Sstevel@tonic-gate 	 * ___lwp_cond_wait() returns immediately with EINTR if
25460Sstevel@tonic-gate 	 * set_parking_flag(self,0) is called on this lwp before it
25470Sstevel@tonic-gate 	 * goes to sleep in the kernel.  sigacthandler() calls this
25480Sstevel@tonic-gate 	 * when a deferred signal is noted.  This assures that we don't
25490Sstevel@tonic-gate 	 * get stuck in ___lwp_cond_wait() with all signals blocked
25500Sstevel@tonic-gate 	 * due to taking a deferred signal before going to sleep.
25510Sstevel@tonic-gate 	 */
25520Sstevel@tonic-gate 	set_parking_flag(self, 1);
25530Sstevel@tonic-gate 	if (self->ul_cursig != 0 ||
25540Sstevel@tonic-gate 	    (self->ul_cancelable && self->ul_cancel_pending))
25550Sstevel@tonic-gate 		set_parking_flag(self, 0);
25560Sstevel@tonic-gate 	error = ___lwp_cond_wait(cvp, mp, tsp, 1);
25570Sstevel@tonic-gate 	set_parking_flag(self, 0);
25580Sstevel@tonic-gate 	self->ul_sp = 0;
25590Sstevel@tonic-gate 	self->ul_wchan = NULL;
25600Sstevel@tonic-gate 	return (error);
25610Sstevel@tonic-gate }
25620Sstevel@tonic-gate 
25630Sstevel@tonic-gate int
25640Sstevel@tonic-gate cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
25650Sstevel@tonic-gate {
25660Sstevel@tonic-gate 	ulwp_t *self = curthread;
25670Sstevel@tonic-gate 	int error;
25680Sstevel@tonic-gate 	int merror;
25690Sstevel@tonic-gate 
25700Sstevel@tonic-gate 	/*
25710Sstevel@tonic-gate 	 * See the large comment in cond_wait_queue(), above.
25720Sstevel@tonic-gate 	 */
25730Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
25740Sstevel@tonic-gate 		sigoff(self);
25750Sstevel@tonic-gate 
25760Sstevel@tonic-gate 	error = cond_sleep_kernel(cvp, mp, tsp);
25770Sstevel@tonic-gate 
25780Sstevel@tonic-gate 	/*
25790Sstevel@tonic-gate 	 * Override the return code from ___lwp_cond_wait()
25800Sstevel@tonic-gate 	 * with any non-zero return code from mutex_lock().
25810Sstevel@tonic-gate 	 * This addresses robust lock failures in particular;
25820Sstevel@tonic-gate 	 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
25830Sstevel@tonic-gate 	 * errors in order to take corrective action.
25840Sstevel@tonic-gate 	 */
25850Sstevel@tonic-gate 	if ((merror = _private_mutex_lock(mp)) != 0)
25860Sstevel@tonic-gate 		error = merror;
25870Sstevel@tonic-gate 
25880Sstevel@tonic-gate 	/*
25890Sstevel@tonic-gate 	 * Take any deferred signal now, after we have reacquired the mutex.
25900Sstevel@tonic-gate 	 */
25910Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
25920Sstevel@tonic-gate 		sigon(self);
25930Sstevel@tonic-gate 
25940Sstevel@tonic-gate 	return (error);
25950Sstevel@tonic-gate }
25960Sstevel@tonic-gate 
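/*
 * Illustrative sketch, not part of libc: why the mutex_lock() error
 * must override the wait error above.  With a robust mutex, a waiter
 * can wake up owning a lock whose previous owner died, and it must
 * see EOWNERDEAD to repair the protected state.  Only the pthread
 * calls are real interfaces (the _np-suffixed robust-mutex call is
 * the Solaris-era name); all other names are hypothetical.
 */
#if 0	/* example only -- not compiled into libc */
#include <pthread.h>
#include <errno.h>

extern pthread_mutex_t robust_mx;	/* a robust mutex */
extern pthread_cond_t robust_cv;
extern int data_ready;
extern void repair_shared_state(void);

static int
wait_for_data(void)
{
	int error;

	while (!data_ready) {
		error = pthread_cond_wait(&robust_cv, &robust_mx);
		if (error == EOWNERDEAD) {
			/*
			 * We hold the mutex, but its last owner died
			 * while holding it; the data it protects may
			 * be inconsistent.  Repair it, then mark the
			 * mutex consistent again.
			 */
			repair_shared_state();
			(void) pthread_mutex_consistent_np(&robust_mx);
		} else if (error != 0) {
			return (error);	/* e.g. ENOTRECOVERABLE */
		}
	}
	return (0);
}
#endif
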
25970Sstevel@tonic-gate /*
25980Sstevel@tonic-gate  * Common code for _cond_wait() and _cond_timedwait()
25990Sstevel@tonic-gate  */
26000Sstevel@tonic-gate int
26010Sstevel@tonic-gate cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
26020Sstevel@tonic-gate {
26030Sstevel@tonic-gate 	int mtype = mp->mutex_type;
26040Sstevel@tonic-gate 	hrtime_t begin_sleep = 0;
26050Sstevel@tonic-gate 	ulwp_t *self = curthread;
26060Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
26070Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
26080Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
26090Sstevel@tonic-gate 	uint8_t rcount;
26100Sstevel@tonic-gate 	int error = 0;
26110Sstevel@tonic-gate 
26120Sstevel@tonic-gate 	/*
26130Sstevel@tonic-gate 	 * The SUSV3 Posix spec for pthread_cond_timedwait() states:
26140Sstevel@tonic-gate 	 *	Except in the case of [ETIMEDOUT], all these error checks
26150Sstevel@tonic-gate 	 *	shall act as if they were performed immediately at the
26160Sstevel@tonic-gate 	 *	beginning of processing for the function and shall cause
26170Sstevel@tonic-gate 	 *	an error return, in effect, prior to modifying the state
26180Sstevel@tonic-gate 	 *	of the mutex specified by mutex or the condition variable
26190Sstevel@tonic-gate 	 *	specified by cond.
26200Sstevel@tonic-gate 	 * Therefore, we must return EINVAL now if the timeout is invalid.
26210Sstevel@tonic-gate 	 */
26220Sstevel@tonic-gate 	if (tsp != NULL &&
26230Sstevel@tonic-gate 	    (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
26240Sstevel@tonic-gate 		return (EINVAL);
26250Sstevel@tonic-gate 
26260Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
26270Sstevel@tonic-gate 		self->ul_sp = stkptr();
26280Sstevel@tonic-gate 		self->ul_wchan = cvp;
26290Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
26300Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = cvp;
26310Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
26320Sstevel@tonic-gate 		self->ul_sp = 0;
26330Sstevel@tonic-gate 	}
26340Sstevel@tonic-gate 	if (csp) {
26350Sstevel@tonic-gate 		if (tsp)
26360Sstevel@tonic-gate 			tdb_incr(csp->cond_timedwait);
26370Sstevel@tonic-gate 		else
26380Sstevel@tonic-gate 			tdb_incr(csp->cond_wait);
26390Sstevel@tonic-gate 	}
26400Sstevel@tonic-gate 	if (msp)
26410Sstevel@tonic-gate 		begin_sleep = record_hold_time(msp);
26420Sstevel@tonic-gate 	else if (csp)
26430Sstevel@tonic-gate 		begin_sleep = gethrtime();
26440Sstevel@tonic-gate 
26450Sstevel@tonic-gate 	if (self->ul_error_detection) {
26460Sstevel@tonic-gate 		if (!mutex_is_held(mp))
26470Sstevel@tonic-gate 			lock_error(mp, "cond_wait", cvp, NULL);
26480Sstevel@tonic-gate 		if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
26490Sstevel@tonic-gate 			lock_error(mp, "recursive mutex in cond_wait",
26500Sstevel@tonic-gate 				cvp, NULL);
26510Sstevel@tonic-gate 		if (cvp->cond_type & USYNC_PROCESS) {
26520Sstevel@tonic-gate 			if (!(mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST)))
26530Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
26540Sstevel@tonic-gate 					"condvar process-shared, "
26550Sstevel@tonic-gate 					"mutex process-private");
26560Sstevel@tonic-gate 		} else {
26570Sstevel@tonic-gate 			if (mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST))
26580Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
26590Sstevel@tonic-gate 					"condvar process-private, "
26600Sstevel@tonic-gate 					"mutex process-shared");
26610Sstevel@tonic-gate 		}
26620Sstevel@tonic-gate 	}
26630Sstevel@tonic-gate 
26640Sstevel@tonic-gate 	/*
26650Sstevel@tonic-gate 	 * We deal with recursive mutexes by completely
26660Sstevel@tonic-gate 	 * dropping the lock and restoring the recursion
26670Sstevel@tonic-gate 	 * count after waking up.  This is arguably wrong,
26680Sstevel@tonic-gate 	 * but it obeys the principle of least astonishment.
26690Sstevel@tonic-gate 	 */
26700Sstevel@tonic-gate 	rcount = mp->mutex_rcount;
26710Sstevel@tonic-gate 	mp->mutex_rcount = 0;
26720Sstevel@tonic-gate 	if ((mtype & (USYNC_PROCESS | USYNC_PROCESS_ROBUST |
26730Sstevel@tonic-gate 	    PTHREAD_PRIO_INHERIT | PTHREAD_PRIO_PROTECT)) |
26740Sstevel@tonic-gate 	    (cvp->cond_type & USYNC_PROCESS))
26750Sstevel@tonic-gate 		error = cond_wait_kernel(cvp, mp, tsp);
26760Sstevel@tonic-gate 	else
26770Sstevel@tonic-gate 		error = cond_wait_queue(cvp, mp, tsp, msp);
26780Sstevel@tonic-gate 	mp->mutex_rcount = rcount;
26790Sstevel@tonic-gate 
26800Sstevel@tonic-gate 	if (csp) {
26810Sstevel@tonic-gate 		hrtime_t lapse = gethrtime() - begin_sleep;
26820Sstevel@tonic-gate 		if (tsp == NULL)
26830Sstevel@tonic-gate 			csp->cond_wait_sleep_time += lapse;
26840Sstevel@tonic-gate 		else {
26850Sstevel@tonic-gate 			csp->cond_timedwait_sleep_time += lapse;
26860Sstevel@tonic-gate 			if (error == ETIME)
26870Sstevel@tonic-gate 				tdb_incr(csp->cond_timedwait_timeout);
26880Sstevel@tonic-gate 		}
26890Sstevel@tonic-gate 	}
26900Sstevel@tonic-gate 	return (error);
26910Sstevel@tonic-gate }
26920Sstevel@tonic-gate 
26930Sstevel@tonic-gate /*
26940Sstevel@tonic-gate  * cond_wait() is a cancellation point but _cond_wait() is not.
26950Sstevel@tonic-gate  * System libraries call the non-cancellation version.
26960Sstevel@tonic-gate  * It is expected that only applications call the cancellation version.
26970Sstevel@tonic-gate  */
26980Sstevel@tonic-gate int
26990Sstevel@tonic-gate _cond_wait(cond_t *cvp, mutex_t *mp)
27000Sstevel@tonic-gate {
27010Sstevel@tonic-gate 	ulwp_t *self = curthread;
27020Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
27030Sstevel@tonic-gate 	uberflags_t *gflags;
27040Sstevel@tonic-gate 
27050Sstevel@tonic-gate 	/*
27060Sstevel@tonic-gate 	 * Optimize the common case of USYNC_THREAD plus
27070Sstevel@tonic-gate 	 * no error detection, no lock statistics, and no event tracing.
27080Sstevel@tonic-gate 	 */
27090Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
27100Sstevel@tonic-gate 	    (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
27110Sstevel@tonic-gate 	    self->ul_td_events_enable |
27120Sstevel@tonic-gate 	    udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
27130Sstevel@tonic-gate 		return (cond_wait_queue(cvp, mp, NULL, NULL));
27140Sstevel@tonic-gate 
27150Sstevel@tonic-gate 	/*
27160Sstevel@tonic-gate 	 * Else do it the long way.
27170Sstevel@tonic-gate 	 */
27180Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, NULL));
27190Sstevel@tonic-gate }
27200Sstevel@tonic-gate 
27210Sstevel@tonic-gate int
27220Sstevel@tonic-gate cond_wait(cond_t *cvp, mutex_t *mp)
27230Sstevel@tonic-gate {
27240Sstevel@tonic-gate 	int error;
27250Sstevel@tonic-gate 
27260Sstevel@tonic-gate 	_cancelon();
27270Sstevel@tonic-gate 	error = _cond_wait(cvp, mp);
27280Sstevel@tonic-gate 	if (error == EINTR)
27290Sstevel@tonic-gate 		_canceloff();
27300Sstevel@tonic-gate 	else
27310Sstevel@tonic-gate 		_canceloff_nocancel();
27320Sstevel@tonic-gate 	return (error);
27330Sstevel@tonic-gate }
27340Sstevel@tonic-gate 
27350Sstevel@tonic-gate #pragma weak pthread_cond_wait = _pthread_cond_wait
27360Sstevel@tonic-gate int
27370Sstevel@tonic-gate _pthread_cond_wait(cond_t *cvp, mutex_t *mp)
27380Sstevel@tonic-gate {
27390Sstevel@tonic-gate 	int error;
27400Sstevel@tonic-gate 
27410Sstevel@tonic-gate 	error = cond_wait(cvp, mp);
27420Sstevel@tonic-gate 	return ((error == EINTR)? 0 : error);
27430Sstevel@tonic-gate }
27440Sstevel@tonic-gate 
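/*
 * Illustrative sketch, not part of libc: the canonical calling
 * pattern for pthread_cond_wait().  Because a wakeup can be spurious
 * (see cond_sleep_queue() above), the predicate must be re-tested in
 * a loop.  Only the pthread calls are real; the queue is hypothetical.
 */
#if 0	/* example only -- not compiled into libc */
#include <pthread.h>

static pthread_mutex_t q_mx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;
static int q_len;

static void
consume_one(void)
{
	(void) pthread_mutex_lock(&q_mx);
	/*
	 * Loop, don't "if": the wait can return on a spurious
	 * wakeup (forkall(), for instance) or after another
	 * consumer has already taken the item.
	 */
	while (q_len == 0)
		(void) pthread_cond_wait(&q_cv, &q_mx);
	q_len--;
	(void) pthread_mutex_unlock(&q_mx);
}
#endif
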
27450Sstevel@tonic-gate /*
27460Sstevel@tonic-gate  * cond_timedwait() is a cancellation point but _cond_timedwait() is not.
27470Sstevel@tonic-gate  * System libraries call the non-cancellation version.
27480Sstevel@tonic-gate  * It is expected that only applications call the cancellation version.
27490Sstevel@tonic-gate  */
27500Sstevel@tonic-gate int
27510Sstevel@tonic-gate _cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
27520Sstevel@tonic-gate {
27530Sstevel@tonic-gate 	clockid_t clock_id = cvp->cond_clockid;
27540Sstevel@tonic-gate 	timespec_t reltime;
27550Sstevel@tonic-gate 	int error;
27560Sstevel@tonic-gate 
27570Sstevel@tonic-gate 	if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
27580Sstevel@tonic-gate 		clock_id = CLOCK_REALTIME;
27590Sstevel@tonic-gate 	abstime_to_reltime(clock_id, abstime, &reltime);
27600Sstevel@tonic-gate 	error = cond_wait_common(cvp, mp, &reltime);
27610Sstevel@tonic-gate 	if (error == ETIME && clock_id == CLOCK_HIGHRES) {
27620Sstevel@tonic-gate 		/*
27630Sstevel@tonic-gate 		 * Don't return ETIME if we didn't really get a timeout.
27640Sstevel@tonic-gate 		 * This can happen if we return because someone resets
27650Sstevel@tonic-gate 		 * the system clock.  Just return zero in this case,
27660Sstevel@tonic-gate 		 * giving a spurious wakeup but not a timeout.
27670Sstevel@tonic-gate 		 */
27680Sstevel@tonic-gate 		if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
27690Sstevel@tonic-gate 		    abstime->tv_nsec > gethrtime())
27700Sstevel@tonic-gate 			error = 0;
27710Sstevel@tonic-gate 	}
27720Sstevel@tonic-gate 	return (error);
27730Sstevel@tonic-gate }
27740Sstevel@tonic-gate 
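/*
 * Illustrative sketch, not part of libc: building the absolute
 * timeout that _cond_timedwait() above converts back to a relative
 * time.  Assumes CLOCK_REALTIME, the default condvar clock.  Only
 * the library calls are real; the rest is hypothetical.
 */
#if 0	/* example only -- not compiled into libc */
#include <pthread.h>
#include <time.h>
#include <errno.h>

extern pthread_mutex_t mx;
extern pthread_cond_t cv;
extern int done;

/* wait at most msec milliseconds for 'done' to become nonzero */
static int
wait_done(long msec)
{
	struct timespec abstime;
	int error = 0;

	(void) clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += msec / 1000;
	abstime.tv_nsec += (msec % 1000) * 1000000;
	if (abstime.tv_nsec >= 1000000000L) {	/* normalize */
		abstime.tv_sec++;
		abstime.tv_nsec -= 1000000000L;
	}
	(void) pthread_mutex_lock(&mx);
	while (!done && error != ETIMEDOUT)
		error = pthread_cond_timedwait(&cv, &mx, &abstime);
	if (done)	/* the predicate, not the error, is decisive */
		error = 0;
	(void) pthread_mutex_unlock(&mx);
	return (error);
}
#endif
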
27750Sstevel@tonic-gate int
27760Sstevel@tonic-gate cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
27770Sstevel@tonic-gate {
27780Sstevel@tonic-gate 	int error;
27790Sstevel@tonic-gate 
27800Sstevel@tonic-gate 	_cancelon();
27810Sstevel@tonic-gate 	error = _cond_timedwait(cvp, mp, abstime);
27820Sstevel@tonic-gate 	if (error == EINTR)
27830Sstevel@tonic-gate 		_canceloff();
27840Sstevel@tonic-gate 	else
27850Sstevel@tonic-gate 		_canceloff_nocancel();
27860Sstevel@tonic-gate 	return (error);
27870Sstevel@tonic-gate }
27880Sstevel@tonic-gate 
27890Sstevel@tonic-gate #pragma weak pthread_cond_timedwait = _pthread_cond_timedwait
27900Sstevel@tonic-gate int
27910Sstevel@tonic-gate _pthread_cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
27920Sstevel@tonic-gate {
27930Sstevel@tonic-gate 	int error;
27940Sstevel@tonic-gate 
27950Sstevel@tonic-gate 	error = cond_timedwait(cvp, mp, abstime);
27960Sstevel@tonic-gate 	if (error == ETIME)
27970Sstevel@tonic-gate 		error = ETIMEDOUT;
27980Sstevel@tonic-gate 	else if (error == EINTR)
27990Sstevel@tonic-gate 		error = 0;
28000Sstevel@tonic-gate 	return (error);
28010Sstevel@tonic-gate }
28020Sstevel@tonic-gate 
28030Sstevel@tonic-gate /*
28040Sstevel@tonic-gate  * cond_reltimedwait() is a cancellation point but _cond_reltimedwait()
28050Sstevel@tonic-gate  * is not.  System libraries call the non-cancellation version.
28060Sstevel@tonic-gate  * It is expected that only applications call the cancellation version.
28070Sstevel@tonic-gate  */
28080Sstevel@tonic-gate int
28090Sstevel@tonic-gate _cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
28100Sstevel@tonic-gate {
28110Sstevel@tonic-gate 	timespec_t tslocal = *reltime;
28120Sstevel@tonic-gate 
28130Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, &tslocal));
28140Sstevel@tonic-gate }
28150Sstevel@tonic-gate 
28160Sstevel@tonic-gate #pragma weak cond_reltimedwait = _cond_reltimedwait_cancel
28170Sstevel@tonic-gate int
28180Sstevel@tonic-gate _cond_reltimedwait_cancel(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
28190Sstevel@tonic-gate {
28200Sstevel@tonic-gate 	int error;
28210Sstevel@tonic-gate 
28220Sstevel@tonic-gate 	_cancelon();
28230Sstevel@tonic-gate 	error = _cond_reltimedwait(cvp, mp, reltime);
28240Sstevel@tonic-gate 	if (error == EINTR)
28250Sstevel@tonic-gate 		_canceloff();
28260Sstevel@tonic-gate 	else
28270Sstevel@tonic-gate 		_canceloff_nocancel();
28280Sstevel@tonic-gate 	return (error);
28290Sstevel@tonic-gate }
28300Sstevel@tonic-gate 
28310Sstevel@tonic-gate #pragma weak pthread_cond_reltimedwait_np = _pthread_cond_reltimedwait_np
28320Sstevel@tonic-gate int
28330Sstevel@tonic-gate _pthread_cond_reltimedwait_np(cond_t *cvp, mutex_t *mp,
28340Sstevel@tonic-gate 	const timespec_t *reltime)
28350Sstevel@tonic-gate {
28360Sstevel@tonic-gate 	int error;
28370Sstevel@tonic-gate 
28380Sstevel@tonic-gate 	error = _cond_reltimedwait_cancel(cvp, mp, reltime);
28390Sstevel@tonic-gate 	if (error == ETIME)
28400Sstevel@tonic-gate 		error = ETIMEDOUT;
28410Sstevel@tonic-gate 	else if (error == EINTR)
28420Sstevel@tonic-gate 		error = 0;
28430Sstevel@tonic-gate 	return (error);
28440Sstevel@tonic-gate }
28450Sstevel@tonic-gate 
28460Sstevel@tonic-gate #pragma weak pthread_cond_signal = cond_signal_internal
28470Sstevel@tonic-gate #pragma weak _pthread_cond_signal = cond_signal_internal
28480Sstevel@tonic-gate #pragma weak cond_signal = cond_signal_internal
28490Sstevel@tonic-gate #pragma weak _cond_signal = cond_signal_internal
28500Sstevel@tonic-gate int
28510Sstevel@tonic-gate cond_signal_internal(cond_t *cvp)
28520Sstevel@tonic-gate {
28530Sstevel@tonic-gate 	ulwp_t *self = curthread;
28540Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
28550Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
28560Sstevel@tonic-gate 	int error = 0;
28570Sstevel@tonic-gate 	queue_head_t *qp;
28580Sstevel@tonic-gate 	mutex_t *mp;
28590Sstevel@tonic-gate 	queue_head_t *mqp;
28600Sstevel@tonic-gate 	ulwp_t **ulwpp;
28610Sstevel@tonic-gate 	ulwp_t *ulwp;
28620Sstevel@tonic-gate 	ulwp_t *prev = NULL;
28630Sstevel@tonic-gate 	ulwp_t *next;
28640Sstevel@tonic-gate 	ulwp_t **suspp = NULL;
28650Sstevel@tonic-gate 	ulwp_t *susprev;
28660Sstevel@tonic-gate 
28670Sstevel@tonic-gate 	if (csp)
28680Sstevel@tonic-gate 		tdb_incr(csp->cond_signal);
28690Sstevel@tonic-gate 
28700Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
28710Sstevel@tonic-gate 		error = __lwp_cond_signal(cvp);
28720Sstevel@tonic-gate 
28730Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
28740Sstevel@tonic-gate 		return (error);
28750Sstevel@tonic-gate 
28760Sstevel@tonic-gate 	/*
28770Sstevel@tonic-gate 	 * Move someone from the condvar sleep queue to the mutex sleep
28780Sstevel@tonic-gate 	 * queue for the mutex that he will acquire on being waked up.
28790Sstevel@tonic-gate 	 * We can do this only if we own the mutex he will acquire.
28800Sstevel@tonic-gate 	 * If we do not own the mutex, or if his ul_cv_wake flag
28810Sstevel@tonic-gate 	 * is set, just dequeue and unpark him.
28820Sstevel@tonic-gate 	 */
28830Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
28840Sstevel@tonic-gate 	for (ulwpp = &qp->qh_head; (ulwp = *ulwpp) != NULL;
28850Sstevel@tonic-gate 	    prev = ulwp, ulwpp = &ulwp->ul_link) {
28860Sstevel@tonic-gate 		if (ulwp->ul_wchan == cvp) {
28870Sstevel@tonic-gate 			if (!ulwp->ul_stop)
28880Sstevel@tonic-gate 				break;
28890Sstevel@tonic-gate 			/*
28900Sstevel@tonic-gate 			 * Try not to dequeue a suspended thread.
28910Sstevel@tonic-gate 			 * This mimics the old libthread's behavior.
28920Sstevel@tonic-gate 			 */
28930Sstevel@tonic-gate 			if (suspp == NULL) {
28940Sstevel@tonic-gate 				suspp = ulwpp;
28950Sstevel@tonic-gate 				susprev = prev;
28960Sstevel@tonic-gate 			}
28970Sstevel@tonic-gate 		}
28980Sstevel@tonic-gate 	}
28990Sstevel@tonic-gate 	if (ulwp == NULL && suspp != NULL) {
29000Sstevel@tonic-gate 		ulwp = *(ulwpp = suspp);
29010Sstevel@tonic-gate 		prev = susprev;
29020Sstevel@tonic-gate 		suspp = NULL;
29030Sstevel@tonic-gate 	}
29040Sstevel@tonic-gate 	if (ulwp == NULL) {	/* no one on the sleep queue */
29050Sstevel@tonic-gate 		cvp->cond_waiters_user = 0;
29060Sstevel@tonic-gate 		queue_unlock(qp);
29070Sstevel@tonic-gate 		return (error);
29080Sstevel@tonic-gate 	}
29090Sstevel@tonic-gate 	/*
29100Sstevel@tonic-gate 	 * Scan the remainder of the CV queue for another waiter.
29110Sstevel@tonic-gate 	 */
29120Sstevel@tonic-gate 	if (suspp != NULL) {
29130Sstevel@tonic-gate 		next = *suspp;
29140Sstevel@tonic-gate 	} else {
29150Sstevel@tonic-gate 		for (next = ulwp->ul_link; next != NULL; next = next->ul_link)
29160Sstevel@tonic-gate 			if (next->ul_wchan == cvp)
29170Sstevel@tonic-gate 				break;
29180Sstevel@tonic-gate 	}
29190Sstevel@tonic-gate 	if (next == NULL)
29200Sstevel@tonic-gate 		cvp->cond_waiters_user = 0;
29210Sstevel@tonic-gate 
29220Sstevel@tonic-gate 	/*
29230Sstevel@tonic-gate 	 * Inform the thread that he was the recipient of a cond_signal().
29240Sstevel@tonic-gate 	 * This lets him deal with cond_signal() and, concurrently,
29250Sstevel@tonic-gate 	 * one or more of a cancellation, a UNIX signal, or a timeout.
29260Sstevel@tonic-gate 	 * These latter conditions must not consume a cond_signal().
29270Sstevel@tonic-gate 	 */
29280Sstevel@tonic-gate 	ulwp->ul_signalled = 1;
29290Sstevel@tonic-gate 
29300Sstevel@tonic-gate 	/*
29310Sstevel@tonic-gate 	 * Dequeue the waiter but leave his ul_sleepq non-NULL
29320Sstevel@tonic-gate 	 * while we move him to the mutex queue so that he can
29330Sstevel@tonic-gate 	 * deal properly with spurious wakeups.
29340Sstevel@tonic-gate 	 */
29350Sstevel@tonic-gate 	*ulwpp = ulwp->ul_link;
29360Sstevel@tonic-gate 	if (qp->qh_tail == ulwp)
29370Sstevel@tonic-gate 		qp->qh_tail = prev;
29380Sstevel@tonic-gate 	qp->qh_qlen--;
29390Sstevel@tonic-gate 	ulwp->ul_link = NULL;
29400Sstevel@tonic-gate 
29410Sstevel@tonic-gate 	mp = ulwp->ul_cvmutex;		/* the mutex he will acquire */
29420Sstevel@tonic-gate 	ulwp->ul_cvmutex = NULL;
29430Sstevel@tonic-gate 	ASSERT(mp != NULL);
29440Sstevel@tonic-gate 
29450Sstevel@tonic-gate 	if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
29460Sstevel@tonic-gate 		lwpid_t lwpid = ulwp->ul_lwpid;
29470Sstevel@tonic-gate 
29480Sstevel@tonic-gate 		no_preempt(self);
29490Sstevel@tonic-gate 		ulwp->ul_sleepq = NULL;
29500Sstevel@tonic-gate 		ulwp->ul_wchan = NULL;
29510Sstevel@tonic-gate 		ulwp->ul_cv_wake = 0;
29520Sstevel@tonic-gate 		queue_unlock(qp);
29530Sstevel@tonic-gate 		(void) __lwp_unpark(lwpid);
29540Sstevel@tonic-gate 		preempt(self);
29550Sstevel@tonic-gate 	} else {
29560Sstevel@tonic-gate 		mqp = queue_lock(mp, MX);
29570Sstevel@tonic-gate 		enqueue(mqp, ulwp, mp, MX);
29580Sstevel@tonic-gate 		mp->mutex_waiters = 1;
29590Sstevel@tonic-gate 		queue_unlock(mqp);
29600Sstevel@tonic-gate 		queue_unlock(qp);
29610Sstevel@tonic-gate 	}
29620Sstevel@tonic-gate 
29630Sstevel@tonic-gate 	return (error);
29640Sstevel@tonic-gate }
29650Sstevel@tonic-gate 
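/*
 * Illustrative sketch, not part of libc: the calling pattern that
 * lets cond_signal_internal() above take its MUTEX_OWNED() branch,
 * moving the waiter straight from the condvar queue to the mutex
 * queue instead of unparking it just to block on the mutex again.
 * Only the pthread calls are real; the queue is hypothetical.
 */
#if 0	/* example only -- not compiled into libc */
#include <pthread.h>

extern pthread_mutex_t q_mx;
extern pthread_cond_t q_cv;
extern int q_len;

static void
produce_one(void)
{
	(void) pthread_mutex_lock(&q_mx);
	q_len++;
	/*
	 * Signal while still holding the mutex: the signaling
	 * thread owns the lock the waiter will need, so libc can
	 * queue the waiter on the mutex and wake it only when the
	 * mutex is released.
	 */
	(void) pthread_cond_signal(&q_cv);
	(void) pthread_mutex_unlock(&q_mx);
}
#endif
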
29660Sstevel@tonic-gate #define	MAXLWPS	128	/* max remembered lwpids before overflow */
29670Sstevel@tonic-gate #define	NEWLWPS	2048	/* max remembered lwpids at first overflow */
29680Sstevel@tonic-gate 
29690Sstevel@tonic-gate #pragma weak pthread_cond_broadcast = cond_broadcast_internal
29700Sstevel@tonic-gate #pragma weak _pthread_cond_broadcast = cond_broadcast_internal
29710Sstevel@tonic-gate #pragma weak cond_broadcast = cond_broadcast_internal
29720Sstevel@tonic-gate #pragma weak _cond_broadcast = cond_broadcast_internal
29730Sstevel@tonic-gate int
29740Sstevel@tonic-gate cond_broadcast_internal(cond_t *cvp)
29750Sstevel@tonic-gate {
29760Sstevel@tonic-gate 	ulwp_t *self = curthread;
29770Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
29780Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
29790Sstevel@tonic-gate 	int error = 0;
29800Sstevel@tonic-gate 	queue_head_t *qp;
29810Sstevel@tonic-gate 	mutex_t *mp;
29820Sstevel@tonic-gate 	queue_head_t *mqp;
29830Sstevel@tonic-gate 	mutex_t *mp_cache = NULL;
29840Sstevel@tonic-gate 	queue_head_t *mqp_cache = NULL;
29850Sstevel@tonic-gate 	ulwp_t **ulwpp;
29860Sstevel@tonic-gate 	ulwp_t *ulwp;
29870Sstevel@tonic-gate 	ulwp_t *prev = NULL;
29880Sstevel@tonic-gate 	lwpid_t buffer[MAXLWPS];
29890Sstevel@tonic-gate 	lwpid_t *lwpid = buffer;
29900Sstevel@tonic-gate 	int nlwpid = 0;
29910Sstevel@tonic-gate 	int maxlwps = MAXLWPS;
29920Sstevel@tonic-gate 
29930Sstevel@tonic-gate 	if (csp)
29940Sstevel@tonic-gate 		tdb_incr(csp->cond_broadcast);
29950Sstevel@tonic-gate 
29960Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
29970Sstevel@tonic-gate 		error = __lwp_cond_broadcast(cvp);
29980Sstevel@tonic-gate 
29990Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
30000Sstevel@tonic-gate 		return (error);
30010Sstevel@tonic-gate 
30020Sstevel@tonic-gate 	/*
30030Sstevel@tonic-gate 	 * Move everyone from the condvar sleep queue to the mutex sleep
30040Sstevel@tonic-gate 	 * queue for the mutex that they will acquire on being waked up.
30050Sstevel@tonic-gate 	 * We can do this only if we own the mutex they will acquire.
30060Sstevel@tonic-gate 	 * If we do not own the mutex, or if their ul_cv_wake flag
30070Sstevel@tonic-gate 	 * is set, just dequeue and unpark them.
30080Sstevel@tonic-gate 	 *
30090Sstevel@tonic-gate 	 * We keep track of lwpids that are to be unparked in lwpid[].
30100Sstevel@tonic-gate 	 * __lwp_unpark_all() is called to unpark all of them after
30110Sstevel@tonic-gate 	 * they have been removed from the sleep queue and the sleep
30120Sstevel@tonic-gate 	 * queue lock has been dropped.  If we run out of space in our
30130Sstevel@tonic-gate 	 * on-stack buffer, we need to allocate more but we can't call
30140Sstevel@tonic-gate 	 * lmalloc() because we are holding a queue lock when the overflow
30150Sstevel@tonic-gate 	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
30160Sstevel@tonic-gate 	 * either because the application may have allocated a small stack
30170Sstevel@tonic-gate 	 * and we don't want to overrun the stack.  So we use the mmap()
30180Sstevel@tonic-gate 	 * system call directly since that path acquires no locks.
30190Sstevel@tonic-gate 	 */
30200Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
30210Sstevel@tonic-gate 	cvp->cond_waiters_user = 0;
30220Sstevel@tonic-gate 	ulwpp = &qp->qh_head;
30230Sstevel@tonic-gate 	while ((ulwp = *ulwpp) != NULL) {
30240Sstevel@tonic-gate 
30250Sstevel@tonic-gate 		if (ulwp->ul_wchan != cvp) {
30260Sstevel@tonic-gate 			prev = ulwp;
30270Sstevel@tonic-gate 			ulwpp = &ulwp->ul_link;
30280Sstevel@tonic-gate 			continue;
30290Sstevel@tonic-gate 		}
30300Sstevel@tonic-gate 
30310Sstevel@tonic-gate 		*ulwpp = ulwp->ul_link;
30320Sstevel@tonic-gate 		if (qp->qh_tail == ulwp)
30330Sstevel@tonic-gate 			qp->qh_tail = prev;
30340Sstevel@tonic-gate 		qp->qh_qlen--;
30350Sstevel@tonic-gate 		ulwp->ul_link = NULL;
30360Sstevel@tonic-gate 
30370Sstevel@tonic-gate 		mp = ulwp->ul_cvmutex;		/* his mutex */
30380Sstevel@tonic-gate 		ulwp->ul_cvmutex = NULL;
30390Sstevel@tonic-gate 		ASSERT(mp != NULL);
30400Sstevel@tonic-gate 
30410Sstevel@tonic-gate 		if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
30420Sstevel@tonic-gate 			ulwp->ul_sleepq = NULL;
30430Sstevel@tonic-gate 			ulwp->ul_wchan = NULL;
30440Sstevel@tonic-gate 			ulwp->ul_cv_wake = 0;
30450Sstevel@tonic-gate 			if (nlwpid == maxlwps) {
30460Sstevel@tonic-gate 				/*
30470Sstevel@tonic-gate 				 * Allocate NEWLWPS ids on the first overflow.
30480Sstevel@tonic-gate 				 * Double the allocation each time after that.
30490Sstevel@tonic-gate 				 */
30500Sstevel@tonic-gate 				int newlwps = (lwpid == buffer)? NEWLWPS :
30510Sstevel@tonic-gate 						2 * maxlwps;
30520Sstevel@tonic-gate 				void *vaddr = _private_mmap(NULL,
30530Sstevel@tonic-gate 					newlwps * sizeof (lwpid_t),
30540Sstevel@tonic-gate 					PROT_READ|PROT_WRITE,
30550Sstevel@tonic-gate 					MAP_PRIVATE|MAP_ANON, -1, (off_t)0);
30560Sstevel@tonic-gate 				if (vaddr == MAP_FAILED) {
30570Sstevel@tonic-gate 					/*
30580Sstevel@tonic-gate 					 * Let's hope this never happens.
30590Sstevel@tonic-gate 					 * If it does, then we have a terrible
30600Sstevel@tonic-gate 					 * thundering herd on our hands.
30610Sstevel@tonic-gate 					 */
30620Sstevel@tonic-gate 					(void) __lwp_unpark_all(lwpid, nlwpid);
30630Sstevel@tonic-gate 					nlwpid = 0;
30640Sstevel@tonic-gate 				} else {
30650Sstevel@tonic-gate 					(void) _memcpy(vaddr, lwpid,
30660Sstevel@tonic-gate 						maxlwps * sizeof (lwpid_t));
30670Sstevel@tonic-gate 					if (lwpid != buffer)
30680Sstevel@tonic-gate 						(void) _private_munmap(lwpid,
30690Sstevel@tonic-gate 						    maxlwps * sizeof (lwpid_t));
30700Sstevel@tonic-gate 					lwpid = vaddr;
30710Sstevel@tonic-gate 					maxlwps = newlwps;
30720Sstevel@tonic-gate 				}
30730Sstevel@tonic-gate 			}
30740Sstevel@tonic-gate 			lwpid[nlwpid++] = ulwp->ul_lwpid;
30750Sstevel@tonic-gate 		} else {
30760Sstevel@tonic-gate 			if (mp != mp_cache) {
30770Sstevel@tonic-gate 				if (mqp_cache != NULL)
30780Sstevel@tonic-gate 					queue_unlock(mqp_cache);
30790Sstevel@tonic-gate 				mqp_cache = queue_lock(mp, MX);
30800Sstevel@tonic-gate 				mp_cache = mp;
30810Sstevel@tonic-gate 			}
30820Sstevel@tonic-gate 			mqp = mqp_cache;
30830Sstevel@tonic-gate 			enqueue(mqp, ulwp, mp, MX);
30840Sstevel@tonic-gate 			mp->mutex_waiters = 1;
30850Sstevel@tonic-gate 		}
30860Sstevel@tonic-gate 	}
30870Sstevel@tonic-gate 	if (mqp_cache != NULL)
30880Sstevel@tonic-gate 		queue_unlock(mqp_cache);
30890Sstevel@tonic-gate 	queue_unlock(qp);
30900Sstevel@tonic-gate 	if (nlwpid) {
30910Sstevel@tonic-gate 		if (nlwpid == 1)
30920Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid[0]);
30930Sstevel@tonic-gate 		else
30940Sstevel@tonic-gate 			(void) __lwp_unpark_all(lwpid, nlwpid);
30950Sstevel@tonic-gate 	}
30960Sstevel@tonic-gate 	if (lwpid != buffer)
30970Sstevel@tonic-gate 		(void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t));
30980Sstevel@tonic-gate 
30990Sstevel@tonic-gate 	return (error);
31000Sstevel@tonic-gate }
31010Sstevel@tonic-gate 
31020Sstevel@tonic-gate #pragma weak pthread_cond_destroy = _cond_destroy
31030Sstevel@tonic-gate #pragma weak _pthread_cond_destroy = _cond_destroy
31040Sstevel@tonic-gate #pragma weak cond_destroy = _cond_destroy
31050Sstevel@tonic-gate int
31060Sstevel@tonic-gate _cond_destroy(cond_t *cvp)
31070Sstevel@tonic-gate {
31080Sstevel@tonic-gate 	cvp->cond_magic = 0;
31090Sstevel@tonic-gate 	tdb_sync_obj_deregister(cvp);
31100Sstevel@tonic-gate 	return (0);
31110Sstevel@tonic-gate }
31120Sstevel@tonic-gate 
31130Sstevel@tonic-gate #if defined(THREAD_DEBUG)
31140Sstevel@tonic-gate void
31150Sstevel@tonic-gate assert_no_libc_locks_held(void)
31160Sstevel@tonic-gate {
31170Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
31180Sstevel@tonic-gate }
31190Sstevel@tonic-gate #endif
31200Sstevel@tonic-gate 
31210Sstevel@tonic-gate /* protected by link_lock */
31220Sstevel@tonic-gate uint64_t spin_lock_spin;
31230Sstevel@tonic-gate uint64_t spin_lock_spin2;
31240Sstevel@tonic-gate uint64_t spin_lock_sleep;
31250Sstevel@tonic-gate uint64_t spin_lock_wakeup;
31260Sstevel@tonic-gate 
31270Sstevel@tonic-gate /*
31280Sstevel@tonic-gate  * Record spin lock statistics.
31290Sstevel@tonic-gate  * Called by a thread exiting itself in thrp_exit().
31300Sstevel@tonic-gate  * Also called via atexit() from the thread calling
31310Sstevel@tonic-gate  * exit() to do all the other threads as well.
31320Sstevel@tonic-gate  */
31330Sstevel@tonic-gate void
31340Sstevel@tonic-gate record_spin_locks(ulwp_t *ulwp)
31350Sstevel@tonic-gate {
31360Sstevel@tonic-gate 	spin_lock_spin += ulwp->ul_spin_lock_spin;
31370Sstevel@tonic-gate 	spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
31380Sstevel@tonic-gate 	spin_lock_sleep += ulwp->ul_spin_lock_sleep;
31390Sstevel@tonic-gate 	spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
31400Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin = 0;
31410Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin2 = 0;
31420Sstevel@tonic-gate 	ulwp->ul_spin_lock_sleep = 0;
31430Sstevel@tonic-gate 	ulwp->ul_spin_lock_wakeup = 0;
31440Sstevel@tonic-gate }
31450Sstevel@tonic-gate 
31460Sstevel@tonic-gate /*
31470Sstevel@tonic-gate  * atexit function:  dump the queue statistics to stderr.
31480Sstevel@tonic-gate  */
3149*1219Sraf #if !defined(__lint)
3150*1219Sraf #define	fprintf	_fprintf
3151*1219Sraf #endif
31520Sstevel@tonic-gate #include <stdio.h>
31530Sstevel@tonic-gate void
31540Sstevel@tonic-gate dump_queue_statistics(void)
31550Sstevel@tonic-gate {
31560Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
31570Sstevel@tonic-gate 	queue_head_t *qp;
31580Sstevel@tonic-gate 	int qn;
31590Sstevel@tonic-gate 	uint64_t spin_lock_total = 0;
31600Sstevel@tonic-gate 
31610Sstevel@tonic-gate 	if (udp->queue_head == NULL || thread_queue_dump == 0)
31620Sstevel@tonic-gate 		return;
31630Sstevel@tonic-gate 
31640Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
31650Sstevel@tonic-gate 	    fprintf(stderr, "queue#   lockcount    max qlen\n") < 0)
31660Sstevel@tonic-gate 		return;
31670Sstevel@tonic-gate 	for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
31680Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
31690Sstevel@tonic-gate 			continue;
31700Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
31710Sstevel@tonic-gate 		if (fprintf(stderr, "%5d %12llu%12u\n", qn,
31720Sstevel@tonic-gate 			(u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0)
31730Sstevel@tonic-gate 				return;
31740Sstevel@tonic-gate 	}
31750Sstevel@tonic-gate 
31760Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
31770Sstevel@tonic-gate 	    fprintf(stderr, "queue#   lockcount    max qlen\n") < 0)
31780Sstevel@tonic-gate 		return;
31790Sstevel@tonic-gate 	for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
31800Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
31810Sstevel@tonic-gate 			continue;
31820Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
31830Sstevel@tonic-gate 		if (fprintf(stderr, "%5d %12llu%12u\n", qn,
31840Sstevel@tonic-gate 			(u_longlong_t)qp->qh_lockcount, qp->qh_qmax) < 0)
31850Sstevel@tonic-gate 				return;
31860Sstevel@tonic-gate 	}
31870Sstevel@tonic-gate 
31880Sstevel@tonic-gate 	(void) fprintf(stderr, "\n  spin_lock_total  = %10llu\n",
31890Sstevel@tonic-gate 		(u_longlong_t)spin_lock_total);
31900Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin   = %10llu\n",
31910Sstevel@tonic-gate 		(u_longlong_t)spin_lock_spin);
31920Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin2  = %10llu\n",
31930Sstevel@tonic-gate 		(u_longlong_t)spin_lock_spin2);
31940Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_sleep  = %10llu\n",
31950Sstevel@tonic-gate 		(u_longlong_t)spin_lock_sleep);
31960Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_wakeup = %10llu\n",
31970Sstevel@tonic-gate 		(u_longlong_t)spin_lock_wakeup);
31980Sstevel@tonic-gate }