xref: /onnv-gate/usr/src/lib/libc/port/threads/synch.c (revision 6812:febeba71273d)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/rtpriocntl.h>
#include <sys/sdt.h>
#include <atomic.h>

#if defined(THREAD_DEBUG)
#define	INCR32(x)	(((x) != UINT32_MAX)? (x)++ : 0)
#define	INCR(x)		((x)++)
#define	DECR(x)		((x)--)
#define	MAXINCR(m, x)	((m < ++x)? (m = x) : 0)
#else
#define	INCR32(x)
#define	INCR(x)
#define	DECR(x)
#define	MAXINCR(m, x)
#endif

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);

/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}
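
/*
 * Illustrative sketch (not part of the library): the two functions
 * above are used as a bracketing pair around a mutex hold, roughly:
 *
 *	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
 *	record_begin_hold(msp);
 *	... the lock is held ...
 *	(void) record_hold_time(msp);
 *
 * MUTEX_STATS() here names the stats-lookup step and is assumed from
 * context; the bracketing pattern is what accumulates mutex_hold_time.
 */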

/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin count of 1000 is experimentally determined.
 * On sun4u machines with any number of processors it could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variable:
 *	_THREAD_ADAPTIVE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_queue_verify = 0;
static	int	ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * We try harder to acquire queue locks by spinning.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_queue_spin = 10000;
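
/*
 * Illustrative sketch: both tunables above are ordinary environment
 * variables, so a run with more aggressive spinning might look like
 * (shell syntax; the values are arbitrary examples within the
 * documented [0 .. 1,000,000] range):
 *
 *	$ _THREAD_ADAPTIVE_SPIN=5000 _THREAD_QUEUE_SPIN=50000 ./app
 */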

#define	ALL_ATTRIBUTES				\
	(LOCK_RECURSIVE | LOCK_ERRORCHECK |	\
	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT |	\
	LOCK_ROBUST)

/*
 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
 *	LOCK_RECURSIVE
 *	LOCK_ERRORCHECK
 *	LOCK_PRIO_INHERIT
 *	LOCK_PRIO_PROTECT
 *	LOCK_ROBUST
 */
#pragma weak _mutex_init = mutex_init
/* ARGSUSED2 */
int
mutex_init(mutex_t *mp, int type, void *arg)
{
	int basetype = (type & ~ALL_ATTRIBUTES);
	const pcclass_t *pccp;
	int error = 0;
	int ceil;

	if (basetype == USYNC_PROCESS_ROBUST) {
		/*
		 * USYNC_PROCESS_ROBUST is a deprecated historical type.
		 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
		 * retain the USYNC_PROCESS_ROBUST flag so we can return
		 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
		 * mutexes will ever draw ELOCKUNMAPPED).
		 */
		type |= (USYNC_PROCESS | LOCK_ROBUST);
		basetype = USYNC_PROCESS;
	}

	if (type & LOCK_PRIO_PROTECT)
		pccp = get_info_by_policy(SCHED_FIFO);
	if ((basetype != USYNC_THREAD && basetype != USYNC_PROCESS) ||
	    (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
	    == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
	    ((type & LOCK_PRIO_PROTECT) &&
	    ((ceil = *(int *)arg) < pccp->pcc_primin ||
	    ceil > pccp->pcc_primax))) {
		error = EINVAL;
	} else if (type & LOCK_ROBUST) {
		/*
		 * Callers of mutex_init() with the LOCK_ROBUST attribute
		 * are required to pass an initially all-zero mutex.
		 * Multiple calls to mutex_init() are allowed; all but
		 * the first return EBUSY.  A call to mutex_init() is
		 * allowed to make an inconsistent robust lock consistent
		 * (for historical usage, even though the proper interface
		 * for this is mutex_consistent()).  Note that we use
		 * atomic_or_16() to set the LOCK_INITED flag so as
		 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
		 */
		if (!(mp->mutex_flag & LOCK_INITED)) {
			mp->mutex_type = (uint8_t)type;
			atomic_or_16(&mp->mutex_flag, LOCK_INITED);
			mp->mutex_magic = MUTEX_MAGIC;
		} else if (type != mp->mutex_type ||
		    ((type & LOCK_PRIO_PROTECT) && mp->mutex_ceiling != ceil)) {
			error = EINVAL;
		} else if (mutex_consistent(mp) != 0) {
			error = EBUSY;
		}
		/* register a process robust mutex with the kernel */
		if (basetype == USYNC_PROCESS)
			register_lock(mp);
	} else {
		(void) memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		mp->mutex_magic = MUTEX_MAGIC;
	}

	if (error == 0 && (type & LOCK_PRIO_PROTECT)) {
		mp->mutex_ceiling = ceil;
	}

	return (error);
}
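
/*
 * Illustrative sketch (caller's view, not part of the library):
 * for LOCK_PRIO_PROTECT the 'arg' parameter is read as an int
 * priority ceiling, so a caller might write:
 *
 *	mutex_t m;			(assumed all-zero storage)
 *	int ceil = 30;			(arbitrary example ceiling)
 *	int err = mutex_init(&m,
 *	    USYNC_PROCESS | LOCK_PRIO_PROTECT, &ceil);
 *
 * A ceiling outside [pcc_primin .. pcc_primax] for SCHED_FIFO
 * draws EINVAL, as checked above.
 */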

/*
 * Delete mp from list of ceiling mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	for (mcpp = &self->ul_mxchain;
	    (mcp = *mcpp) != NULL;
	    mcpp = &mcp->mxchain_next) {
		if (mcp->mxchain_mx == mp) {
			*mcpp = mcp->mxchain_next;
			lfree(mcp, sizeof (*mcp));
			return (mcpp == &self->ul_mxchain);
		}
	}
	return (0);
}

/*
 * Add mp to the list of ceiling mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}

/*
 * Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
 */
static void
set_rt_priority(ulwp_t *self, int prio)
{
	pcparms_t pcparm;

	pcparm.pc_cid = self->ul_rtclassid;
	((rtparms_t *)pcparm.pc_clparms)->rt_tqnsecs = RT_NOCHANGE;
	((rtparms_t *)pcparm.pc_clparms)->rt_pri = prio;
	(void) priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm);
}

/*
 * Inherit priority from ceiling.
 * This changes the effective priority, not the assigned priority.
 */
void
_ceil_prio_inherit(int prio)
{
	ulwp_t *self = curthread;

	self->ul_epri = prio;
	set_rt_priority(self, prio);
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp = self->ul_mxchain;
	int prio;

	if (mcp == NULL) {
		prio = self->ul_pri;
		self->ul_epri = 0;
	} else {
		prio = mcp->mxchain_mx->mutex_ceiling;
		self->ul_epri = prio;
	}
	set_rt_priority(self, prio);
}

/*
 * Clear the lock byte.  Retain the waiters byte and the spinners byte.
 * Return the old value of the lock word.
 */
static uint32_t
clear_lockbyte(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		new = old & ~LOCKMASK;
	} while (atomic_cas_32(lockword, old, new) != old);

	return (old);
}
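
/*
 * Illustrative sketch: clear_lockbyte() and the helpers that follow
 * all use the same compare-and-swap retry idiom to update part of a
 * word while preserving the other bytes.  In isolation:
 *
 *	uint32_t old, new;
 *	do {
 *		old = *wordp;			read the current value
 *		new = old & ~SOME_MASK;		compute the desired value
 *	} while (atomic_cas_32(wordp, old, new) != old);
 *
 * The loop retries whenever another thread changed *wordp between
 * the read and the cas; SOME_MASK stands in for LOCKMASK above.
 */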

/*
 * Same as clear_lockbyte(), but operates on mutex_lockword64.
 * The mutex_ownerpid field is cleared along with the lock byte.
 */
static uint64_t
clear_lockbyte64(volatile uint64_t *lockword64)
{
	uint64_t old;
	uint64_t new;

	do {
		old = *lockword64;
		new = old & ~LOCKMASK64;
	} while (atomic_cas_64(lockword64, old, new) != old);

	return (old);
}

/*
 * Similar to set_lock_byte(), which only tries to set the lock byte.
 * Here, we attempt to set the lock byte AND the mutex_ownerpid,
 * keeping the remaining bytes constant.
 */
static int
set_lock_byte64(volatile uint64_t *lockword64, pid_t ownerpid)
{
	uint64_t old;
	uint64_t new;

	old = *lockword64 & ~LOCKMASK64;
	new = old | ((uint64_t)(uint_t)ownerpid << PIDSHIFT) | LOCKBYTE64;
	if (atomic_cas_64(lockword64, old, new) == old)
		return (LOCKCLEAR);

	return (LOCKSET);
}

/*
 * Increment the spinners count in the mutex lock word.
 * Return 0 on success.  Return -1 if the count would overflow.
 */
static int
spinners_incr(volatile uint32_t *lockword, uint8_t max_spinners)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		if (((old & SPINNERMASK) >> SPINNERSHIFT) >= max_spinners)
			return (-1);
		new = old + (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (0);
}

/*
 * Decrement the spinners count in the mutex lock word.
 * Return the new value of the lock word.
 */
static uint32_t
spinners_decr(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		new = old = *lockword;
		if (new & SPINNERMASK)
			new -= (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (new);
}

/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 * No DTrace probes are provided for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
	ulwp_t *self = curthread;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Spin for a while, attempting to acquire the lock.
	 */
	INCR32(self->ul_spin_lock_spin);
	if (mutex_queuelock_adaptive(mp) == 0 ||
	    set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Try harder if we were previously at a no-preemption level.
	 */
	if (self->ul_preempt > 1) {
		INCR32(self->ul_spin_lock_spin2);
		if (mutex_queuelock_adaptive(mp) == 0 ||
		    set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			return;
		}
	}
	/*
	 * Give up and block in the kernel for the mutex.
	 */
	INCR32(self->ul_spin_lock_sleep);
	(void) ___lwp_mutex_timedlock(mp, NULL);
	mp->mutex_owner = (uintptr_t)self;
}

void
spin_lock_clear(mutex_t *mp)
{
	ulwp_t *self = curthread;

	mp->mutex_owner = 0;
	if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
		(void) ___lwp_mutex_wakeup(mp, 0);
		INCR32(self->ul_spin_lock_wakeup);
	}
	preempt(self);
}

/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	void *data;
	int i;

	/*
	 * No locks are needed; we call here only when single-threaded.
	 */
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	if ((data = mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread queue_head table");
	udp->queue_head = qp = (queue_head_t *)data;
	for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
		qp->qh_type = (i < QHASHSIZE)? MX : CV;
		qp->qh_lock.mutex_flag = LOCK_INITED;
		qp->qh_lock.mutex_magic = MUTEX_MAGIC;
		qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
		qp->qh_hlen = 1;
		qp->qh_hmax = 1;
#endif
	}
}

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_root_t *qrp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	uint_t index;
	uint32_t cnt;
	char qtype;
	void *wchan;

	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		cnt++;
		ASSERT((qrp->qr_head != NULL && qrp->qr_tail != NULL) ||
		    (qrp->qr_head == NULL && qrp->qr_tail == NULL));
	}
	ASSERT(qp->qh_hlen == cnt && qp->qh_hmax >= cnt);
	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
	ASSERT(qp->qh_type == qtype);
	if (!thread_queue_verify)
		return;
	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		for (prev = NULL, ulwp = qrp->qr_head; ulwp != NULL;
		    prev = ulwp, ulwp = ulwp->ul_link) {
			cnt++;
			if (ulwp->ul_writer)
				ASSERT(prev == NULL || prev->ul_writer);
			ASSERT(ulwp->ul_qtype == qtype);
			ASSERT(ulwp->ul_wchan != NULL);
			ASSERT(ulwp->ul_sleepq == qp);
			wchan = ulwp->ul_wchan;
			ASSERT(qrp->qr_wchan == wchan);
			index = QUEUE_HASH(wchan, qtype);
			ASSERT(&udp->queue_head[index] == qp);
		}
		ASSERT(qrp->qr_tail == prev);
	}
	ASSERT(qp->qh_qlen == cnt);
}

#else	/* THREAD_DEBUG */

#define	QVERIFY(qp)

#endif	/* THREAD_DEBUG */

/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;
	queue_root_t *qrp;

	ASSERT(qtype == MX || qtype == CV);

	/*
	 * It is possible that we could be called while still single-threaded.
	 * If so, we call queue_alloc() to allocate the queue_head[] array.
	 */
	if ((qp = udp->queue_head) == NULL) {
		queue_alloc();
		qp = udp->queue_head;
	}
	qp += QUEUE_HASH(wchan, qtype);
	spin_lock_set(&qp->qh_lock);
	for (qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next)
		if (qrp->qr_wchan == wchan)
			break;
	if (qrp == NULL && qp->qh_def_root.qr_head == NULL) {
		/* the default queue root is available; use it */
		qrp = &qp->qh_def_root;
		qrp->qr_wchan = wchan;
		ASSERT(qrp->qr_next == NULL);
		ASSERT(qrp->qr_tail == NULL &&
		    qrp->qr_rtcount == 0 && qrp->qr_qlen == 0);
	}
	qp->qh_wchan = wchan;	/* valid until queue_unlock() is called */
	qp->qh_root = qrp;	/* valid until queue_unlock() is called */
	INCR32(qp->qh_lockcount);
	QVERIFY(qp);
	return (qp);
}

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
	QVERIFY(qp);
	spin_lock_clear(&qp->qh_lock);
}
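
/*
 * Illustrative sketch: throughout this file, queue heads are used in
 * a bracketed pattern (see unsleep_self() below for a real use):
 *
 *	queue_head_t *qp = queue_lock(wchan, MX);
 *	... examine or modify the sleep queue for wchan ...
 *	queue_unlock(qp);
 *
 * The qh_wchan and qh_root fields set by queue_lock() are only
 * meaningful while this bracket is held.
 */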

/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.  We do this by making writers appear to have a half
 * point higher priority for purposes of priority comparisons below.
 */
#define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)
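
/*
 * Illustrative worked example: with real_priority() == 10 for both,
 * a reader compares as (10 << 1) + 0 == 20 while a writer compares
 * as (10 << 1) + 1 == 21, so the writer sorts ahead; doubling first
 * ensures the half-point boost can never promote a writer past a
 * thread of genuinely higher priority (priority 11 compares as 22).
 */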

void
enqueue(queue_head_t *qp, ulwp_t *ulwp, int force_fifo)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *next;
	int pri = CMP_PRIO(ulwp);

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(ulwp->ul_sleepq != qp);

	if ((qrp = qp->qh_root) == NULL) {
		/* use the thread's queue root for the linkage */
		qrp = &ulwp->ul_queue_root;
		qrp->qr_next = qp->qh_hlist;
		qrp->qr_prev = NULL;
		qrp->qr_head = NULL;
		qrp->qr_tail = NULL;
		qrp->qr_wchan = qp->qh_wchan;
		qrp->qr_rtcount = 0;
		qrp->qr_qlen = 0;
		qrp->qr_qmax = 0;
		qp->qh_hlist->qr_prev = qrp;
		qp->qh_hlist = qrp;
		qp->qh_root = qrp;
		MAXINCR(qp->qh_hmax, qp->qh_hlen);
	}

	/*
	 * LIFO queue ordering is unfair and can lead to starvation,
	 * but it gives better performance for heavily contended locks.
	 * We use thread_queue_fifo (range is 0..8) to determine
	 * the frequency of FIFO vs LIFO queuing:
	 *	0 : every 256th time	(almost always LIFO)
	 *	1 : every 128th time
	 *	2 : every 64th  time
	 *	3 : every 32nd  time
	 *	4 : every 16th  time	(the default value, mostly LIFO)
	 *	5 : every 8th   time
	 *	6 : every 4th   time
	 *	7 : every 2nd   time
	 *	8 : every time		(never LIFO, always FIFO)
	 * Note that there is always some degree of FIFO ordering.
	 * This breaks livelock conditions that occur in applications
	 * that are written assuming (incorrectly) that threads acquire
	 * locks fairly, that is, in roughly round-robin order.
	 * In any event, the queue is maintained in kernel priority order.
	 *
	 * If force_fifo is non-zero, FIFO queueing is forced.
	 * SUSV3 requires this for semaphores.
	 */
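	/*
	 * Illustrative sketch: the FIFO/LIFO choice below comes from
	 * the test ((++qh_qcnt << ul_queue_fifo) & 0xff) == 0.  With
	 * the default ul_queue_fifo of 4, the low eight bits of the
	 * shifted count are zero exactly when qh_qcnt is a multiple
	 * of 16, giving the "every 16th time" frequency tabulated
	 * above.
	 */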
	if (qrp->qr_head == NULL) {
		/*
		 * The queue is empty.  LIFO/FIFO doesn't matter.
		 */
		ASSERT(qrp->qr_tail == NULL);
		ulwpp = &qrp->qr_head;
	} else if (force_fifo |
	    (((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0)) {
		/*
		 * Enqueue after the last thread whose priority is greater
		 * than or equal to the priority of the thread being queued.
		 * Attempt first to go directly onto the tail of the queue.
		 */
		if (pri <= CMP_PRIO(qrp->qr_tail))
			ulwpp = &qrp->qr_tail->ul_link;
		else {
			for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
			    ulwpp = &next->ul_link)
				if (pri > CMP_PRIO(next))
					break;
		}
	} else {
		/*
		 * Enqueue before the first thread whose priority is less
		 * than or equal to the priority of the thread being queued.
		 * Hopefully we can go directly onto the head of the queue.
		 */
		for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
		    ulwpp = &next->ul_link)
			if (pri >= CMP_PRIO(next))
				break;
	}
	if ((ulwp->ul_link = *ulwpp) == NULL)
		qrp->qr_tail = ulwp;
	*ulwpp = ulwp;

	ulwp->ul_sleepq = qp;
	ulwp->ul_wchan = qp->qh_wchan;
	ulwp->ul_qtype = qp->qh_type;
	if ((ulwp->ul_schedctl != NULL &&
	    ulwp->ul_schedctl->sc_cid == ulwp->ul_rtclassid) |
	    ulwp->ul_pilocks) {
		ulwp->ul_rtqueued = 1;
		qrp->qr_rtcount++;
	}
	MAXINCR(qrp->qr_qmax, qrp->qr_qlen);
	MAXINCR(qp->qh_qmax, qp->qh_qlen);
}

/*
 * Helper function for queue_slot() and queue_slot_rt().
 * Try to find a non-suspended thread on the queue.
 */
static ulwp_t **
queue_slot_runnable(ulwp_t **ulwpp, ulwp_t **prevp, int rt)
{
	ulwp_t *ulwp;
	ulwp_t **foundpp = NULL;
	int priority = -1;
	ulwp_t *prev;
	int tpri;

	for (prev = NULL;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_stop)	/* skip suspended threads */
			continue;
		tpri = rt? CMP_PRIO(ulwp) : 0;
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
			if (!rt)
				break;
		}
	}
	return (foundpp);
}

/*
 * For real-time, we search the entire queue because the dispatch
 * (kernel) priorities may have changed since enqueueing.
 */
static ulwp_t **
queue_slot_rt(ulwp_t **ulwpp_org, ulwp_t **prevp)
{
	ulwp_t **ulwpp = ulwpp_org;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t **foundpp = ulwpp;
	int priority = CMP_PRIO(ulwp);
	ulwp_t *prev;
	int tpri;

	for (prev = ulwp, ulwpp = &ulwp->ul_link;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		tpri = CMP_PRIO(ulwp);
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
		}
	}
	ulwp = *foundpp;

	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(ulwpp_org, prevp, 1)) != NULL) {
		foundpp = ulwpp;
		ulwp = *foundpp;
	}
	ulwp->ul_rt = 1;
	return (foundpp);
}

ulwp_t **
queue_slot(queue_head_t *qp, ulwp_t **prevp, int *more)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int rt;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

	if ((qrp = qp->qh_root) == NULL || (ulwp = qrp->qr_head) == NULL) {
		*more = 0;
		return (NULL);		/* no lwps on the queue */
	}
	rt = (qrp->qr_rtcount != 0);
	*prevp = NULL;
	if (ulwp->ul_link == NULL) {	/* only one lwp on the queue */
		*more = 0;
		ulwp->ul_rt = rt;
		return (&qrp->qr_head);
	}
	*more = 1;

	if (rt)		/* real-time queue */
		return (queue_slot_rt(&qrp->qr_head, prevp));
	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(&qrp->qr_head, prevp, 0)) != NULL) {
		ulwp = *ulwpp;
		ulwp->ul_rt = 0;
		return (ulwpp);
	}
	/*
	 * The common case; just pick the first thread on the queue.
	 */
	ulwp->ul_rt = 0;
	return (&qrp->qr_head);
}

/*
 * Common code for unlinking an lwp from a user-level sleep queue.
 */
void
queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
{
	queue_root_t *qrp = qp->qh_root;
	queue_root_t *nqrp;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t *next;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(qp->qh_wchan != NULL && ulwp->ul_wchan == qp->qh_wchan);

	DECR(qp->qh_qlen);
	DECR(qrp->qr_qlen);
	if (ulwp->ul_rtqueued) {
		ulwp->ul_rtqueued = 0;
		qrp->qr_rtcount--;
	}
	next = ulwp->ul_link;
	*ulwpp = next;
	ulwp->ul_link = NULL;
	if (qrp->qr_tail == ulwp)
		qrp->qr_tail = prev;
	if (qrp == &ulwp->ul_queue_root) {
		/*
		 * We can't continue to use the unlinked thread's
		 * queue root for the linkage.
		 */
		queue_root_t *qr_next = qrp->qr_next;
		queue_root_t *qr_prev = qrp->qr_prev;

		if (qrp->qr_tail) {
			/* switch to using the last thread's queue root */
			ASSERT(qrp->qr_qlen != 0);
			nqrp = &qrp->qr_tail->ul_queue_root;
			*nqrp = *qrp;
			if (qr_next)
				qr_next->qr_prev = nqrp;
			if (qr_prev)
				qr_prev->qr_next = nqrp;
			else
				qp->qh_hlist = nqrp;
			qp->qh_root = nqrp;
		} else {
			/* empty queue root; just delete from the hash list */
			ASSERT(qrp->qr_qlen == 0);
			if (qr_next)
				qr_next->qr_prev = qr_prev;
			if (qr_prev)
				qr_prev->qr_next = qr_next;
			else
				qp->qh_hlist = qr_next;
			qp->qh_root = NULL;
			DECR(qp->qh_hlen);
		}
	}
}

ulwp_t *
dequeue(queue_head_t *qp, int *more)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;

	if ((ulwpp = queue_slot(qp, &prev, more)) == NULL)
		return (NULL);
	ulwp = *ulwpp;
	queue_unlink(qp, ulwpp, prev);
	ulwp->ul_sleepq = NULL;
	ulwp->ul_wchan = NULL;
	return (ulwp);
}

/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp)
{
	ulwp_t **ulwpp;
	ulwp_t *prev;
	int more;

	if ((ulwpp = queue_slot(qp, &prev, &more)) == NULL)
		return (NULL);
	return (*ulwpp);
}

int
dequeue_self(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int found = 0;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

	/* find self on the sleep queue */
	if ((qrp = qp->qh_root) != NULL) {
		for (prev = NULL, ulwpp = &qrp->qr_head;
		    (ulwp = *ulwpp) != NULL;
		    prev = ulwp, ulwpp = &ulwp->ul_link) {
			if (ulwp == self) {
				queue_unlink(qp, ulwpp, prev);
				self->ul_cvmutex = NULL;
				self->ul_sleepq = NULL;
				self->ul_wchan = NULL;
				found = 1;
				break;
			}
		}
	}

	if (!found)
		thr_panic("dequeue_self(): curthread not found on queue");

	return ((qrp = qp->qh_root) != NULL && qrp->qr_head != NULL);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourself off of our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;

	/*
	 * Calling enter_critical()/exit_critical() here would lead
	 * to recursion.  Just manipulate self->ul_critical directly.
	 */
	self->ul_critical++;
	while (self->ul_sleepq != NULL) {
		qp = queue_lock(self->ul_wchan, self->ul_qtype);
		/*
		 * We may have been moved from a CV queue to a
		 * mutex queue while we were attempting queue_lock().
		 * If so, just loop around and try again.
		 * dequeue_self() clears self->ul_sleepq.
		 */
		if (qp == self->ul_sleepq)
			(void) dequeue_self(qp);
		queue_unlock(qp);
	}
	self->ul_writer = 0;
	self->ul_critical--;
}

/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
static int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep;
	int acquired;
	int error;

	self->ul_sp = stkptr();
	self->ul_wchan = mp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}
	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;
	self->ul_wchan = NULL;
	self->ul_sp = 0;

	if (acquired) {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	int error;
	int acquired;

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_trylock(mp)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				mp->mutex_owner = (uintptr_t)self;
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			mp->mutex_owner = (uintptr_t)self;
			acquired = 1;
			break;
		}
	}

	if (acquired) {
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else if (error != EBUSY) {
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	sc_shared_t *tmp;

	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
	    !self->ul_vfork &&			/* not a child of vfork() */
	    !self->ul_schedctl_called) {	/* haven't been called before */
		enter_critical(self);
		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
			self->ul_schedctl = scp = tmp;
		exit_critical(self);
	}
	/*
	 * Unless the call to setup_schedctl() is surrounded
	 * by enter_critical()/exit_critical(), the address
	 * we are returning could be invalid due to a forkall()
	 * having occurred in another thread.
	 */
	return (scp);
}

/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = schedctl_init
schedctl_t *
schedctl_init(void)
{
	volatile sc_shared_t *scp = setup_schedctl();
	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}

void
schedctl_exit(void)
{
}
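
/*
 * Illustrative sketch (application view, per schedctl(3C); not part
 * of this file): the handle returned by schedctl_init() is used to
 * bracket short critical sections to discourage preemption, e.g.
 *
 *	schedctl_t *sc = schedctl_init();
 *	schedctl_start(sc);
 *	... short critical section ...
 *	schedctl_stop(sc);
 *
 * schedctl_start()/schedctl_stop() are the documented consumers of
 * the sc_preemptctl block returned above.
 */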

/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *volatile *ptr;

	if (self->ul_vfork)
		return (NULL);
	if (*(ptr = &self->ul_schedctl) == NULL)
		(void) setup_schedctl();
	return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_preempt++ == 0) {
		enter_critical(self);
		if ((scp = self->ul_schedctl) != NULL ||
		    (scp = setup_schedctl()) != NULL) {
			/*
			 * Save the pre-existing preempt value.
			 */
			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
			scp->sc_preemptctl.sc_nopreempt = 1;
		}
	}
}

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	ASSERT(self->ul_preempt > 0);
	if (--self->ul_preempt == 0) {
		if ((scp = self->ul_schedctl) != NULL) {
			/*
			 * Restore the pre-existing preempt value.
			 */
			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
			if (scp->sc_preemptctl.sc_yield &&
			    scp->sc_preemptctl.sc_nopreempt == 0) {
				yield();
				if (scp->sc_preemptctl.sc_yield) {
					/*
					 * Shouldn't happen.  This is either
					 * a race condition or the thread
					 * just entered the real-time class.
					 */
					yield();
					scp->sc_preemptctl.sc_yield = 0;
				}
			}
		}
		exit_critical(self);
	}
}
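
/*
 * Illustrative sketch: no_preempt()/preempt() always bracket a
 * critical region and may nest (see spin_lock_set() and
 * spin_lock_clear() above for a real pairing):
 *
 *	no_preempt(self);	outermost call sets sc_nopreempt
 *	no_preempt(self);	nested call just counts
 *	preempt(self);
 *	preempt(self);		outermost call restores and may yield
 */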

/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
	volatile sc_shared_t *scp = self->ul_schedctl;

	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
	    (self->ul_curplease && self->ul_critical == 1)) {
		(void) __lwp_unpark(lwpid);
		lwpid = 0;
	}
	return (lwpid);
}

/*
 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
static int
mutex_trylock_adaptive(mutex_t *mp, int tryhard)
{
	ulwp_t *self = curthread;
	int error = EBUSY;
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
	volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
	uint32_t new_lockword;
	int count = 0;
	int max_count;
	uint8_t max_spinners;

	ASSERT(!(mp->mutex_type & USYNC_PROCESS));

	if (MUTEX_OWNER(mp) == self)
		return (EBUSY);

	/* short-cut, not definitive (see below) */
	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		error = ENOTRECOVERABLE;
		goto done;
	}

	/*
	 * Make one attempt to acquire the lock before
	 * incurring the overhead of the spin loop.
	 */
	if (set_lock_byte(lockp) == 0) {
		*ownerp = (uintptr_t)self;
		error = 0;
		goto done;
	}
	if (!tryhard)
		goto done;
	if (ncpus == 0)
		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	if ((max_spinners = self->ul_max_spinners) >= ncpus)
		max_spinners = ncpus - 1;
	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
	if (max_count == 0)
		goto done;

	/*
	 * This spin loop is unfair to lwps that have already dropped into
	 * the kernel to sleep.  They will starve on a highly-contended mutex.
	 * This is just too bad.  The adaptive spin algorithm is intended
	 * to allow programs with highly-contended locks (that is, broken
	 * programs) to execute with reasonable speed despite their contention.
	 * Being fair would reduce the speed of such programs and well-written
	 * programs will not suffer in any case.
	 */
	enter_critical(self);
	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
		exit_critical(self);
		goto done;
	}
	DTRACE_PROBE1(plockstat, mutex__spin, mp);
	for (count = 1; ; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
			break;
		}
		if (count == max_count)
			break;
		SMT_PAUSE();
		/*
		 * Stop spinning if the mutex owner is not running on
		 * a processor; it will not drop the lock any time soon
		 * and we would just be wasting time to keep spinning.
		 *
		 * Note that we are looking at another thread (ulwp_t)
		 * without ensuring that the other thread does not exit.
		 * The scheme relies on ulwp_t structures never being
		 * deallocated by the library (the library employs a free
		 * list of ulwp_t structs that are reused when new threads
		 * are created) and on schedctl shared memory never being
		 * deallocated once created via __schedctl().
		 *
		 * Thus, the worst that can happen when the spinning thread
		 * looks at the owner's schedctl data is that it is looking
		 * at some other thread's schedctl data.  This almost never
		 * happens and is benign when it does.
		 */
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}
	new_lockword = spinners_decr(&mp->mutex_lockword);
	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
		/*
		 * We haven't yet acquired the lock, the lock
		 * is free, and there are no other spinners.
		 * Make one final attempt to acquire the lock.
		 *
		 * This isn't strictly necessary since mutex_lock_queue()
		 * (the next action this thread will take if it doesn't
		 * acquire the lock here) makes one attempt to acquire
		 * the lock before putting the thread to sleep.
		 *
		 * If the next action for this thread (on failure here)
		 * were not to call mutex_lock_queue(), this would be
		 * necessary for correctness, to avoid ending up with an
		 * unheld mutex with waiters but no one to wake them up.
		 */
		if (set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
		}
		count++;
	}
	exit_critical(self);

done:
	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
13394574Sraf 		/*
13406057Sraf 		 * We shouldn't own the mutex.
13416057Sraf 		 * Just clear the lock; everyone has already been woken up.
13424574Sraf 		 */
13434574Sraf 		mp->mutex_owner = 0;
13446057Sraf 		(void) clear_lockbyte(&mp->mutex_lockword);
13454574Sraf 		error = ENOTRECOVERABLE;
13464574Sraf 	}
13474574Sraf 
13484574Sraf 	if (error) {
13495629Sraf 		if (count) {
13505629Sraf 			DTRACE_PROBE2(plockstat, mutex__spun, 0, count);
13515629Sraf 		}
13524574Sraf 		if (error != EBUSY) {
13534574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
13544574Sraf 		}
13554574Sraf 	} else {
13565629Sraf 		if (count) {
13575629Sraf 			DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
13585629Sraf 		}
13594574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
13604574Sraf 		if (mp->mutex_flag & LOCK_OWNERDEAD) {
13614574Sraf 			ASSERT(mp->mutex_type & LOCK_ROBUST);
13624574Sraf 			error = EOWNERDEAD;
13634574Sraf 		}
13644574Sraf 	}
13654574Sraf 
13664574Sraf 	return (error);
13670Sstevel@tonic-gate }
13680Sstevel@tonic-gate 
13690Sstevel@tonic-gate /*
13700Sstevel@tonic-gate  * Same as mutex_trylock_adaptive(), except specifically for queue locks.
13710Sstevel@tonic-gate  * The owner field is not set here; the caller (spin_lock_set()) sets it.
13720Sstevel@tonic-gate  */
13734574Sraf static int
13740Sstevel@tonic-gate mutex_queuelock_adaptive(mutex_t *mp)
13750Sstevel@tonic-gate {
13760Sstevel@tonic-gate 	ulwp_t *ulwp;
13770Sstevel@tonic-gate 	volatile sc_shared_t *scp;
13780Sstevel@tonic-gate 	volatile uint8_t *lockp;
13790Sstevel@tonic-gate 	volatile uint64_t *ownerp;
13800Sstevel@tonic-gate 	int count = curthread->ul_queue_spin;
13810Sstevel@tonic-gate 
13820Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
13830Sstevel@tonic-gate 
13840Sstevel@tonic-gate 	if (count == 0)
13850Sstevel@tonic-gate 		return (EBUSY);
13860Sstevel@tonic-gate 
13870Sstevel@tonic-gate 	lockp = (volatile uint8_t *)&mp->mutex_lockw;
13880Sstevel@tonic-gate 	ownerp = (volatile uint64_t *)&mp->mutex_owner;
13890Sstevel@tonic-gate 	while (--count >= 0) {
13900Sstevel@tonic-gate 		if (*lockp == 0 && set_lock_byte(lockp) == 0)
13910Sstevel@tonic-gate 			return (0);
13920Sstevel@tonic-gate 		SMT_PAUSE();
13930Sstevel@tonic-gate 		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
13940Sstevel@tonic-gate 		    ((scp = ulwp->ul_schedctl) == NULL ||
13950Sstevel@tonic-gate 		    scp->sc_state != SC_ONPROC))
13960Sstevel@tonic-gate 			break;
13970Sstevel@tonic-gate 	}
13980Sstevel@tonic-gate 
13990Sstevel@tonic-gate 	return (EBUSY);
14000Sstevel@tonic-gate }
14010Sstevel@tonic-gate 
14020Sstevel@tonic-gate /*
14030Sstevel@tonic-gate  * Like mutex_trylock_adaptive(), but for process-shared mutexes.
14044613Sraf  * Spin for a while (if 'tryhard' is true), trying to grab the lock.
14050Sstevel@tonic-gate  * If this fails, return EBUSY and let the caller deal with it.
14060Sstevel@tonic-gate  * If this succeeds, return 0 with mutex_owner set to curthread
14070Sstevel@tonic-gate  * and mutex_ownerpid set to the current pid.
14080Sstevel@tonic-gate  */
14094574Sraf static int
14104613Sraf mutex_trylock_process(mutex_t *mp, int tryhard)
14110Sstevel@tonic-gate {
14120Sstevel@tonic-gate 	ulwp_t *self = curthread;
14135629Sraf 	uberdata_t *udp = self->ul_uberdata;
14144574Sraf 	int error = EBUSY;
14156057Sraf 	volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64;
14165629Sraf 	uint32_t new_lockword;
14175629Sraf 	int count = 0;
14185629Sraf 	int max_count;
14195629Sraf 	uint8_t max_spinners;
14204574Sraf 
14214574Sraf 	ASSERT(mp->mutex_type & USYNC_PROCESS);
14224574Sraf 
14234574Sraf 	if (shared_mutex_held(mp))
14240Sstevel@tonic-gate 		return (EBUSY);
14250Sstevel@tonic-gate 
14264574Sraf 	/* short-cut, not definitive (see below) */
14274574Sraf 	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
14284574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
14295629Sraf 		error = ENOTRECOVERABLE;
14305629Sraf 		goto done;
14314574Sraf 	}
14324574Sraf 
14335629Sraf 	/*
14345629Sraf 	 * Make one attempt to acquire the lock before
14355629Sraf 	 * incurring the overhead of the spin loop.
14365629Sraf 	 */
14375629Sraf 	enter_critical(self);
14386057Sraf 	if (set_lock_byte64(lockp, udp->pid) == 0) {
14395629Sraf 		mp->mutex_owner = (uintptr_t)self;
14406057Sraf 		/* mp->mutex_ownerpid was set by set_lock_byte64() */
14415629Sraf 		exit_critical(self);
14425629Sraf 		error = 0;
14435629Sraf 		goto done;
14445629Sraf 	}
14455629Sraf 	exit_critical(self);
14465629Sraf 	if (!tryhard)
14475629Sraf 		goto done;
14484574Sraf 	if (ncpus == 0)
14494574Sraf 		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
14505629Sraf 	if ((max_spinners = self->ul_max_spinners) >= ncpus)
14515629Sraf 		max_spinners = ncpus - 1;
14525629Sraf 	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
14535629Sraf 	if (max_count == 0)
14545629Sraf 		goto done;
14555629Sraf 
14560Sstevel@tonic-gate 	/*
14570Sstevel@tonic-gate 	 * This is a process-shared mutex.
14580Sstevel@tonic-gate 	 * We cannot know if the owner is running on a processor.
14590Sstevel@tonic-gate 	 * We just spin and hope that it is on a processor.
14600Sstevel@tonic-gate 	 */
14614574Sraf 	enter_critical(self);
14625629Sraf 	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
14635629Sraf 		exit_critical(self);
14645629Sraf 		goto done;
14655629Sraf 	}
14665629Sraf 	DTRACE_PROBE1(plockstat, mutex__spin, mp);
14675629Sraf 	for (count = 1; ; count++) {
14686057Sraf 		if ((*lockp & LOCKMASK64) == 0 &&
14696057Sraf 		    set_lock_byte64(lockp, udp->pid) == 0) {
14704574Sraf 			mp->mutex_owner = (uintptr_t)self;
14716057Sraf 			/* mp->mutex_ownerpid was set by set_lock_byte64() */
14724574Sraf 			error = 0;
14734574Sraf 			break;
14744574Sraf 		}
14755629Sraf 		if (count == max_count)
14765629Sraf 			break;
14774574Sraf 		SMT_PAUSE();
14784574Sraf 	}
14795629Sraf 	new_lockword = spinners_decr(&mp->mutex_lockword);
14805629Sraf 	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
14815629Sraf 		/*
14825629Sraf 		 * We haven't yet acquired the lock, the lock
14835629Sraf 		 * is free, and there are no other spinners.
14845629Sraf 		 * Make one final attempt to acquire the lock.
14855629Sraf 		 *
14865629Sraf 		 * This isn't strictly necessary since mutex_lock_kernel()
14875629Sraf 		 * (the next action this thread will take if it doesn't
14885629Sraf 		 * acquire the lock here) makes one attempt to acquire
14895629Sraf 		 * the lock before putting the thread to sleep.
14905629Sraf 		 *
14915629Sraf 		 * If the next action for this thread (on failure here)
14925629Sraf 		 * were not to call mutex_lock_kernel(), this would be
14935629Sraf 		 * necessary for correctness, to avoid ending up with an
14945629Sraf 		 * unheld mutex with waiters but no one to wake them up.
14955629Sraf 		 */
14966057Sraf 		if (set_lock_byte64(lockp, udp->pid) == 0) {
14975629Sraf 			mp->mutex_owner = (uintptr_t)self;
14986057Sraf 			/* mp->mutex_ownerpid was set by set_lock_byte64() */
14995629Sraf 			error = 0;
15005629Sraf 		}
15015629Sraf 		count++;
15025629Sraf 	}
15034574Sraf 	exit_critical(self);
15044574Sraf 
15055629Sraf done:
15064574Sraf 	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
15074574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
15084574Sraf 		/*
15096057Sraf 		 * We shouldn't own the mutex.
15106057Sraf 		 * Just clear the lock; everyone has already been woken up.
15114574Sraf 		 */
15124574Sraf 		mp->mutex_owner = 0;
15136057Sraf 		/* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
15146057Sraf 		(void) clear_lockbyte64(&mp->mutex_lockword64);
15154574Sraf 		error = ENOTRECOVERABLE;
15160Sstevel@tonic-gate 	}
15170Sstevel@tonic-gate 
15184574Sraf 	if (error) {
15195629Sraf 		if (count) {
15205629Sraf 			DTRACE_PROBE2(plockstat, mutex__spun, 0, count);
15215629Sraf 		}
15224574Sraf 		if (error != EBUSY) {
15234574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
15244574Sraf 		}
15254574Sraf 	} else {
15265629Sraf 		if (count) {
15275629Sraf 			DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
15285629Sraf 		}
15294574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
15304574Sraf 		if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) {
15314574Sraf 			ASSERT(mp->mutex_type & LOCK_ROBUST);
15324574Sraf 			if (mp->mutex_flag & LOCK_OWNERDEAD)
15334574Sraf 				error = EOWNERDEAD;
15344574Sraf 			else if (mp->mutex_type & USYNC_PROCESS_ROBUST)
15354574Sraf 				error = ELOCKUNMAPPED;
15364574Sraf 			else
15374574Sraf 				error = EOWNERDEAD;
15384574Sraf 		}
15394574Sraf 	}
15404574Sraf 
15414574Sraf 	return (error);
15420Sstevel@tonic-gate }
15430Sstevel@tonic-gate 
15440Sstevel@tonic-gate /*
15450Sstevel@tonic-gate  * Mutex wakeup code for releasing a USYNC_THREAD mutex.
15460Sstevel@tonic-gate  * Returns the lwpid of the thread that was dequeued, if any.
15470Sstevel@tonic-gate  * The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
15480Sstevel@tonic-gate  * to wake up the specified lwp.
15490Sstevel@tonic-gate  */
15504574Sraf static lwpid_t
15510Sstevel@tonic-gate mutex_wakeup(mutex_t *mp)
15520Sstevel@tonic-gate {
15530Sstevel@tonic-gate 	lwpid_t lwpid = 0;
15546247Sraf 	int more;
15550Sstevel@tonic-gate 	queue_head_t *qp;
15560Sstevel@tonic-gate 	ulwp_t *ulwp;
15570Sstevel@tonic-gate 
15580Sstevel@tonic-gate 	/*
15590Sstevel@tonic-gate 	 * Dequeue a waiter from the sleep queue.  Don't touch the mutex
15600Sstevel@tonic-gate 	 * waiters bit if no one was found on the queue because the mutex
15610Sstevel@tonic-gate 	 * might have been deallocated or reallocated for another purpose.
15620Sstevel@tonic-gate 	 */
15630Sstevel@tonic-gate 	qp = queue_lock(mp, MX);
15646247Sraf 	if ((ulwp = dequeue(qp, &more)) != NULL) {
15650Sstevel@tonic-gate 		lwpid = ulwp->ul_lwpid;
15666247Sraf 		mp->mutex_waiters = more;
15670Sstevel@tonic-gate 	}
15680Sstevel@tonic-gate 	queue_unlock(qp);
15690Sstevel@tonic-gate 	return (lwpid);
15700Sstevel@tonic-gate }
15710Sstevel@tonic-gate 
15720Sstevel@tonic-gate /*
15734574Sraf  * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex.
15744574Sraf  */
15754574Sraf static void
15764574Sraf mutex_wakeup_all(mutex_t *mp)
15774574Sraf {
15784574Sraf 	queue_head_t *qp;
15796247Sraf 	queue_root_t *qrp;
15804574Sraf 	int nlwpid = 0;
15814574Sraf 	int maxlwps = MAXLWPS;
15824574Sraf 	ulwp_t *ulwp;
15834574Sraf 	lwpid_t buffer[MAXLWPS];
15844574Sraf 	lwpid_t *lwpid = buffer;
15854574Sraf 
15864574Sraf 	/*
15874574Sraf 	 * Walk the list of waiters and prepare to wake up all of them.
15884574Sraf 	 * The waiters flag has already been cleared from the mutex.
15894574Sraf 	 *
15904574Sraf 	 * We keep track of lwpids that are to be unparked in lwpid[].
15914574Sraf 	 * __lwp_unpark_all() is called to unpark all of them after
15924574Sraf 	 * they have been removed from the sleep queue and the sleep
15934574Sraf 	 * queue lock has been dropped.  If we run out of space in our
15944574Sraf 	 * on-stack buffer, we need to allocate more but we can't call
15954574Sraf 	 * lmalloc() because we are holding a queue lock when the overflow
15964574Sraf 	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
15974574Sraf 	 * either because the application may have allocated a small
15984574Sraf 	 * stack and we don't want to overrun the stack.  So we call
15994574Sraf 	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
16004574Sraf 	 * system call directly since that path acquires no locks.
16014574Sraf 	 */
16024574Sraf 	qp = queue_lock(mp, MX);
16036247Sraf 	for (;;) {
16046247Sraf 		if ((qrp = qp->qh_root) == NULL ||
16056247Sraf 		    (ulwp = qrp->qr_head) == NULL)
16066247Sraf 			break;
16076247Sraf 		ASSERT(ulwp->ul_wchan == mp);
16086247Sraf 		queue_unlink(qp, &qrp->qr_head, NULL);
16096247Sraf 		ulwp->ul_sleepq = NULL;
16106247Sraf 		ulwp->ul_wchan = NULL;
16116247Sraf 		if (nlwpid == maxlwps)
16126247Sraf 			lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
16136247Sraf 		lwpid[nlwpid++] = ulwp->ul_lwpid;
16144574Sraf 	}
16154574Sraf 
16164574Sraf 	if (nlwpid == 0) {
16174574Sraf 		queue_unlock(qp);
16184574Sraf 	} else {
16195629Sraf 		mp->mutex_waiters = 0;
16204574Sraf 		no_preempt(curthread);
16214574Sraf 		queue_unlock(qp);
16224574Sraf 		if (nlwpid == 1)
16234574Sraf 			(void) __lwp_unpark(lwpid[0]);
16244574Sraf 		else
16254574Sraf 			(void) __lwp_unpark_all(lwpid, nlwpid);
16264574Sraf 		preempt(curthread);
16274574Sraf 	}
16284574Sraf 
16294574Sraf 	if (lwpid != buffer)
16306515Sraf 		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
16314574Sraf }
16324574Sraf 
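/*
 * Illustrative sketch, not part of the library: the shape of an
 * alloc_lwpids()-style grower, as described in the comment above.
 * It doubles the buffer with a raw mmap() (which takes no locks)
 * and copies the lwpids collected so far.  The function name and
 * the old_is_mmapped flag are hypothetical; error handling is
 * simplified relative to the real helper.
 */
#include <sys/mman.h>
#include <string.h>

static lwpid_t *
grow_lwpid_buffer_sketch(lwpid_t *old, int nlwpid, int *maxlwpsp,
	int old_is_mmapped)
{
	size_t oldsize = *maxlwpsp * sizeof (lwpid_t);
	int newmax = *maxlwpsp * 2;
	lwpid_t *new = mmap(NULL, newmax * sizeof (lwpid_t),
	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

	if (new == MAP_FAILED)
		return (old);		/* sketch: caller must cope */
	(void) memcpy(new, old, nlwpid * sizeof (lwpid_t));
	if (old_is_mmapped)		/* the first buffer is on-stack */
		(void) munmap((caddr_t)old, oldsize);
	*maxlwpsp = newmax;
	return (new);
}
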
16334574Sraf /*
16345629Sraf  * Release a process-private mutex.
16355629Sraf  * As an optimization, if there are waiters but there are also spinners
16365629Sraf  * attempting to acquire the mutex, then don't bother waking up a waiter;
16375629Sraf  * one of the spinners will acquire the mutex soon and it would be a waste
16385629Sraf  * of resources to wake up some thread just to have it spin for a while
16395629Sraf  * and then possibly go back to sleep.  See mutex_trylock_adaptive().
16400Sstevel@tonic-gate  */
16414574Sraf static lwpid_t
16424574Sraf mutex_unlock_queue(mutex_t *mp, int release_all)
16430Sstevel@tonic-gate {
16445629Sraf 	lwpid_t lwpid = 0;
16455629Sraf 	uint32_t old_lockword;
16465629Sraf 
16476057Sraf 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
16485629Sraf 	mp->mutex_owner = 0;
16495629Sraf 	old_lockword = clear_lockbyte(&mp->mutex_lockword);
16505629Sraf 	if ((old_lockword & WAITERMASK) &&
16515629Sraf 	    (release_all || (old_lockword & SPINNERMASK) == 0)) {
16525629Sraf 		ulwp_t *self = curthread;
16530Sstevel@tonic-gate 		no_preempt(self);	/* ensure a prompt wakeup */
16545629Sraf 		if (release_all)
16555629Sraf 			mutex_wakeup_all(mp);
16565629Sraf 		else
16575629Sraf 			lwpid = mutex_wakeup(mp);
16585629Sraf 		if (lwpid == 0)
16595629Sraf 			preempt(self);
16604574Sraf 	}
16610Sstevel@tonic-gate 	return (lwpid);
16620Sstevel@tonic-gate }
16630Sstevel@tonic-gate 
16640Sstevel@tonic-gate /*
16650Sstevel@tonic-gate  * Like mutex_unlock_queue(), but for process-shared mutexes.
16660Sstevel@tonic-gate  */
16674574Sraf static void
16684574Sraf mutex_unlock_process(mutex_t *mp, int release_all)
16690Sstevel@tonic-gate {
16706057Sraf 	uint64_t old_lockword64;
16716057Sraf 
16726057Sraf 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
16730Sstevel@tonic-gate 	mp->mutex_owner = 0;
16746057Sraf 	/* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
16756057Sraf 	old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64);
16766057Sraf 	if ((old_lockword64 & WAITERMASK64) &&
16776057Sraf 	    (release_all || (old_lockword64 & SPINNERMASK64) == 0)) {
16785629Sraf 		ulwp_t *self = curthread;
16795629Sraf 		no_preempt(self);	/* ensure a prompt wakeup */
16805629Sraf 		(void) ___lwp_mutex_wakeup(mp, release_all);
16815629Sraf 		preempt(self);
16820Sstevel@tonic-gate 	}
16830Sstevel@tonic-gate }
16840Sstevel@tonic-gate 
16850Sstevel@tonic-gate void
16860Sstevel@tonic-gate stall(void)
16870Sstevel@tonic-gate {
16880Sstevel@tonic-gate 	for (;;)
16890Sstevel@tonic-gate 		(void) mutex_lock_kernel(&stall_mutex, NULL, NULL);
16900Sstevel@tonic-gate }
16910Sstevel@tonic-gate 
16920Sstevel@tonic-gate /*
16930Sstevel@tonic-gate  * Acquire a USYNC_THREAD mutex via user-level sleep queues.
16940Sstevel@tonic-gate  * We come here only after set_lock_byte(&mp->mutex_lockw) has failed.
16954574Sraf  * If successful, returns with mutex_owner set correctly.
16960Sstevel@tonic-gate  */
16970Sstevel@tonic-gate int
16980Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp,
16990Sstevel@tonic-gate 	timespec_t *tsp)
17000Sstevel@tonic-gate {
17010Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
17020Sstevel@tonic-gate 	queue_head_t *qp;
17030Sstevel@tonic-gate 	hrtime_t begin_sleep;
17040Sstevel@tonic-gate 	int error = 0;
17050Sstevel@tonic-gate 
17060Sstevel@tonic-gate 	self->ul_sp = stkptr();
17070Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
17080Sstevel@tonic-gate 		self->ul_wchan = mp;
17090Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
17100Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = mp;
17110Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
17120Sstevel@tonic-gate 	}
17130Sstevel@tonic-gate 	if (msp) {
17140Sstevel@tonic-gate 		tdb_incr(msp->mutex_sleep);
17150Sstevel@tonic-gate 		begin_sleep = gethrtime();
17160Sstevel@tonic-gate 	}
17170Sstevel@tonic-gate 
17180Sstevel@tonic-gate 	DTRACE_PROBE1(plockstat, mutex__block, mp);
17190Sstevel@tonic-gate 
17200Sstevel@tonic-gate 	/*
17210Sstevel@tonic-gate 	 * Put ourself on the sleep queue, and while we are
17220Sstevel@tonic-gate 	 * unable to grab the lock, go park in the kernel.
17230Sstevel@tonic-gate 	 * Take ourself off the sleep queue after we acquire the lock.
17240Sstevel@tonic-gate 	 * The waiter bit can be set/cleared only while holding the queue lock.
17250Sstevel@tonic-gate 	 */
17260Sstevel@tonic-gate 	qp = queue_lock(mp, MX);
17276247Sraf 	enqueue(qp, self, 0);
17280Sstevel@tonic-gate 	mp->mutex_waiters = 1;
17290Sstevel@tonic-gate 	for (;;) {
17300Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
17310Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
17326247Sraf 			mp->mutex_waiters = dequeue_self(qp);
17330Sstevel@tonic-gate 			break;
17340Sstevel@tonic-gate 		}
17350Sstevel@tonic-gate 		set_parking_flag(self, 1);
17360Sstevel@tonic-gate 		queue_unlock(qp);
17370Sstevel@tonic-gate 		/*
17380Sstevel@tonic-gate 		 * __lwp_park() will return the residual time in tsp
17390Sstevel@tonic-gate 		 * if we are unparked before the timeout expires.
17400Sstevel@tonic-gate 		 */
17415629Sraf 		error = __lwp_park(tsp, 0);
17420Sstevel@tonic-gate 		set_parking_flag(self, 0);
17430Sstevel@tonic-gate 		/*
17440Sstevel@tonic-gate 		 * We could have taken a signal or suspended ourself.
17450Sstevel@tonic-gate 		 * If we did, then we removed ourself from the queue.
17460Sstevel@tonic-gate 		 * Someone else may have removed us from the queue
17470Sstevel@tonic-gate 		 * as a consequence of mutex_unlock().  We may have
17480Sstevel@tonic-gate 		 * gotten a timeout from __lwp_park().  Or we may still
17490Sstevel@tonic-gate 		 * be on the queue and this is just a spurious wakeup.
17500Sstevel@tonic-gate 		 */
17510Sstevel@tonic-gate 		qp = queue_lock(mp, MX);
17520Sstevel@tonic-gate 		if (self->ul_sleepq == NULL) {
17535629Sraf 			if (error) {
17546247Sraf 				mp->mutex_waiters = queue_waiter(qp)? 1 : 0;
17555629Sraf 				if (error != EINTR)
17565629Sraf 					break;
17575629Sraf 				error = 0;
17585629Sraf 			}
17590Sstevel@tonic-gate 			if (set_lock_byte(&mp->mutex_lockw) == 0) {
17600Sstevel@tonic-gate 				mp->mutex_owner = (uintptr_t)self;
17610Sstevel@tonic-gate 				break;
17620Sstevel@tonic-gate 			}
17636247Sraf 			enqueue(qp, self, 0);
17640Sstevel@tonic-gate 			mp->mutex_waiters = 1;
17650Sstevel@tonic-gate 		}
17660Sstevel@tonic-gate 		ASSERT(self->ul_sleepq == qp &&
17670Sstevel@tonic-gate 		    self->ul_qtype == MX &&
17680Sstevel@tonic-gate 		    self->ul_wchan == mp);
17690Sstevel@tonic-gate 		if (error) {
17705629Sraf 			if (error != EINTR) {
17716247Sraf 				mp->mutex_waiters = dequeue_self(qp);
17725629Sraf 				break;
17735629Sraf 			}
17745629Sraf 			error = 0;
17750Sstevel@tonic-gate 		}
17760Sstevel@tonic-gate 	}
17770Sstevel@tonic-gate 	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
17780Sstevel@tonic-gate 	    self->ul_wchan == NULL);
17790Sstevel@tonic-gate 	self->ul_sp = 0;
17800Sstevel@tonic-gate 	queue_unlock(qp);
17814574Sraf 
17820Sstevel@tonic-gate 	if (msp)
17830Sstevel@tonic-gate 		msp->mutex_sleep_time += gethrtime() - begin_sleep;
17840Sstevel@tonic-gate 
17850Sstevel@tonic-gate 	ASSERT(error == 0 || error == EINVAL || error == ETIME);
17864574Sraf 
17874574Sraf 	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
17884574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
17894574Sraf 		/*
17906057Sraf 		 * We shouldn't own the mutex.
17916057Sraf 		 * Just clear the lock; everyone has already been woken up.
17924574Sraf 		 */
17934574Sraf 		mp->mutex_owner = 0;
17946057Sraf 		(void) clear_lockbyte(&mp->mutex_lockword);
17954574Sraf 		error = ENOTRECOVERABLE;
17964574Sraf 	}
17974574Sraf 
17984574Sraf 	if (error) {
17994574Sraf 		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
18004574Sraf 		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
18014574Sraf 	} else {
18024574Sraf 		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
18034574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
18044574Sraf 		if (mp->mutex_flag & LOCK_OWNERDEAD) {
18054574Sraf 			ASSERT(mp->mutex_type & LOCK_ROBUST);
18064574Sraf 			error = EOWNERDEAD;
18074574Sraf 		}
18084574Sraf 	}
18094574Sraf 
18100Sstevel@tonic-gate 	return (error);
18110Sstevel@tonic-gate }
18120Sstevel@tonic-gate 
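/*
 * Illustrative sketch, not part of the library: the sleep-queue
 * protocol of mutex_lock_queue() above, reduced to its core.  The
 * waiters bit is set only while holding the queue lock, and every
 * wakeup (genuine or spurious) rechecks the lock byte.  Timeout,
 * signal, and error handling are omitted; the helper name is
 * hypothetical but the primitives are this file's own.
 */
static int
lock_queue_protocol_sketch(ulwp_t *self, mutex_t *mp)
{
	queue_head_t *qp = queue_lock(mp, MX);

	enqueue(qp, self, 0);
	mp->mutex_waiters = 1;
	while (set_lock_byte(&mp->mutex_lockw) != 0) {
		set_parking_flag(self, 1);
		queue_unlock(qp);
		(void) __lwp_park(NULL, 0);	/* sleep in the kernel */
		set_parking_flag(self, 0);
		qp = queue_lock(mp, MX);
		if (self->ul_sleepq == NULL) {	/* we were dequeued */
			enqueue(qp, self, 0);	/* requeue and retry */
			mp->mutex_waiters = 1;
		}
	}
	mp->mutex_owner = (uintptr_t)self;
	mp->mutex_waiters = dequeue_self(qp);
	queue_unlock(qp);
	return (0);
}
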
18134574Sraf static int
18144574Sraf mutex_recursion(mutex_t *mp, int mtype, int try)
18154574Sraf {
1816*6812Sraf 	ASSERT(mutex_held(mp));
18174574Sraf 	ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK));
18184574Sraf 	ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);
18194574Sraf 
18204574Sraf 	if (mtype & LOCK_RECURSIVE) {
18214574Sraf 		if (mp->mutex_rcount == RECURSION_MAX) {
18224574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN);
18234574Sraf 			return (EAGAIN);
18244574Sraf 		}
18254574Sraf 		mp->mutex_rcount++;
18264574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0);
18274574Sraf 		return (0);
18284574Sraf 	}
18294574Sraf 	if (try == MUTEX_LOCK) {
18304574Sraf 		DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
18314574Sraf 		return (EDEADLK);
18324574Sraf 	}
18334574Sraf 	return (EBUSY);
18344574Sraf }
18354574Sraf 
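/*
 * Illustrative sketch, not part of the library: application-level
 * use of a recursive mutex, the case handled by mutex_recursion()
 * above.  A second lock by the owner succeeds and bumps the
 * recursion count instead of returning EDEADLK; the function name
 * is hypothetical.
 */
#include <pthread.h>

static void
recursive_mutex_example(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	(void) pthread_mutexattr_init(&attr);
	(void) pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	(void) pthread_mutex_init(&m, &attr);
	(void) pthread_mutexattr_destroy(&attr);

	(void) pthread_mutex_lock(&m);
	(void) pthread_mutex_lock(&m);		/* rcount++, no deadlock */
	(void) pthread_mutex_unlock(&m);	/* rcount-- */
	(void) pthread_mutex_unlock(&m);	/* really released here */
	(void) pthread_mutex_destroy(&m);
}
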
18364574Sraf /*
18374574Sraf  * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so
18384574Sraf  * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary.
18394574Sraf  * We use tdb_hash_lock here and in the synch object tracking code in
18404574Sraf  * the tdb_agent.c file.  There is no conflict between these two usages.
18414574Sraf  */
18424574Sraf void
18434574Sraf register_lock(mutex_t *mp)
18444574Sraf {
18454574Sraf 	uberdata_t *udp = curthread->ul_uberdata;
18464574Sraf 	uint_t hash = LOCK_HASH(mp);
18474574Sraf 	robust_t *rlp;
18484574Sraf 	robust_t **rlpp;
18494574Sraf 	robust_t **table;
18504574Sraf 
18514574Sraf 	if ((table = udp->robustlocks) == NULL) {
18524574Sraf 		lmutex_lock(&udp->tdb_hash_lock);
18534574Sraf 		if ((table = udp->robustlocks) == NULL) {
18544574Sraf 			table = lmalloc(LOCKHASHSZ * sizeof (robust_t *));
1855*6812Sraf 			membar_producer();
18564574Sraf 			udp->robustlocks = table;
18574574Sraf 		}
18584574Sraf 		lmutex_unlock(&udp->tdb_hash_lock);
18594574Sraf 	}
1860*6812Sraf 	membar_consumer();
18614574Sraf 
18624574Sraf 	/*
18634574Sraf 	 * First search the table of registered locks with no locks held.
18644574Sraf 	 * This is safe because the table never shrinks
18654574Sraf 	 * and we can only get a false negative.
18664574Sraf 	 */
18674574Sraf 	for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) {
18684574Sraf 		if (rlp->robust_lock == mp)	/* already registered */
18694574Sraf 			return;
18704574Sraf 	}
18714574Sraf 
18724574Sraf 	/*
18734574Sraf 	 * The lock was not found.
18744574Sraf 	 * Repeat the operation with tdb_hash_lock held.
18754574Sraf 	 */
18764574Sraf 	lmutex_lock(&udp->tdb_hash_lock);
18774574Sraf 
18784574Sraf 	for (rlpp = &table[hash];
18794574Sraf 	    (rlp = *rlpp) != NULL;
18804574Sraf 	    rlpp = &rlp->robust_next) {
18814574Sraf 		if (rlp->robust_lock == mp) {	/* already registered */
18824574Sraf 			lmutex_unlock(&udp->tdb_hash_lock);
18834574Sraf 			return;
18844574Sraf 		}
18854574Sraf 	}
18864574Sraf 
18874574Sraf 	/*
18884574Sraf 	 * The lock has never been registered.
18894574Sraf 	 * Register it now and add it to the table.
18904574Sraf 	 */
18914574Sraf 	(void) ___lwp_mutex_register(mp);
18924574Sraf 	rlp = lmalloc(sizeof (*rlp));
18934574Sraf 	rlp->robust_lock = mp;
1894*6812Sraf 	membar_producer();
18954574Sraf 	*rlpp = rlp;
18964574Sraf 
18974574Sraf 	lmutex_unlock(&udp->tdb_hash_lock);
18984574Sraf }
18994574Sraf 
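/*
 * Illustrative sketch, not part of the library: the application-level
 * robust-mutex protocol that the registration above supports.  If the
 * previous owner died, locking returns EOWNERDEAD; the new owner
 * repairs its state and calls pthread_mutex_consistent_np().  The
 * function name is hypothetical.
 */
#include <pthread.h>
#include <errno.h>

static int
robust_lock_example(pthread_mutex_t *mp)
{
	int error = pthread_mutex_lock(mp);

	if (error == EOWNERDEAD) {
		/* repair application state here, then mark consistent */
		(void) pthread_mutex_consistent_np(mp);
		error = 0;
	}
	return (error);		/* ENOTRECOVERABLE if a prior owner gave up */
}
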
19004574Sraf /*
19014574Sraf  * This is called in the child of fork()/forkall() to start over
19024574Sraf  * with a clean slate.  (Each process must register its own locks.)
19034574Sraf  * No locks are needed because all other threads are suspended or gone.
19044574Sraf  */
19054574Sraf void
19064574Sraf unregister_locks(void)
19074574Sraf {
19084574Sraf 	uberdata_t *udp = curthread->ul_uberdata;
19094574Sraf 	uint_t hash;
19104574Sraf 	robust_t **table;
19114574Sraf 	robust_t *rlp;
19124574Sraf 	robust_t *next;
19134574Sraf 
19144574Sraf 	if ((table = udp->robustlocks) != NULL) {
19154574Sraf 		for (hash = 0; hash < LOCKHASHSZ; hash++) {
19164574Sraf 			rlp = table[hash];
19174574Sraf 			while (rlp != NULL) {
19184574Sraf 				next = rlp->robust_next;
19194574Sraf 				lfree(rlp, sizeof (*rlp));
19204574Sraf 				rlp = next;
19214574Sraf 			}
19224574Sraf 		}
19234574Sraf 		lfree(table, LOCKHASHSZ * sizeof (robust_t *));
19244574Sraf 		udp->robustlocks = NULL;
19254574Sraf 	}
19264574Sraf }
19274574Sraf 
19280Sstevel@tonic-gate /*
19290Sstevel@tonic-gate  * Returns with mutex_owner set correctly.
19300Sstevel@tonic-gate  */
19316247Sraf int
19320Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try)
19330Sstevel@tonic-gate {
19340Sstevel@tonic-gate 	ulwp_t *self = curthread;
19350Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
19360Sstevel@tonic-gate 	int mtype = mp->mutex_type;
19370Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
19380Sstevel@tonic-gate 	int error = 0;
19396247Sraf 	int noceil = try & MUTEX_NOCEIL;
19404574Sraf 	uint8_t ceil;
19414574Sraf 	int myprio;
19420Sstevel@tonic-gate 
19436247Sraf 	try &= ~MUTEX_NOCEIL;
19440Sstevel@tonic-gate 	ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);
19450Sstevel@tonic-gate 
19460Sstevel@tonic-gate 	if (!self->ul_schedctl_called)
19470Sstevel@tonic-gate 		(void) setup_schedctl();
19480Sstevel@tonic-gate 
19490Sstevel@tonic-gate 	if (msp && try == MUTEX_TRY)
19500Sstevel@tonic-gate 		tdb_incr(msp->mutex_try);
19510Sstevel@tonic-gate 
1952*6812Sraf 	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_held(mp))
19534574Sraf 		return (mutex_recursion(mp, mtype, try));
19540Sstevel@tonic-gate 
19550Sstevel@tonic-gate 	if (self->ul_error_detection && try == MUTEX_LOCK &&
1956*6812Sraf 	    tsp == NULL && mutex_held(mp))
19570Sstevel@tonic-gate 		lock_error(mp, "mutex_lock", NULL, NULL);
19580Sstevel@tonic-gate 
19596247Sraf 	if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) {
19606247Sraf 		update_sched(self);
19616247Sraf 		if (self->ul_cid != self->ul_rtclassid) {
19626247Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM);
19636247Sraf 			return (EPERM);
19646247Sraf 		}
19654574Sraf 		ceil = mp->mutex_ceiling;
19666247Sraf 		myprio = self->ul_epri? self->ul_epri : self->ul_pri;
19674574Sraf 		if (myprio > ceil) {
19684574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL);
19694574Sraf 			return (EINVAL);
19704574Sraf 		}
19714574Sraf 		if ((error = _ceil_mylist_add(mp)) != 0) {
19724574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
19734574Sraf 			return (error);
19740Sstevel@tonic-gate 		}
19754574Sraf 		if (myprio < ceil)
19764574Sraf 			_ceil_prio_inherit(ceil);
19774574Sraf 	}
19784574Sraf 
19794574Sraf 	if ((mtype & (USYNC_PROCESS | LOCK_ROBUST))
19804574Sraf 	    == (USYNC_PROCESS | LOCK_ROBUST))
19814574Sraf 		register_lock(mp);
19824574Sraf 
19834574Sraf 	if (mtype & LOCK_PRIO_INHERIT) {
19844574Sraf 		/* go straight to the kernel */
19854574Sraf 		if (try == MUTEX_TRY)
19864574Sraf 			error = mutex_trylock_kernel(mp);
19874574Sraf 		else	/* MUTEX_LOCK */
19884574Sraf 			error = mutex_lock_kernel(mp, tsp, msp);
19894574Sraf 		/*
19904574Sraf 		 * The kernel never sets or clears the lock byte
19914574Sraf 		 * for LOCK_PRIO_INHERIT mutexes.
19924574Sraf 		 * Set it here for consistency.
19934574Sraf 		 */
19944574Sraf 		switch (error) {
19954574Sraf 		case 0:
19966247Sraf 			self->ul_pilocks++;
19974574Sraf 			mp->mutex_lockw = LOCKSET;
19984574Sraf 			break;
19994574Sraf 		case EOWNERDEAD:
20004574Sraf 		case ELOCKUNMAPPED:
20016247Sraf 			self->ul_pilocks++;
20024574Sraf 			mp->mutex_lockw = LOCKSET;
20034574Sraf 			/* FALLTHROUGH */
20044574Sraf 		case ENOTRECOVERABLE:
20054574Sraf 			ASSERT(mtype & LOCK_ROBUST);
20064574Sraf 			break;
20074574Sraf 		case EDEADLK:
20084574Sraf 			if (try == MUTEX_LOCK)
20094574Sraf 				stall();
20104574Sraf 			error = EBUSY;
20114574Sraf 			break;
20120Sstevel@tonic-gate 		}
20130Sstevel@tonic-gate 	} else if (mtype & USYNC_PROCESS) {
20144613Sraf 		error = mutex_trylock_process(mp, try == MUTEX_LOCK);
20154574Sraf 		if (error == EBUSY && try == MUTEX_LOCK)
20160Sstevel@tonic-gate 			error = mutex_lock_kernel(mp, tsp, msp);
20175629Sraf 	} else {	/* USYNC_THREAD */
20184613Sraf 		error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK);
20194574Sraf 		if (error == EBUSY && try == MUTEX_LOCK)
20204574Sraf 			error = mutex_lock_queue(self, msp, mp, tsp);
20210Sstevel@tonic-gate 	}
20220Sstevel@tonic-gate 
20230Sstevel@tonic-gate 	switch (error) {
20244574Sraf 	case 0:
20250Sstevel@tonic-gate 	case EOWNERDEAD:
20260Sstevel@tonic-gate 	case ELOCKUNMAPPED:
20274574Sraf 		if (mtype & LOCK_ROBUST)
20284574Sraf 			remember_lock(mp);
20290Sstevel@tonic-gate 		if (msp)
20300Sstevel@tonic-gate 			record_begin_hold(msp);
20310Sstevel@tonic-gate 		break;
20320Sstevel@tonic-gate 	default:
20336247Sraf 		if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) {
20344574Sraf 			(void) _ceil_mylist_del(mp);
20354574Sraf 			if (myprio < ceil)
20364574Sraf 				_ceil_prio_waive();
20374574Sraf 		}
20380Sstevel@tonic-gate 		if (try == MUTEX_TRY) {
20390Sstevel@tonic-gate 			if (msp)
20400Sstevel@tonic-gate 				tdb_incr(msp->mutex_try_fail);
20410Sstevel@tonic-gate 			if (__td_event_report(self, TD_LOCK_TRY, udp)) {
20420Sstevel@tonic-gate 				self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
20430Sstevel@tonic-gate 				tdb_event(TD_LOCK_TRY, udp);
20440Sstevel@tonic-gate 			}
20450Sstevel@tonic-gate 		}
20460Sstevel@tonic-gate 		break;
20470Sstevel@tonic-gate 	}
20480Sstevel@tonic-gate 
20490Sstevel@tonic-gate 	return (error);
20500Sstevel@tonic-gate }
20510Sstevel@tonic-gate 
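/*
 * Illustrative sketch, not part of the library: creating the
 * LOCK_PRIO_PROTECT mutex whose ceiling checks appear in
 * mutex_lock_internal() above.  A locker whose priority exceeds the
 * ceiling gets EINVAL; otherwise its priority is boosted to the
 * ceiling while it holds the lock.  The function name is hypothetical.
 */
#include <pthread.h>

static int
prio_protect_example(pthread_mutex_t *mp, int ceiling)
{
	pthread_mutexattr_t attr;
	int error;

	(void) pthread_mutexattr_init(&attr);
	(void) pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
	(void) pthread_mutexattr_setprioceiling(&attr, ceiling);
	error = pthread_mutex_init(mp, &attr);
	(void) pthread_mutexattr_destroy(&attr);
	return (error);
}
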
20520Sstevel@tonic-gate int
20530Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try)
20540Sstevel@tonic-gate {
20550Sstevel@tonic-gate 	ulwp_t *self = curthread;
20560Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
20570Sstevel@tonic-gate 
20580Sstevel@tonic-gate 	/*
20590Sstevel@tonic-gate 	 * We know that USYNC_PROCESS is set in mtype and that
20600Sstevel@tonic-gate 	 * zero, one, or both of the flags LOCK_RECURSIVE and
20610Sstevel@tonic-gate 	 * LOCK_ERRORCHECK are set, and that no other flags are set.
20620Sstevel@tonic-gate 	 */
20634574Sraf 	ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0);
20640Sstevel@tonic-gate 	enter_critical(self);
20656057Sraf 	if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) {
20660Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
20676057Sraf 		/* mp->mutex_ownerpid was set by set_lock_byte64() */
20680Sstevel@tonic-gate 		exit_critical(self);
20690Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
20700Sstevel@tonic-gate 		return (0);
20710Sstevel@tonic-gate 	}
20720Sstevel@tonic-gate 	exit_critical(self);
20730Sstevel@tonic-gate 
20744574Sraf 	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp))
20754574Sraf 		return (mutex_recursion(mp, mtype, try));
20764574Sraf 
20774613Sraf 	if (try == MUTEX_LOCK) {
20784613Sraf 		if (mutex_trylock_process(mp, 1) == 0)
20794613Sraf 			return (0);
20800Sstevel@tonic-gate 		return (mutex_lock_kernel(mp, tsp, NULL));
20814613Sraf 	}
20820Sstevel@tonic-gate 
20830Sstevel@tonic-gate 	if (__td_event_report(self, TD_LOCK_TRY, udp)) {
20840Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
20850Sstevel@tonic-gate 		tdb_event(TD_LOCK_TRY, udp);
20860Sstevel@tonic-gate 	}
20870Sstevel@tonic-gate 	return (EBUSY);
20880Sstevel@tonic-gate }
20890Sstevel@tonic-gate 
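/*
 * Illustrative sketch, not part of the library: creating the
 * USYNC_PROCESS mutex that fast_process_lock() above services.
 * The mutex must live in memory shared between the processes,
 * here a MAP_SHARED anonymous mapping inherited across fork().
 * The function name is hypothetical.
 */
#include <pthread.h>
#include <sys/mman.h>

static pthread_mutex_t *
shared_mutex_example(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t *mp = mmap(NULL, sizeof (*mp),
	    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);

	if (mp == MAP_FAILED)
		return (NULL);
	(void) pthread_mutexattr_init(&attr);
	(void) pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	(void) pthread_mutex_init(mp, &attr);
	(void) pthread_mutexattr_destroy(&attr);
	return (mp);	/* usable by this process and its children */
}
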
20900Sstevel@tonic-gate static int
20910Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp)
20920Sstevel@tonic-gate {
20930Sstevel@tonic-gate 	ulwp_t *self = curthread;
20946247Sraf 	int mtype = mp->mutex_type;
20950Sstevel@tonic-gate 	uberflags_t *gflags;
20960Sstevel@tonic-gate 
20970Sstevel@tonic-gate 	/*
20980Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
20990Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
21000Sstevel@tonic-gate 	 * no error detection, no lock statistics,
21010Sstevel@tonic-gate 	 * and the process has only a single thread.
21020Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
21030Sstevel@tonic-gate 	 */
21046247Sraf 	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
21056247Sraf 	    self->ul_uberdata->uberflags.uf_all) == 0) {
21060Sstevel@tonic-gate 		/*
21070Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
21080Sstevel@tonic-gate 		 */
21090Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
21100Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
21110Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
21120Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
21130Sstevel@tonic-gate 			return (0);
21140Sstevel@tonic-gate 		}
21154574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
21164574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_LOCK));
21170Sstevel@tonic-gate 		/*
21180Sstevel@tonic-gate 		 * We have reached a deadlock, probably because the
21190Sstevel@tonic-gate 		 * process is executing non-async-signal-safe code in
21200Sstevel@tonic-gate 		 * a signal handler and is attempting to acquire a lock
21210Sstevel@tonic-gate 		 * that it already owns.  This is not surprising, given
21220Sstevel@tonic-gate 		 * bad programming practices over the years that have
21230Sstevel@tonic-gate 		 * resulted in applications calling printf() and such
21240Sstevel@tonic-gate 		 * in their signal handlers.  Unless the user has told
21250Sstevel@tonic-gate 		 * us that the signal handlers are safe by setting:
21260Sstevel@tonic-gate 		 *	export _THREAD_ASYNC_SAFE=1
21270Sstevel@tonic-gate 		 * we return EDEADLK rather than actually deadlocking.
21280Sstevel@tonic-gate 		 */
21290Sstevel@tonic-gate 		if (tsp == NULL &&
21300Sstevel@tonic-gate 		    MUTEX_OWNER(mp) == self && !self->ul_async_safe) {
21310Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
21320Sstevel@tonic-gate 			return (EDEADLK);
21330Sstevel@tonic-gate 		}
21340Sstevel@tonic-gate 	}
21350Sstevel@tonic-gate 
21360Sstevel@tonic-gate 	/*
21370Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
21380Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
21390Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
21400Sstevel@tonic-gate 	 */
21410Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
21420Sstevel@tonic-gate 	    (gflags->uf_trs_ted |
21430Sstevel@tonic-gate 	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
21440Sstevel@tonic-gate 		if (mtype & USYNC_PROCESS)
21450Sstevel@tonic-gate 			return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK));
21460Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
21470Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
21480Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
21490Sstevel@tonic-gate 			return (0);
21500Sstevel@tonic-gate 		}
21514574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
21524574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_LOCK));
21534613Sraf 		if (mutex_trylock_adaptive(mp, 1) != 0)
21544574Sraf 			return (mutex_lock_queue(self, NULL, mp, tsp));
21554574Sraf 		return (0);
21560Sstevel@tonic-gate 	}
21570Sstevel@tonic-gate 
21580Sstevel@tonic-gate 	/* else do it the long way */
21590Sstevel@tonic-gate 	return (mutex_lock_internal(mp, tsp, MUTEX_LOCK));
21600Sstevel@tonic-gate }
21610Sstevel@tonic-gate 
2162*6812Sraf #pragma weak pthread_mutex_lock = mutex_lock
2163*6812Sraf #pragma weak _mutex_lock = mutex_lock
21640Sstevel@tonic-gate int
2165*6812Sraf mutex_lock(mutex_t *mp)
21660Sstevel@tonic-gate {
21670Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
21680Sstevel@tonic-gate 	return (mutex_lock_impl(mp, NULL));
21690Sstevel@tonic-gate }
21700Sstevel@tonic-gate 
21710Sstevel@tonic-gate int
2172*6812Sraf pthread_mutex_timedlock(pthread_mutex_t *_RESTRICT_KYWD mp,
2173*6812Sraf 	const struct timespec *_RESTRICT_KYWD abstime)
21740Sstevel@tonic-gate {
21750Sstevel@tonic-gate 	timespec_t tslocal;
21760Sstevel@tonic-gate 	int error;
21770Sstevel@tonic-gate 
21780Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
21790Sstevel@tonic-gate 	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
2180*6812Sraf 	error = mutex_lock_impl((mutex_t *)mp, &tslocal);
21810Sstevel@tonic-gate 	if (error == ETIME)
21820Sstevel@tonic-gate 		error = ETIMEDOUT;
21830Sstevel@tonic-gate 	return (error);
21840Sstevel@tonic-gate }
21850Sstevel@tonic-gate 
21860Sstevel@tonic-gate int
2187*6812Sraf pthread_mutex_reltimedlock_np(pthread_mutex_t *_RESTRICT_KYWD mp,
2188*6812Sraf 	const struct timespec *_RESTRICT_KYWD reltime)
21890Sstevel@tonic-gate {
21900Sstevel@tonic-gate 	timespec_t tslocal;
21910Sstevel@tonic-gate 	int error;
21920Sstevel@tonic-gate 
21930Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
21940Sstevel@tonic-gate 	tslocal = *reltime;
2195*6812Sraf 	error = mutex_lock_impl((mutex_t *)mp, &tslocal);
21960Sstevel@tonic-gate 	if (error == ETIME)
21970Sstevel@tonic-gate 		error = ETIMEDOUT;
21980Sstevel@tonic-gate 	return (error);
21990Sstevel@tonic-gate }
22000Sstevel@tonic-gate 
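/*
 * Illustrative sketch, not part of the library: the two timed-lock
 * flavors above.  pthread_mutex_timedlock() takes an absolute
 * CLOCK_REALTIME deadline; the _np variant takes a relative wait.
 * Both return ETIMEDOUT (mapped from ETIME) on expiry.  The
 * function name is hypothetical.
 */
#include <pthread.h>
#include <time.h>

static int
timedlock_example(pthread_mutex_t *mp)
{
	timespec_t abstime, reltime;

	(void) clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 5;		/* give up five seconds from now */
	if (pthread_mutex_timedlock(mp, &abstime) == 0)
		return (0);

	reltime.tv_sec = 5;		/* or: wait five seconds, period */
	reltime.tv_nsec = 0;
	return (pthread_mutex_reltimedlock_np(mp, &reltime));
}
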
2201*6812Sraf #pragma weak pthread_mutex_trylock = mutex_trylock
22020Sstevel@tonic-gate int
2203*6812Sraf mutex_trylock(mutex_t *mp)
22040Sstevel@tonic-gate {
22050Sstevel@tonic-gate 	ulwp_t *self = curthread;
22060Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
22076247Sraf 	int mtype = mp->mutex_type;
22080Sstevel@tonic-gate 	uberflags_t *gflags;
22090Sstevel@tonic-gate 
22100Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
22116247Sraf 
22120Sstevel@tonic-gate 	/*
22130Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
22140Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
22150Sstevel@tonic-gate 	 * no error detection, no lock statistics,
22160Sstevel@tonic-gate 	 * and the process has only a single thread.
22170Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
22180Sstevel@tonic-gate 	 */
22196247Sraf 	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
22200Sstevel@tonic-gate 	    udp->uberflags.uf_all) == 0) {
22210Sstevel@tonic-gate 		/*
22220Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
22230Sstevel@tonic-gate 		 */
22240Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
22250Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
22260Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
22270Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
22280Sstevel@tonic-gate 			return (0);
22290Sstevel@tonic-gate 		}
22304574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
22314574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_TRY));
22320Sstevel@tonic-gate 		return (EBUSY);
22330Sstevel@tonic-gate 	}
22340Sstevel@tonic-gate 
22350Sstevel@tonic-gate 	/*
22360Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
22370Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
22380Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
22390Sstevel@tonic-gate 	 */
22400Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
22410Sstevel@tonic-gate 	    (gflags->uf_trs_ted |
22420Sstevel@tonic-gate 	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
22430Sstevel@tonic-gate 		if (mtype & USYNC_PROCESS)
22440Sstevel@tonic-gate 			return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY));
22450Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
22460Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
22470Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
22480Sstevel@tonic-gate 			return (0);
22490Sstevel@tonic-gate 		}
22504574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
22514574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_TRY));
22524613Sraf 		if (__td_event_report(self, TD_LOCK_TRY, udp)) {
22534613Sraf 			self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
22544613Sraf 			tdb_event(TD_LOCK_TRY, udp);
22550Sstevel@tonic-gate 		}
22564613Sraf 		return (EBUSY);
22570Sstevel@tonic-gate 	}
22580Sstevel@tonic-gate 
22590Sstevel@tonic-gate 	/* else do it the long way */
22600Sstevel@tonic-gate 	return (mutex_lock_internal(mp, NULL, MUTEX_TRY));
22610Sstevel@tonic-gate }
22620Sstevel@tonic-gate 
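/*
 * Illustrative sketch, not part of the library: typical use of
 * mutex_trylock() above.  EBUSY is the expected "someone else
 * holds it" answer, so the caller does other work instead of
 * blocking; any other error is a real failure.  The function
 * name is hypothetical.
 */
#include <pthread.h>
#include <errno.h>

static int
trylock_example(pthread_mutex_t *mp)
{
	int error = pthread_mutex_trylock(mp);

	if (error == 0) {
		/* ... short critical section ... */
		(void) pthread_mutex_unlock(mp);
	} else if (error == EBUSY) {
		/* contended: do something useful and retry later */
	}
	return (error);
}
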
22630Sstevel@tonic-gate int
22644574Sraf mutex_unlock_internal(mutex_t *mp, int retain_robust_flags)
22650Sstevel@tonic-gate {
22660Sstevel@tonic-gate 	ulwp_t *self = curthread;
22670Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
22680Sstevel@tonic-gate 	int mtype = mp->mutex_type;
22690Sstevel@tonic-gate 	tdb_mutex_stats_t *msp;
22704574Sraf 	int error = 0;
22714574Sraf 	int release_all;
22720Sstevel@tonic-gate 	lwpid_t lwpid;
22730Sstevel@tonic-gate 
2274*6812Sraf 	if ((mtype & LOCK_ERRORCHECK) && !mutex_held(mp))
22750Sstevel@tonic-gate 		return (EPERM);
22760Sstevel@tonic-gate 
2277*6812Sraf 	if (self->ul_error_detection && !mutex_held(mp))
22780Sstevel@tonic-gate 		lock_error(mp, "mutex_unlock", NULL, NULL);
22790Sstevel@tonic-gate 
22800Sstevel@tonic-gate 	if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
22810Sstevel@tonic-gate 		mp->mutex_rcount--;
22820Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
22830Sstevel@tonic-gate 		return (0);
22840Sstevel@tonic-gate 	}
22850Sstevel@tonic-gate 
22860Sstevel@tonic-gate 	if ((msp = MUTEX_STATS(mp, udp)) != NULL)
22870Sstevel@tonic-gate 		(void) record_hold_time(msp);
22880Sstevel@tonic-gate 
22894574Sraf 	if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) &&
22904574Sraf 	    (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
22914574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
22924574Sraf 		mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
22934574Sraf 		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
22944574Sraf 	}
22954574Sraf 	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
22964574Sraf 
22974574Sraf 	if (mtype & LOCK_PRIO_INHERIT) {
22980Sstevel@tonic-gate 		no_preempt(self);
22990Sstevel@tonic-gate 		mp->mutex_owner = 0;
23006057Sraf 		/* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */
23010Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
23024574Sraf 		mp->mutex_lockw = LOCKCLEAR;
23036247Sraf 		self->ul_pilocks--;
23044574Sraf 		error = ___lwp_mutex_unlock(mp);
23050Sstevel@tonic-gate 		preempt(self);
23060Sstevel@tonic-gate 	} else if (mtype & USYNC_PROCESS) {
23075629Sraf 		mutex_unlock_process(mp, release_all);
23080Sstevel@tonic-gate 	} else {	/* USYNC_THREAD */
23094574Sraf 		if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) {
23100Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid);
23110Sstevel@tonic-gate 			preempt(self);
23120Sstevel@tonic-gate 		}
23130Sstevel@tonic-gate 	}
23140Sstevel@tonic-gate 
23154574Sraf 	if (mtype & LOCK_ROBUST)
23164574Sraf 		forget_lock(mp);
23174574Sraf 
23184574Sraf 	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
23194574Sraf 		_ceil_prio_waive();
23204574Sraf 
23210Sstevel@tonic-gate 	return (error);
23220Sstevel@tonic-gate }
23230Sstevel@tonic-gate 
2324*6812Sraf #pragma weak pthread_mutex_unlock = mutex_unlock
2325*6812Sraf #pragma weak _mutex_unlock = mutex_unlock
23260Sstevel@tonic-gate int
2327*6812Sraf mutex_unlock(mutex_t *mp)
23280Sstevel@tonic-gate {
23290Sstevel@tonic-gate 	ulwp_t *self = curthread;
23306247Sraf 	int mtype = mp->mutex_type;
23310Sstevel@tonic-gate 	uberflags_t *gflags;
23320Sstevel@tonic-gate 	lwpid_t lwpid;
23330Sstevel@tonic-gate 	short el;
23340Sstevel@tonic-gate 
23350Sstevel@tonic-gate 	/*
23360Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
23370Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
23380Sstevel@tonic-gate 	 * no error detection, no lock statistics,
23390Sstevel@tonic-gate 	 * and the process has only a single thread.
23400Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
23410Sstevel@tonic-gate 	 */
23426247Sraf 	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
23436247Sraf 	    self->ul_uberdata->uberflags.uf_all) == 0) {
23440Sstevel@tonic-gate 		if (mtype) {
23450Sstevel@tonic-gate 			/*
23460Sstevel@tonic-gate 			 * At this point we know that one or both of the
23470Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
23480Sstevel@tonic-gate 			 */
23490Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
23500Sstevel@tonic-gate 				return (EPERM);
23510Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
23520Sstevel@tonic-gate 				mp->mutex_rcount--;
23530Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
23540Sstevel@tonic-gate 				return (0);
23550Sstevel@tonic-gate 			}
23560Sstevel@tonic-gate 		}
23570Sstevel@tonic-gate 		/*
23580Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
23590Sstevel@tonic-gate 		 * Also, there can be no waiters.
23600Sstevel@tonic-gate 		 */
23610Sstevel@tonic-gate 		mp->mutex_owner = 0;
23620Sstevel@tonic-gate 		mp->mutex_lockword = 0;
23630Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
23640Sstevel@tonic-gate 		return (0);
23650Sstevel@tonic-gate 	}
23660Sstevel@tonic-gate 
23670Sstevel@tonic-gate 	/*
23680Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
23690Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
23700Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
23710Sstevel@tonic-gate 	 */
23720Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL) {
23730Sstevel@tonic-gate 		if (((el = gflags->uf_trs_ted) | mtype) == 0) {
23740Sstevel@tonic-gate fast_unlock:
23755629Sraf 			if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
23760Sstevel@tonic-gate 				(void) __lwp_unpark(lwpid);
23770Sstevel@tonic-gate 				preempt(self);
23780Sstevel@tonic-gate 			}
23790Sstevel@tonic-gate 			return (0);
23800Sstevel@tonic-gate 		}
23810Sstevel@tonic-gate 		if (el)		/* error detection or lock statistics */
23820Sstevel@tonic-gate 			goto slow_unlock;
23830Sstevel@tonic-gate 		if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
23840Sstevel@tonic-gate 			/*
23850Sstevel@tonic-gate 			 * At this point we know that one or both of the
23860Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
23870Sstevel@tonic-gate 			 */
23880Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
23890Sstevel@tonic-gate 				return (EPERM);
23900Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
23910Sstevel@tonic-gate 				mp->mutex_rcount--;
23920Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
23930Sstevel@tonic-gate 				return (0);
23940Sstevel@tonic-gate 			}
23950Sstevel@tonic-gate 			goto fast_unlock;
23960Sstevel@tonic-gate 		}
23970Sstevel@tonic-gate 		if ((mtype &
23980Sstevel@tonic-gate 		    ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
23990Sstevel@tonic-gate 			/*
24000Sstevel@tonic-gate 			 * At this point we know that zero, one, or both of the
24010Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and
24020Sstevel@tonic-gate 			 * that the USYNC_PROCESS flag is set.
24030Sstevel@tonic-gate 			 */
24040Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp))
24050Sstevel@tonic-gate 				return (EPERM);
24060Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
24070Sstevel@tonic-gate 				mp->mutex_rcount--;
24080Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
24090Sstevel@tonic-gate 				return (0);
24100Sstevel@tonic-gate 			}
24115629Sraf 			mutex_unlock_process(mp, 0);
24120Sstevel@tonic-gate 			return (0);
24130Sstevel@tonic-gate 		}
24140Sstevel@tonic-gate 	}
24150Sstevel@tonic-gate 
24160Sstevel@tonic-gate 	/* else do it the long way */
24170Sstevel@tonic-gate slow_unlock:
24184574Sraf 	return (mutex_unlock_internal(mp, 0));
24190Sstevel@tonic-gate }
24200Sstevel@tonic-gate 
24210Sstevel@tonic-gate /*
24220Sstevel@tonic-gate  * Internally to the library, almost all mutex lock/unlock actions
24230Sstevel@tonic-gate  * go through these lmutex_ functions, to protect critical regions.
2424*6812Sraf  * We replicate a bit of code from mutex_lock() and mutex_unlock()
24250Sstevel@tonic-gate  * to make these functions faster since we know that the mutex type
24260Sstevel@tonic-gate  * of all internal locks is USYNC_THREAD.  We also know that internal
24270Sstevel@tonic-gate  * locking can never fail, so we panic if it does.
24280Sstevel@tonic-gate  */
24290Sstevel@tonic-gate void
24300Sstevel@tonic-gate lmutex_lock(mutex_t *mp)
24310Sstevel@tonic-gate {
24320Sstevel@tonic-gate 	ulwp_t *self = curthread;
24330Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
24340Sstevel@tonic-gate 
24350Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
24360Sstevel@tonic-gate 
24370Sstevel@tonic-gate 	enter_critical(self);
24380Sstevel@tonic-gate 	/*
24390Sstevel@tonic-gate 	 * Optimize the case of no lock statistics and only a single thread.
24400Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
24410Sstevel@tonic-gate 	 */
24420Sstevel@tonic-gate 	if (udp->uberflags.uf_all == 0) {
24430Sstevel@tonic-gate 		/*
24440Sstevel@tonic-gate 		 * Only one thread exists; the mutex must be free.
24450Sstevel@tonic-gate 		 */
24460Sstevel@tonic-gate 		ASSERT(mp->mutex_lockw == 0);
24470Sstevel@tonic-gate 		mp->mutex_lockw = LOCKSET;
24480Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
24490Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
24500Sstevel@tonic-gate 	} else {
24510Sstevel@tonic-gate 		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
24520Sstevel@tonic-gate 
24530Sstevel@tonic-gate 		if (!self->ul_schedctl_called)
24540Sstevel@tonic-gate 			(void) setup_schedctl();
24550Sstevel@tonic-gate 
24560Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
24570Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
24580Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
24594613Sraf 		} else if (mutex_trylock_adaptive(mp, 1) != 0) {
24600Sstevel@tonic-gate 			(void) mutex_lock_queue(self, msp, mp, NULL);
24610Sstevel@tonic-gate 		}
24620Sstevel@tonic-gate 
24630Sstevel@tonic-gate 		if (msp)
24640Sstevel@tonic-gate 			record_begin_hold(msp);
24650Sstevel@tonic-gate 	}
24660Sstevel@tonic-gate }
24670Sstevel@tonic-gate 
24680Sstevel@tonic-gate void
24690Sstevel@tonic-gate lmutex_unlock(mutex_t *mp)
24700Sstevel@tonic-gate {
24710Sstevel@tonic-gate 	ulwp_t *self = curthread;
24720Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
24730Sstevel@tonic-gate 
24740Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
24750Sstevel@tonic-gate 
24760Sstevel@tonic-gate 	/*
24770Sstevel@tonic-gate 	 * Optimize the case of no lock statistics and only a single thread.
24780Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
24790Sstevel@tonic-gate 	 */
24800Sstevel@tonic-gate 	if (udp->uberflags.uf_all == 0) {
24810Sstevel@tonic-gate 		/*
24820Sstevel@tonic-gate 		 * Only one thread exists so there can be no waiters.
24830Sstevel@tonic-gate 		 */
24840Sstevel@tonic-gate 		mp->mutex_owner = 0;
24850Sstevel@tonic-gate 		mp->mutex_lockword = 0;
24860Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
24870Sstevel@tonic-gate 	} else {
24880Sstevel@tonic-gate 		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
24890Sstevel@tonic-gate 		lwpid_t lwpid;
24900Sstevel@tonic-gate 
24910Sstevel@tonic-gate 		if (msp)
24920Sstevel@tonic-gate 			(void) record_hold_time(msp);
24934574Sraf 		if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
24940Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid);
24950Sstevel@tonic-gate 			preempt(self);
24960Sstevel@tonic-gate 		}
24970Sstevel@tonic-gate 	}
24980Sstevel@tonic-gate 	exit_critical(self);
24990Sstevel@tonic-gate }
25000Sstevel@tonic-gate 
25012248Sraf /*
25022248Sraf  * For specialized code in libc, like the asynchronous i/o code,
25032248Sraf  * the following sig_*() locking primitives are used in order
25042248Sraf  * to make the code async-signal-safe.  Signals are
25052248Sraf  * deferred while locks acquired by these functions are held.
25062248Sraf  */
25072248Sraf void
25082248Sraf sig_mutex_lock(mutex_t *mp)
25092248Sraf {
25102248Sraf 	sigoff(curthread);
25116515Sraf 	(void) mutex_lock(mp);
25122248Sraf }
25132248Sraf 
25142248Sraf void
25152248Sraf sig_mutex_unlock(mutex_t *mp)
25162248Sraf {
25176515Sraf 	(void) mutex_unlock(mp);
25182248Sraf 	sigon(curthread);
25192248Sraf }
25202248Sraf 
25212248Sraf int
25222248Sraf sig_mutex_trylock(mutex_t *mp)
25232248Sraf {
25242248Sraf 	int error;
25252248Sraf 
25262248Sraf 	sigoff(curthread);
25276515Sraf 	if ((error = mutex_trylock(mp)) != 0)
25282248Sraf 		sigon(curthread);
25292248Sraf 	return (error);
25302248Sraf }
25312248Sraf 
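/*
 * Illustrative sketch, not part of the library: the application-level
 * analogue of the sig_*() pattern above.  Applications cannot call
 * sigoff()/sigon(), but blocking signals with pthread_sigmask()
 * around a critical section achieves the same async-signal safety.
 * The function name is hypothetical.
 */
#include <pthread.h>
#include <signal.h>

static void
signal_safe_section_example(pthread_mutex_t *mp)
{
	sigset_t all, old;

	(void) sigfillset(&all);
	(void) pthread_sigmask(SIG_BLOCK, &all, &old);	/* defer signals */
	(void) pthread_mutex_lock(mp);
	/* ... critical section that a signal handler also touches ... */
	(void) pthread_mutex_unlock(mp);
	(void) pthread_sigmask(SIG_SETMASK, &old, NULL); /* take them now */
}
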
25322248Sraf /*
25332248Sraf  * sig_cond_wait() is a cancellation point.
25342248Sraf  */
25352248Sraf int
25362248Sraf sig_cond_wait(cond_t *cv, mutex_t *mp)
25372248Sraf {
25382248Sraf 	int error;
25392248Sraf 
25402248Sraf 	ASSERT(curthread->ul_sigdefer != 0);
25416515Sraf 	pthread_testcancel();
25425891Sraf 	error = __cond_wait(cv, mp);
25432248Sraf 	if (error == EINTR && curthread->ul_cursig) {
25442248Sraf 		sig_mutex_unlock(mp);
25452248Sraf 		/* take the deferred signal here */
25462248Sraf 		sig_mutex_lock(mp);
25472248Sraf 	}
25486515Sraf 	pthread_testcancel();
25492248Sraf 	return (error);
25502248Sraf }
25512248Sraf 
25522248Sraf /*
25532248Sraf  * sig_cond_reltimedwait() is a cancellation point.
25542248Sraf  */
25552248Sraf int
25562248Sraf sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
25572248Sraf {
25582248Sraf 	int error;
25592248Sraf 
25602248Sraf 	ASSERT(curthread->ul_sigdefer != 0);
25616515Sraf 	pthread_testcancel();
25625891Sraf 	error = __cond_reltimedwait(cv, mp, ts);
25632248Sraf 	if (error == EINTR && curthread->ul_cursig) {
25642248Sraf 		sig_mutex_unlock(mp);
25652248Sraf 		/* take the deferred signal here */
25662248Sraf 		sig_mutex_lock(mp);
25672248Sraf 	}
25686515Sraf 	pthread_testcancel();
25692248Sraf 	return (error);
25702248Sraf }
25712248Sraf 
25725891Sraf  * For specialized code in libc, like the stdio code,
25735891Sraf  * For specialized code in libc, like the stdio code.
25745891Sraf  * the following cancel_safe_*() locking primitives are used in
25755891Sraf  * order to make the code cancellation-safe.  Cancellation is
25765891Sraf  * deferred while locks acquired by these functions are held.
25775891Sraf  */
25785891Sraf void
25795891Sraf cancel_safe_mutex_lock(mutex_t *mp)
25805891Sraf {
25816515Sraf 	(void) mutex_lock(mp);
25825891Sraf 	curthread->ul_libc_locks++;
25835891Sraf }
25845891Sraf 
25855891Sraf int
25865891Sraf cancel_safe_mutex_trylock(mutex_t *mp)
25875891Sraf {
25885891Sraf 	int error;
25895891Sraf 
25906515Sraf 	if ((error = mutex_trylock(mp)) == 0)
25915891Sraf 		curthread->ul_libc_locks++;
25925891Sraf 	return (error);
25935891Sraf }
25945891Sraf 
25955891Sraf void
25965891Sraf cancel_safe_mutex_unlock(mutex_t *mp)
25975891Sraf {
25985891Sraf 	ulwp_t *self = curthread;
25995891Sraf 
26005891Sraf 	ASSERT(self->ul_libc_locks != 0);
26015891Sraf 
26026515Sraf 	(void) mutex_unlock(mp);
26035891Sraf 
26045891Sraf 	/*
26055891Sraf 	 * Decrement the count of locks held by cancel_safe_mutex_lock().
26065891Sraf 	 * If we are then in a position to terminate cleanly,
26075891Sraf 	 * and if a cancellation is pending and not disabled,
26085891Sraf 	 * and if we received EINTR from a recent system call,
26095891Sraf 	 * then perform the cancellation action now.
26105891Sraf 	 */
26115891Sraf 	if (--self->ul_libc_locks == 0 &&
26125891Sraf 	    !(self->ul_vfork | self->ul_nocancel |
26135891Sraf 	    self->ul_critical | self->ul_sigdefer) &&
26145891Sraf 	    cancel_active())
2615*6812Sraf 		pthread_exit(PTHREAD_CANCELED);
26165891Sraf }
26175891Sraf 
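/*
 * Usage sketch (editorial example): stdio-like code holds its internal
 * locks through these primitives so that a cancellation cannot fire,
 * and thereby leak the lock, in mid-operation.  The lock name below is
 * hypothetical.
 */
static void
example_cancel_safe_usage(mutex_t *stream_lock)
{
	cancel_safe_mutex_lock(stream_lock);
	/* ... update stream state; cancellation is deferred here ... */
	cancel_safe_mutex_unlock(stream_lock);
	/* an eligible pending cancellation is acted upon in the unlock */
}
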
26180Sstevel@tonic-gate static int
26190Sstevel@tonic-gate shared_mutex_held(mutex_t *mparg)
26200Sstevel@tonic-gate {
26210Sstevel@tonic-gate 	/*
26224574Sraf 	 * The 'volatile' is necessary to make sure the compiler doesn't
26234574Sraf 	 * reorder the tests of the various components of the mutex.
26244574Sraf 	 * They must be tested in this order:
26254574Sraf 	 *	mutex_lockw
26264574Sraf 	 *	mutex_owner
26274574Sraf 	 *	mutex_ownerpid
26284574Sraf 	 * This relies on the fact that everywhere mutex_lockw is cleared,
26294574Sraf 	 * mutex_owner and mutex_ownerpid are cleared before mutex_lockw
26304574Sraf 	 * is cleared, and that everywhere mutex_lockw is set, mutex_owner
26314574Sraf 	 * and mutex_ownerpid are set after mutex_lockw is set, and that
26324574Sraf 	 * mutex_lockw is set or cleared with a memory barrier.
26330Sstevel@tonic-gate 	 */
26340Sstevel@tonic-gate 	volatile mutex_t *mp = (volatile mutex_t *)mparg;
26350Sstevel@tonic-gate 	ulwp_t *self = curthread;
26360Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
26370Sstevel@tonic-gate 
26384574Sraf 	return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid);
26390Sstevel@tonic-gate }
26400Sstevel@tonic-gate 
2641*6812Sraf #pragma weak _mutex_held = mutex_held
26420Sstevel@tonic-gate int
2643*6812Sraf mutex_held(mutex_t *mparg)
26440Sstevel@tonic-gate {
26454574Sraf 	volatile mutex_t *mp = (volatile mutex_t *)mparg;
26464574Sraf 
26474574Sraf 	if (mparg->mutex_type & USYNC_PROCESS)
26484574Sraf 		return (shared_mutex_held(mparg));
26490Sstevel@tonic-gate 	return (MUTEX_OWNED(mp, curthread));
26500Sstevel@tonic-gate }
26510Sstevel@tonic-gate 
2652*6812Sraf #pragma weak pthread_mutex_destroy = mutex_destroy
2653*6812Sraf #pragma weak _mutex_destroy = mutex_destroy
26540Sstevel@tonic-gate int
2655*6812Sraf mutex_destroy(mutex_t *mp)
26560Sstevel@tonic-gate {
26574574Sraf 	if (mp->mutex_type & USYNC_PROCESS)
26584574Sraf 		forget_lock(mp);
26596515Sraf 	(void) memset(mp, 0, sizeof (*mp));
26600Sstevel@tonic-gate 	tdb_sync_obj_deregister(mp);
26610Sstevel@tonic-gate 	return (0);
26620Sstevel@tonic-gate }
26630Sstevel@tonic-gate 
2664*6812Sraf #pragma weak pthread_mutex_consistent_np = mutex_consistent
26654574Sraf int
2666*6812Sraf mutex_consistent(mutex_t *mp)
26674574Sraf {
26684574Sraf 	/*
26694574Sraf 	 * Do this only for an inconsistent, initialized robust lock
26704574Sraf 	 * that we hold.  For all other cases, return EINVAL.
26714574Sraf 	 */
2672*6812Sraf 	if (mutex_held(mp) &&
26734574Sraf 	    (mp->mutex_type & LOCK_ROBUST) &&
26744574Sraf 	    (mp->mutex_flag & LOCK_INITED) &&
26754574Sraf 	    (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
26764574Sraf 		mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
26774574Sraf 		mp->mutex_rcount = 0;
26784574Sraf 		return (0);
26794574Sraf 	}
26804574Sraf 	return (EINVAL);
26814574Sraf }
26824574Sraf 
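/*
 * Recovery sketch (editorial example): a robust lock whose last owner
 * died is handed to the next locker with EOWNERDEAD; that new owner is
 * expected to repair the protected state and call mutex_consistent()
 * before unlocking.  The repair step below is left hypothetical.
 */
static void
example_robust_recovery(mutex_t *mp)
{
	int error = mutex_lock(mp);

	if (error == EOWNERDEAD) {
		/* ... repair the state the dead owner left behind ... */
		(void) mutex_consistent(mp);
		error = 0;
	}
	if (error == 0)
		(void) mutex_unlock(mp);
}
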
26830Sstevel@tonic-gate /*
26840Sstevel@tonic-gate  * Spin locks are separate from ordinary mutexes,
26850Sstevel@tonic-gate  * but we use the same data structure for them.
26860Sstevel@tonic-gate  */
26870Sstevel@tonic-gate 
26880Sstevel@tonic-gate int
2689*6812Sraf pthread_spin_init(pthread_spinlock_t *lock, int pshared)
26900Sstevel@tonic-gate {
26910Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
26920Sstevel@tonic-gate 
26936515Sraf 	(void) memset(mp, 0, sizeof (*mp));
26940Sstevel@tonic-gate 	if (pshared == PTHREAD_PROCESS_SHARED)
26950Sstevel@tonic-gate 		mp->mutex_type = USYNC_PROCESS;
26960Sstevel@tonic-gate 	else
26970Sstevel@tonic-gate 		mp->mutex_type = USYNC_THREAD;
26980Sstevel@tonic-gate 	mp->mutex_flag = LOCK_INITED;
26990Sstevel@tonic-gate 	mp->mutex_magic = MUTEX_MAGIC;
27000Sstevel@tonic-gate 	return (0);
27010Sstevel@tonic-gate }
27020Sstevel@tonic-gate 
27030Sstevel@tonic-gate int
2704*6812Sraf pthread_spin_destroy(pthread_spinlock_t *lock)
27050Sstevel@tonic-gate {
27066515Sraf 	(void) memset(lock, 0, sizeof (*lock));
27070Sstevel@tonic-gate 	return (0);
27080Sstevel@tonic-gate }
27090Sstevel@tonic-gate 
27100Sstevel@tonic-gate int
2711*6812Sraf pthread_spin_trylock(pthread_spinlock_t *lock)
27120Sstevel@tonic-gate {
27130Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
27140Sstevel@tonic-gate 	ulwp_t *self = curthread;
27150Sstevel@tonic-gate 	int error = 0;
27160Sstevel@tonic-gate 
27170Sstevel@tonic-gate 	no_preempt(self);
27180Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) != 0)
27190Sstevel@tonic-gate 		error = EBUSY;
27200Sstevel@tonic-gate 	else {
27210Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
27220Sstevel@tonic-gate 		if (mp->mutex_type == USYNC_PROCESS)
27230Sstevel@tonic-gate 			mp->mutex_ownerpid = self->ul_uberdata->pid;
27240Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
27250Sstevel@tonic-gate 	}
27260Sstevel@tonic-gate 	preempt(self);
27270Sstevel@tonic-gate 	return (error);
27280Sstevel@tonic-gate }
27290Sstevel@tonic-gate 
27300Sstevel@tonic-gate int
2731*6812Sraf pthread_spin_lock(pthread_spinlock_t *lock)
27320Sstevel@tonic-gate {
27334574Sraf 	mutex_t *mp = (mutex_t *)lock;
27344574Sraf 	ulwp_t *self = curthread;
27354574Sraf 	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
27364574Sraf 	int count = 0;
27374574Sraf 
27384574Sraf 	ASSERT(!self->ul_critical || self->ul_bindflags);
27394574Sraf 
27404574Sraf 	DTRACE_PROBE1(plockstat, mutex__spin, mp);
27414574Sraf 
27420Sstevel@tonic-gate 	/*
27430Sstevel@tonic-gate 	 * We don't care whether the owner is running on a processor.
27440Sstevel@tonic-gate 	 * We just spin because that's what this interface requires.
27450Sstevel@tonic-gate 	 */
27460Sstevel@tonic-gate 	for (;;) {
27470Sstevel@tonic-gate 		if (*lockp == 0) {	/* lock byte appears to be clear */
27484574Sraf 			no_preempt(self);
27494574Sraf 			if (set_lock_byte(lockp) == 0)
27504574Sraf 				break;
27514574Sraf 			preempt(self);
27520Sstevel@tonic-gate 		}
27535629Sraf 		if (count < INT_MAX)
27545629Sraf 			count++;
27550Sstevel@tonic-gate 		SMT_PAUSE();
27560Sstevel@tonic-gate 	}
27574574Sraf 	mp->mutex_owner = (uintptr_t)self;
27584574Sraf 	if (mp->mutex_type == USYNC_PROCESS)
27594574Sraf 		mp->mutex_ownerpid = self->ul_uberdata->pid;
27604574Sraf 	preempt(self);
27615629Sraf 	if (count) {
27625629Sraf 		DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
27635629Sraf 	}
27644574Sraf 	DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
27654574Sraf 	return (0);
27660Sstevel@tonic-gate }
27670Sstevel@tonic-gate 
27680Sstevel@tonic-gate int
2769*6812Sraf pthread_spin_unlock(pthread_spinlock_t *lock)
27700Sstevel@tonic-gate {
27710Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
27720Sstevel@tonic-gate 	ulwp_t *self = curthread;
27730Sstevel@tonic-gate 
27740Sstevel@tonic-gate 	no_preempt(self);
27750Sstevel@tonic-gate 	mp->mutex_owner = 0;
27760Sstevel@tonic-gate 	mp->mutex_ownerpid = 0;
27770Sstevel@tonic-gate 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
27784570Sraf 	(void) atomic_swap_32(&mp->mutex_lockword, 0);
27790Sstevel@tonic-gate 	preempt(self);
27800Sstevel@tonic-gate 	return (0);
27810Sstevel@tonic-gate }
27820Sstevel@tonic-gate 
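/*
 * Usage sketch (editorial example): the pthread_spin_*() interfaces as
 * an application would call them.  Spin locks suit only very short
 * critical sections, since contending threads burn CPU instead of
 * sleeping.
 */
static void
example_spinlock_usage(void)
{
	pthread_spinlock_t lock;

	(void) pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
	(void) pthread_spin_lock(&lock);
	/* ... very short critical section ... */
	(void) pthread_spin_unlock(&lock);
	(void) pthread_spin_destroy(&lock);
}
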
27835629Sraf #define	INITIAL_LOCKS	8	/* initial size of ul_heldlocks.array */
27844574Sraf 
27854574Sraf /*
27864574Sraf  * Find/allocate an entry for 'lock' in our array of held locks.
27874574Sraf  */
27884574Sraf static mutex_t **
27894574Sraf find_lock_entry(mutex_t *lock)
27904574Sraf {
27914574Sraf 	ulwp_t *self = curthread;
27924574Sraf 	mutex_t **remembered = NULL;
27934574Sraf 	mutex_t **lockptr;
27944574Sraf 	uint_t nlocks;
27954574Sraf 
27964574Sraf 	if ((nlocks = self->ul_heldlockcnt) != 0)
27974574Sraf 		lockptr = self->ul_heldlocks.array;
27984574Sraf 	else {
27994574Sraf 		nlocks = 1;
28004574Sraf 		lockptr = &self->ul_heldlocks.single;
28014574Sraf 	}
28024574Sraf 
28034574Sraf 	for (; nlocks; nlocks--, lockptr++) {
28044574Sraf 		if (*lockptr == lock)
28054574Sraf 			return (lockptr);
28064574Sraf 		if (*lockptr == NULL && remembered == NULL)
28074574Sraf 			remembered = lockptr;
28084574Sraf 	}
28094574Sraf 	if (remembered != NULL) {
28104574Sraf 		*remembered = lock;
28114574Sraf 		return (remembered);
28124574Sraf 	}
28134574Sraf 
28144574Sraf 	/*
28154574Sraf 	 * No entry available.  Allocate more space, converting
28164574Sraf 	 * the single entry into an array of entries if necessary.
28174574Sraf 	 */
28184574Sraf 	if ((nlocks = self->ul_heldlockcnt) == 0) {
28194574Sraf 		/*
28204574Sraf 		 * Initial allocation of the array.
28214574Sraf 		 * Convert the single entry into an array.
28224574Sraf 		 */
28234574Sraf 		self->ul_heldlockcnt = nlocks = INITIAL_LOCKS;
28244574Sraf 		lockptr = lmalloc(nlocks * sizeof (mutex_t *));
28254574Sraf 		/*
28264574Sraf 		 * The single entry becomes the first entry in the array.
28274574Sraf 		 */
28284574Sraf 		*lockptr = self->ul_heldlocks.single;
28294574Sraf 		self->ul_heldlocks.array = lockptr;
28304574Sraf 		/*
28314574Sraf 		 * Return the next available entry in the array.
28324574Sraf 		 */
28334574Sraf 		*++lockptr = lock;
28344574Sraf 		return (lockptr);
28354574Sraf 	}
28364574Sraf 	/*
28374574Sraf 	 * Reallocate the array, double the size each time.
28384574Sraf 	 */
28394574Sraf 	lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *));
28406515Sraf 	(void) memcpy(lockptr, self->ul_heldlocks.array,
28414574Sraf 	    nlocks * sizeof (mutex_t *));
28424574Sraf 	lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
28434574Sraf 	self->ul_heldlocks.array = lockptr;
28444574Sraf 	self->ul_heldlockcnt *= 2;
28454574Sraf 	/*
28464574Sraf 	 * Return the next available entry in the newly allocated array.
28474574Sraf 	 */
28484574Sraf 	*(lockptr += nlocks) = lock;
28494574Sraf 	return (lockptr);
28504574Sraf }
28514574Sraf 
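/*
 * Growth example (editorial note): ul_heldlocks starts as the single
 * inline slot, becomes an lmalloc()'d array of INITIAL_LOCKS (8)
 * entries on the first overflow, and doubles (16, 32, ...) on each
 * overflow after that; slots cleared by forget_lock() are reused
 * before any reallocation.
 */
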
28524574Sraf /*
28534574Sraf  * Insert 'lock' into our list of held locks.
28544574Sraf  * Currently only used for LOCK_ROBUST mutexes.
28554574Sraf  */
28564574Sraf void
28574574Sraf remember_lock(mutex_t *lock)
28584574Sraf {
28594574Sraf 	(void) find_lock_entry(lock);
28604574Sraf }
28614574Sraf 
28624574Sraf /*
28634574Sraf  * Remove 'lock' from our list of held locks.
28644574Sraf  * Currently only used for LOCK_ROBUST mutexes.
28654574Sraf  */
28664574Sraf void
28674574Sraf forget_lock(mutex_t *lock)
28684574Sraf {
28694574Sraf 	*find_lock_entry(lock) = NULL;
28704574Sraf }
28714574Sraf 
28724574Sraf /*
28734574Sraf  * Free the array of held locks.
28744574Sraf  */
28754574Sraf void
28764574Sraf heldlock_free(ulwp_t *ulwp)
28774574Sraf {
28784574Sraf 	uint_t nlocks;
28794574Sraf 
28804574Sraf 	if ((nlocks = ulwp->ul_heldlockcnt) != 0)
28814574Sraf 		lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
28824574Sraf 	ulwp->ul_heldlockcnt = 0;
28834574Sraf 	ulwp->ul_heldlocks.array = NULL;
28844574Sraf }
28854574Sraf 
28864574Sraf /*
28874574Sraf  * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD.
28884574Sraf  * Called from _thrp_exit() to deal with abandoned locks.
28894574Sraf  */
28904574Sraf void
28914574Sraf heldlock_exit(void)
28924574Sraf {
28934574Sraf 	ulwp_t *self = curthread;
28944574Sraf 	mutex_t **lockptr;
28954574Sraf 	uint_t nlocks;
28964574Sraf 	mutex_t *mp;
28974574Sraf 
28984574Sraf 	if ((nlocks = self->ul_heldlockcnt) != 0)
28994574Sraf 		lockptr = self->ul_heldlocks.array;
29004574Sraf 	else {
29014574Sraf 		nlocks = 1;
29024574Sraf 		lockptr = &self->ul_heldlocks.single;
29034574Sraf 	}
29044574Sraf 
29054574Sraf 	for (; nlocks; nlocks--, lockptr++) {
29064574Sraf 		/*
29074574Sraf 		 * The kernel takes care of transitioning held
29084574Sraf 		 * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD.
29094574Sraf 		 * We avoid that case here.
29104574Sraf 		 */
29114574Sraf 		if ((mp = *lockptr) != NULL &&
2912*6812Sraf 		    mutex_held(mp) &&
29134574Sraf 		    (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
29144574Sraf 		    LOCK_ROBUST) {
29154574Sraf 			mp->mutex_rcount = 0;
29164574Sraf 			if (!(mp->mutex_flag & LOCK_UNMAPPED))
29174574Sraf 				mp->mutex_flag |= LOCK_OWNERDEAD;
29184574Sraf 			(void) mutex_unlock_internal(mp, 1);
29194574Sraf 		}
29204574Sraf 	}
29214574Sraf 
29224574Sraf 	heldlock_free(self);
29234574Sraf }
29244574Sraf 
2925*6812Sraf #pragma weak _cond_init = cond_init
29260Sstevel@tonic-gate /* ARGSUSED2 */
29270Sstevel@tonic-gate int
2928*6812Sraf cond_init(cond_t *cvp, int type, void *arg)
29290Sstevel@tonic-gate {
29300Sstevel@tonic-gate 	if (type != USYNC_THREAD && type != USYNC_PROCESS)
29310Sstevel@tonic-gate 		return (EINVAL);
29326515Sraf 	(void) memset(cvp, 0, sizeof (*cvp));
29330Sstevel@tonic-gate 	cvp->cond_type = (uint16_t)type;
29340Sstevel@tonic-gate 	cvp->cond_magic = COND_MAGIC;
29350Sstevel@tonic-gate 	return (0);
29360Sstevel@tonic-gate }
29370Sstevel@tonic-gate 
29380Sstevel@tonic-gate /*
29390Sstevel@tonic-gate  * cond_sleep_queue(): utility function for cond_wait_queue().
29400Sstevel@tonic-gate  *
29410Sstevel@tonic-gate  * Go to sleep on a condvar sleep queue, expecting to be woken up
29420Sstevel@tonic-gate  * by someone calling cond_signal() or cond_broadcast(), or due
29430Sstevel@tonic-gate  * to receiving a UNIX signal or being cancelled, or simply
29440Sstevel@tonic-gate  * due to a spurious wakeup (like someone calling forkall()).
29450Sstevel@tonic-gate  *
29460Sstevel@tonic-gate  * The associated mutex is *not* reacquired before returning.
29470Sstevel@tonic-gate  * That must be done by the caller of cond_sleep_queue().
29480Sstevel@tonic-gate  */
29494574Sraf static int
29500Sstevel@tonic-gate cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
29510Sstevel@tonic-gate {
29520Sstevel@tonic-gate 	ulwp_t *self = curthread;
29530Sstevel@tonic-gate 	queue_head_t *qp;
29540Sstevel@tonic-gate 	queue_head_t *mqp;
29550Sstevel@tonic-gate 	lwpid_t lwpid;
29560Sstevel@tonic-gate 	int signalled;
29570Sstevel@tonic-gate 	int error;
29586247Sraf 	int cv_wake;
29594574Sraf 	int release_all;
29600Sstevel@tonic-gate 
29610Sstevel@tonic-gate 	/*
29620Sstevel@tonic-gate 	 * Put ourself on the CV sleep queue, unlock the mutex, then
29630Sstevel@tonic-gate 	 * park ourself and unpark a candidate lwp to grab the mutex.
29640Sstevel@tonic-gate 	 * We must go onto the CV sleep queue before dropping the
29650Sstevel@tonic-gate 	 * mutex in order to guarantee atomicity of the operation.
29660Sstevel@tonic-gate 	 */
29670Sstevel@tonic-gate 	self->ul_sp = stkptr();
29680Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
29696247Sraf 	enqueue(qp, self, 0);
29700Sstevel@tonic-gate 	cvp->cond_waiters_user = 1;
29710Sstevel@tonic-gate 	self->ul_cvmutex = mp;
29726247Sraf 	self->ul_cv_wake = cv_wake = (tsp != NULL);
29730Sstevel@tonic-gate 	self->ul_signalled = 0;
29744574Sraf 	if (mp->mutex_flag & LOCK_OWNERDEAD) {
29754574Sraf 		mp->mutex_flag &= ~LOCK_OWNERDEAD;
29764574Sraf 		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
29774574Sraf 	}
29784574Sraf 	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
29794574Sraf 	lwpid = mutex_unlock_queue(mp, release_all);
29800Sstevel@tonic-gate 	for (;;) {
29810Sstevel@tonic-gate 		set_parking_flag(self, 1);
29820Sstevel@tonic-gate 		queue_unlock(qp);
29830Sstevel@tonic-gate 		if (lwpid != 0) {
29840Sstevel@tonic-gate 			lwpid = preempt_unpark(self, lwpid);
29850Sstevel@tonic-gate 			preempt(self);
29860Sstevel@tonic-gate 		}
29870Sstevel@tonic-gate 		/*
29880Sstevel@tonic-gate 		 * We may have a deferred signal present,
29890Sstevel@tonic-gate 		 * in which case we should return EINTR.
29900Sstevel@tonic-gate 		 * Also, we may have received a SIGCANCEL; if so
29910Sstevel@tonic-gate 		 * and we are cancelable we should return EINTR.
29920Sstevel@tonic-gate 		 * We force an immediate EINTR return from
29930Sstevel@tonic-gate 		 * __lwp_park() by turning our parking flag off.
29940Sstevel@tonic-gate 		 */
29950Sstevel@tonic-gate 		if (self->ul_cursig != 0 ||
29960Sstevel@tonic-gate 		    (self->ul_cancelable && self->ul_cancel_pending))
29970Sstevel@tonic-gate 			set_parking_flag(self, 0);
29980Sstevel@tonic-gate 		/*
29990Sstevel@tonic-gate 		 * __lwp_park() will return the residual time in tsp
30000Sstevel@tonic-gate 		 * if we are unparked before the timeout expires.
30010Sstevel@tonic-gate 		 */
30020Sstevel@tonic-gate 		error = __lwp_park(tsp, lwpid);
30030Sstevel@tonic-gate 		set_parking_flag(self, 0);
30040Sstevel@tonic-gate 		lwpid = 0;	/* unpark the other lwp only once */
30050Sstevel@tonic-gate 		/*
30060Sstevel@tonic-gate 		 * We were waked up by cond_signal(), cond_broadcast(),
30070Sstevel@tonic-gate 		 * by an interrupt or timeout (EINTR or ETIME),
30080Sstevel@tonic-gate 		 * or we may just have gotten a spurious wakeup.
30090Sstevel@tonic-gate 		 */
30100Sstevel@tonic-gate 		qp = queue_lock(cvp, CV);
30116247Sraf 		if (!cv_wake)
30126247Sraf 			mqp = queue_lock(mp, MX);
30130Sstevel@tonic-gate 		if (self->ul_sleepq == NULL)
30140Sstevel@tonic-gate 			break;
30150Sstevel@tonic-gate 		/*
30160Sstevel@tonic-gate 		 * We are on either the condvar sleep queue or the
30171893Sraf 		 * mutex sleep queue.  Break out of the sleep if we
30181893Sraf 		 * were interrupted or we timed out (EINTR or ETIME).
30190Sstevel@tonic-gate 		 * Else this is a spurious wakeup; continue the loop.
30200Sstevel@tonic-gate 		 */
30216247Sraf 		if (!cv_wake && self->ul_sleepq == mqp) { /* mutex queue */
30221893Sraf 			if (error) {
30236247Sraf 				mp->mutex_waiters = dequeue_self(mqp);
30241893Sraf 				break;
30251893Sraf 			}
30261893Sraf 			tsp = NULL;	/* no more timeout */
30271893Sraf 		} else if (self->ul_sleepq == qp) {	/* condvar queue */
30280Sstevel@tonic-gate 			if (error) {
30296247Sraf 				cvp->cond_waiters_user = dequeue_self(qp);
30300Sstevel@tonic-gate 				break;
30310Sstevel@tonic-gate 			}
30320Sstevel@tonic-gate 			/*
30330Sstevel@tonic-gate 			 * Else a spurious wakeup on the condvar queue.
30340Sstevel@tonic-gate 			 * __lwp_park() has already adjusted the timeout.
30350Sstevel@tonic-gate 			 */
30360Sstevel@tonic-gate 		} else {
30370Sstevel@tonic-gate 			thr_panic("cond_sleep_queue(): thread not on queue");
30380Sstevel@tonic-gate 		}
30396247Sraf 		if (!cv_wake)
30406247Sraf 			queue_unlock(mqp);
30410Sstevel@tonic-gate 	}
30420Sstevel@tonic-gate 
30430Sstevel@tonic-gate 	self->ul_sp = 0;
30446247Sraf 	self->ul_cv_wake = 0;
30456247Sraf 	ASSERT(self->ul_cvmutex == NULL);
30460Sstevel@tonic-gate 	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
30470Sstevel@tonic-gate 	    self->ul_wchan == NULL);
30480Sstevel@tonic-gate 
30490Sstevel@tonic-gate 	signalled = self->ul_signalled;
30500Sstevel@tonic-gate 	self->ul_signalled = 0;
30510Sstevel@tonic-gate 	queue_unlock(qp);
30526247Sraf 	if (!cv_wake)
30536247Sraf 		queue_unlock(mqp);
30540Sstevel@tonic-gate 
30550Sstevel@tonic-gate 	/*
30560Sstevel@tonic-gate 	 * If we were concurrently cond_signal()d and any of:
30570Sstevel@tonic-gate 	 * received a UNIX signal, were cancelled, or got a timeout,
30580Sstevel@tonic-gate 	 * then perform another cond_signal() to avoid consuming it.
30590Sstevel@tonic-gate 	 */
30600Sstevel@tonic-gate 	if (error && signalled)
3061*6812Sraf 		(void) cond_signal(cvp);
30620Sstevel@tonic-gate 
30630Sstevel@tonic-gate 	return (error);
30640Sstevel@tonic-gate }
30650Sstevel@tonic-gate 
30660Sstevel@tonic-gate int
30675629Sraf cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
30680Sstevel@tonic-gate {
30690Sstevel@tonic-gate 	ulwp_t *self = curthread;
30700Sstevel@tonic-gate 	int error;
30714574Sraf 	int merror;
30720Sstevel@tonic-gate 
30730Sstevel@tonic-gate 	/*
30740Sstevel@tonic-gate 	 * The old thread library was programmed to defer signals
30750Sstevel@tonic-gate 	 * while in cond_wait() so that the associated mutex would
30760Sstevel@tonic-gate 	 * be guaranteed to be held when the application signal
30770Sstevel@tonic-gate 	 * handler was invoked.
30780Sstevel@tonic-gate 	 *
30790Sstevel@tonic-gate 	 * We do not behave this way by default; the state of the
30800Sstevel@tonic-gate 	 * associated mutex in the signal handler is undefined.
30810Sstevel@tonic-gate 	 *
30820Sstevel@tonic-gate 	 * To accommodate applications that depend on the old
30830Sstevel@tonic-gate 	 * behavior, the _THREAD_COND_WAIT_DEFER environment
30840Sstevel@tonic-gate 	 * variable can be set to 1 and we will behave in the
30850Sstevel@tonic-gate 	 * old way with respect to cond_wait().
30860Sstevel@tonic-gate 	 */
30870Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
30880Sstevel@tonic-gate 		sigoff(self);
30890Sstevel@tonic-gate 
30900Sstevel@tonic-gate 	error = cond_sleep_queue(cvp, mp, tsp);
30910Sstevel@tonic-gate 
30920Sstevel@tonic-gate 	/*
30930Sstevel@tonic-gate 	 * Reacquire the mutex.
30940Sstevel@tonic-gate 	 */
30955629Sraf 	if ((merror = mutex_lock_impl(mp, NULL)) != 0)
30964574Sraf 		error = merror;
30970Sstevel@tonic-gate 
30980Sstevel@tonic-gate 	/*
30990Sstevel@tonic-gate 	 * Take any deferred signal now, after we have reacquired the mutex.
31000Sstevel@tonic-gate 	 */
31010Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
31020Sstevel@tonic-gate 		sigon(self);
31030Sstevel@tonic-gate 
31040Sstevel@tonic-gate 	return (error);
31050Sstevel@tonic-gate }
31060Sstevel@tonic-gate 
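/*
 * For example (editorial note), running a legacy application as
 *	_THREAD_COND_WAIT_DEFER=1 ./legacy_app
 * restores the old guarantee that the associated mutex is held when
 * a signal handler interrupts a thread blocked in cond_wait().
 */
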
31070Sstevel@tonic-gate /*
31080Sstevel@tonic-gate  * cond_sleep_kernel(): utility function for cond_wait_kernel().
31090Sstevel@tonic-gate  * See the comment ahead of cond_sleep_queue(), above.
31100Sstevel@tonic-gate  */
31114574Sraf static int
31120Sstevel@tonic-gate cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
31130Sstevel@tonic-gate {
31140Sstevel@tonic-gate 	int mtype = mp->mutex_type;
31150Sstevel@tonic-gate 	ulwp_t *self = curthread;
31160Sstevel@tonic-gate 	int error;
31170Sstevel@tonic-gate 
31184574Sraf 	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
31194574Sraf 		_ceil_prio_waive();
31200Sstevel@tonic-gate 
31210Sstevel@tonic-gate 	self->ul_sp = stkptr();
31220Sstevel@tonic-gate 	self->ul_wchan = cvp;
31230Sstevel@tonic-gate 	mp->mutex_owner = 0;
31246057Sraf 	/* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */
31256247Sraf 	if (mtype & LOCK_PRIO_INHERIT) {
31260Sstevel@tonic-gate 		mp->mutex_lockw = LOCKCLEAR;
31276247Sraf 		self->ul_pilocks--;
31286247Sraf 	}
31290Sstevel@tonic-gate 	/*
31300Sstevel@tonic-gate 	 * ___lwp_cond_wait() returns immediately with EINTR if
31310Sstevel@tonic-gate 	 * set_parking_flag(self,0) is called on this lwp before it
31320Sstevel@tonic-gate 	 * goes to sleep in the kernel.  sigacthandler() calls this
31330Sstevel@tonic-gate 	 * when a deferred signal is noted.  This assures that we don't
31340Sstevel@tonic-gate 	 * get stuck in ___lwp_cond_wait() with all signals blocked
31350Sstevel@tonic-gate 	 * due to taking a deferred signal before going to sleep.
31360Sstevel@tonic-gate 	 */
31370Sstevel@tonic-gate 	set_parking_flag(self, 1);
31380Sstevel@tonic-gate 	if (self->ul_cursig != 0 ||
31390Sstevel@tonic-gate 	    (self->ul_cancelable && self->ul_cancel_pending))
31400Sstevel@tonic-gate 		set_parking_flag(self, 0);
31410Sstevel@tonic-gate 	error = ___lwp_cond_wait(cvp, mp, tsp, 1);
31420Sstevel@tonic-gate 	set_parking_flag(self, 0);
31430Sstevel@tonic-gate 	self->ul_sp = 0;
31440Sstevel@tonic-gate 	self->ul_wchan = NULL;
31450Sstevel@tonic-gate 	return (error);
31460Sstevel@tonic-gate }
31470Sstevel@tonic-gate 
31480Sstevel@tonic-gate int
31490Sstevel@tonic-gate cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
31500Sstevel@tonic-gate {
31510Sstevel@tonic-gate 	ulwp_t *self = curthread;
31520Sstevel@tonic-gate 	int error;
31530Sstevel@tonic-gate 	int merror;
31540Sstevel@tonic-gate 
31550Sstevel@tonic-gate 	/*
31560Sstevel@tonic-gate 	 * See the large comment in cond_wait_queue(), above.
31570Sstevel@tonic-gate 	 */
31580Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
31590Sstevel@tonic-gate 		sigoff(self);
31600Sstevel@tonic-gate 
31610Sstevel@tonic-gate 	error = cond_sleep_kernel(cvp, mp, tsp);
31620Sstevel@tonic-gate 
31630Sstevel@tonic-gate 	/*
31640Sstevel@tonic-gate 	 * Override the return code from ___lwp_cond_wait()
31650Sstevel@tonic-gate 	 * with any non-zero return code from mutex_lock().
31660Sstevel@tonic-gate 	 * This addresses robust lock failures in particular;
31670Sstevel@tonic-gate 	 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
31680Sstevel@tonic-gate 	 * errors in order to take corrective action.
31690Sstevel@tonic-gate 	 */
31705629Sraf 	if ((merror = mutex_lock_impl(mp, NULL)) != 0)
31710Sstevel@tonic-gate 		error = merror;
31720Sstevel@tonic-gate 
31730Sstevel@tonic-gate 	/*
31740Sstevel@tonic-gate 	 * Take any deferred signal now, after we have reacquired the mutex.
31750Sstevel@tonic-gate 	 */
31760Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
31770Sstevel@tonic-gate 		sigon(self);
31780Sstevel@tonic-gate 
31790Sstevel@tonic-gate 	return (error);
31800Sstevel@tonic-gate }
31810Sstevel@tonic-gate 
31820Sstevel@tonic-gate /*
3183*6812Sraf  * Common code for cond_wait() and cond_timedwait()
31840Sstevel@tonic-gate  */
31850Sstevel@tonic-gate int
31860Sstevel@tonic-gate cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
31870Sstevel@tonic-gate {
31880Sstevel@tonic-gate 	int mtype = mp->mutex_type;
31890Sstevel@tonic-gate 	hrtime_t begin_sleep = 0;
31900Sstevel@tonic-gate 	ulwp_t *self = curthread;
31910Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
31920Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
31930Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
31940Sstevel@tonic-gate 	uint8_t rcount;
31950Sstevel@tonic-gate 	int error = 0;
31960Sstevel@tonic-gate 
31970Sstevel@tonic-gate 	/*
31980Sstevel@tonic-gate 	 * The SUSV3 Posix spec for pthread_cond_timedwait() states:
31990Sstevel@tonic-gate 	 *	Except in the case of [ETIMEDOUT], all these error checks
32000Sstevel@tonic-gate 	 *	shall act as if they were performed immediately at the
32010Sstevel@tonic-gate 	 *	beginning of processing for the function and shall cause
32020Sstevel@tonic-gate 	 *	an error return, in effect, prior to modifying the state
32030Sstevel@tonic-gate 	 *	of the mutex specified by mutex or the condition variable
32040Sstevel@tonic-gate 	 *	specified by cond.
32050Sstevel@tonic-gate 	 * Therefore, we must return EINVAL now if the timeout is invalid.
32060Sstevel@tonic-gate 	 */
32070Sstevel@tonic-gate 	if (tsp != NULL &&
32080Sstevel@tonic-gate 	    (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
32090Sstevel@tonic-gate 		return (EINVAL);
32100Sstevel@tonic-gate 
32110Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
32120Sstevel@tonic-gate 		self->ul_sp = stkptr();
32130Sstevel@tonic-gate 		self->ul_wchan = cvp;
32140Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
32150Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = cvp;
32160Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
32170Sstevel@tonic-gate 		self->ul_sp = 0;
32180Sstevel@tonic-gate 	}
32190Sstevel@tonic-gate 	if (csp) {
32200Sstevel@tonic-gate 		if (tsp)
32210Sstevel@tonic-gate 			tdb_incr(csp->cond_timedwait);
32220Sstevel@tonic-gate 		else
32230Sstevel@tonic-gate 			tdb_incr(csp->cond_wait);
32240Sstevel@tonic-gate 	}
32250Sstevel@tonic-gate 	if (msp)
32260Sstevel@tonic-gate 		begin_sleep = record_hold_time(msp);
32270Sstevel@tonic-gate 	else if (csp)
32280Sstevel@tonic-gate 		begin_sleep = gethrtime();
32290Sstevel@tonic-gate 
32300Sstevel@tonic-gate 	if (self->ul_error_detection) {
3231*6812Sraf 		if (!mutex_held(mp))
32320Sstevel@tonic-gate 			lock_error(mp, "cond_wait", cvp, NULL);
32330Sstevel@tonic-gate 		if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
32340Sstevel@tonic-gate 			lock_error(mp, "recursive mutex in cond_wait",
32355629Sraf 			    cvp, NULL);
32360Sstevel@tonic-gate 		if (cvp->cond_type & USYNC_PROCESS) {
32374574Sraf 			if (!(mtype & USYNC_PROCESS))
32380Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
32395629Sraf 				    "condvar process-shared, "
32405629Sraf 				    "mutex process-private");
32410Sstevel@tonic-gate 		} else {
32424574Sraf 			if (mtype & USYNC_PROCESS)
32430Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
32445629Sraf 				    "condvar process-private, "
32455629Sraf 				    "mutex process-shared");
32460Sstevel@tonic-gate 		}
32470Sstevel@tonic-gate 	}
32480Sstevel@tonic-gate 
32490Sstevel@tonic-gate 	/*
32500Sstevel@tonic-gate 	 * We deal with recursive mutexes by completely
32510Sstevel@tonic-gate 	 * dropping the lock and restoring the recursion
32520Sstevel@tonic-gate 	 * count after waking up.  This is arguably wrong,
32530Sstevel@tonic-gate 	 * but it obeys the principle of least astonishment.
32540Sstevel@tonic-gate 	 */
32550Sstevel@tonic-gate 	rcount = mp->mutex_rcount;
32560Sstevel@tonic-gate 	mp->mutex_rcount = 0;
32574574Sraf 	if ((mtype &
32584574Sraf 	    (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) |
32590Sstevel@tonic-gate 	    (cvp->cond_type & USYNC_PROCESS))
32600Sstevel@tonic-gate 		error = cond_wait_kernel(cvp, mp, tsp);
32610Sstevel@tonic-gate 	else
32625629Sraf 		error = cond_wait_queue(cvp, mp, tsp);
32630Sstevel@tonic-gate 	mp->mutex_rcount = rcount;
32640Sstevel@tonic-gate 
32650Sstevel@tonic-gate 	if (csp) {
32660Sstevel@tonic-gate 		hrtime_t lapse = gethrtime() - begin_sleep;
32670Sstevel@tonic-gate 		if (tsp == NULL)
32680Sstevel@tonic-gate 			csp->cond_wait_sleep_time += lapse;
32690Sstevel@tonic-gate 		else {
32700Sstevel@tonic-gate 			csp->cond_timedwait_sleep_time += lapse;
32710Sstevel@tonic-gate 			if (error == ETIME)
32720Sstevel@tonic-gate 				tdb_incr(csp->cond_timedwait_timeout);
32730Sstevel@tonic-gate 		}
32740Sstevel@tonic-gate 	}
32750Sstevel@tonic-gate 	return (error);
32760Sstevel@tonic-gate }
32770Sstevel@tonic-gate 
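/*
 * Canonical caller pattern (editorial sketch): a wait can return zero
 * on a spurious wakeup, so callers must re-test their predicate in a
 * loop.  The predicate, mutex, and condvar names below are
 * hypothetical.
 */
static int example_ready;		/* protected by example_mx */
static mutex_t example_mx = DEFAULTMUTEX;
static cond_t example_cv = DEFAULTCV;

static void
example_wait_for_ready(void)
{
	(void) mutex_lock(&example_mx);
	while (!example_ready)
		(void) cond_wait(&example_cv, &example_mx);
	(void) mutex_unlock(&example_mx);
}
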
32780Sstevel@tonic-gate /*
3279*6812Sraf  * cond_wait() is a cancellation point but __cond_wait() is not.
3280*6812Sraf  * Internally, libc calls the non-cancellation version.
32815891Sraf  * Other libraries need to use pthread_setcancelstate(), as appropriate,
32825891Sraf  * since __cond_wait() is not exported from libc.
32830Sstevel@tonic-gate  */
32840Sstevel@tonic-gate int
32855891Sraf __cond_wait(cond_t *cvp, mutex_t *mp)
32860Sstevel@tonic-gate {
32870Sstevel@tonic-gate 	ulwp_t *self = curthread;
32880Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
32890Sstevel@tonic-gate 	uberflags_t *gflags;
32900Sstevel@tonic-gate 
32910Sstevel@tonic-gate 	/*
32920Sstevel@tonic-gate 	 * Optimize the common case of USYNC_THREAD plus
32930Sstevel@tonic-gate 	 * no error detection, no lock statistics, and no event tracing.
32940Sstevel@tonic-gate 	 */
32950Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
32960Sstevel@tonic-gate 	    (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
32970Sstevel@tonic-gate 	    self->ul_td_events_enable |
32980Sstevel@tonic-gate 	    udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
32995629Sraf 		return (cond_wait_queue(cvp, mp, NULL));
33000Sstevel@tonic-gate 
33010Sstevel@tonic-gate 	/*
33020Sstevel@tonic-gate 	 * Else do it the long way.
33030Sstevel@tonic-gate 	 */
33040Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, NULL));
33050Sstevel@tonic-gate }
33060Sstevel@tonic-gate 
3307*6812Sraf #pragma weak _cond_wait = cond_wait
33080Sstevel@tonic-gate int
3309*6812Sraf cond_wait(cond_t *cvp, mutex_t *mp)
33100Sstevel@tonic-gate {
33110Sstevel@tonic-gate 	int error;
33120Sstevel@tonic-gate 
33130Sstevel@tonic-gate 	_cancelon();
33145891Sraf 	error = __cond_wait(cvp, mp);
33150Sstevel@tonic-gate 	if (error == EINTR)
33160Sstevel@tonic-gate 		_canceloff();
33170Sstevel@tonic-gate 	else
33180Sstevel@tonic-gate 		_canceloff_nocancel();
33190Sstevel@tonic-gate 	return (error);
33200Sstevel@tonic-gate }
33210Sstevel@tonic-gate 
33225891Sraf /*
33235891Sraf  * pthread_cond_wait() is a cancellation point.
33245891Sraf  */
33250Sstevel@tonic-gate int
3326*6812Sraf pthread_cond_wait(pthread_cond_t *_RESTRICT_KYWD cvp,
3327*6812Sraf 	pthread_mutex_t *_RESTRICT_KYWD mp)
33280Sstevel@tonic-gate {
33290Sstevel@tonic-gate 	int error;
33300Sstevel@tonic-gate 
3331*6812Sraf 	error = cond_wait((cond_t *)cvp, (mutex_t *)mp);
33320Sstevel@tonic-gate 	return ((error == EINTR)? 0 : error);
33330Sstevel@tonic-gate }
33340Sstevel@tonic-gate 
33350Sstevel@tonic-gate /*
3336*6812Sraf  * cond_timedwait() is a cancellation point but __cond_timedwait() is not.
33370Sstevel@tonic-gate  */
33380Sstevel@tonic-gate int
33395891Sraf __cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
33400Sstevel@tonic-gate {
33410Sstevel@tonic-gate 	clockid_t clock_id = cvp->cond_clockid;
33420Sstevel@tonic-gate 	timespec_t reltime;
33430Sstevel@tonic-gate 	int error;
33440Sstevel@tonic-gate 
33450Sstevel@tonic-gate 	if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
33460Sstevel@tonic-gate 		clock_id = CLOCK_REALTIME;
33470Sstevel@tonic-gate 	abstime_to_reltime(clock_id, abstime, &reltime);
33480Sstevel@tonic-gate 	error = cond_wait_common(cvp, mp, &reltime);
33490Sstevel@tonic-gate 	if (error == ETIME && clock_id == CLOCK_HIGHRES) {
33500Sstevel@tonic-gate 		/*
33510Sstevel@tonic-gate 		 * Don't return ETIME if we didn't really get a timeout.
33520Sstevel@tonic-gate 		 * This can happen if we return because someone resets
33530Sstevel@tonic-gate 		 * the system clock.  Just return zero in this case,
33540Sstevel@tonic-gate 		 * giving a spurious wakeup but not a timeout.
33550Sstevel@tonic-gate 		 */
33560Sstevel@tonic-gate 		if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
33570Sstevel@tonic-gate 		    abstime->tv_nsec > gethrtime())
33580Sstevel@tonic-gate 			error = 0;
33590Sstevel@tonic-gate 	}
33600Sstevel@tonic-gate 	return (error);
33610Sstevel@tonic-gate }
33620Sstevel@tonic-gate 
33630Sstevel@tonic-gate int
3364*6812Sraf cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
33650Sstevel@tonic-gate {
33660Sstevel@tonic-gate 	int error;
33670Sstevel@tonic-gate 
33680Sstevel@tonic-gate 	_cancelon();
33695891Sraf 	error = __cond_timedwait(cvp, mp, abstime);
33700Sstevel@tonic-gate 	if (error == EINTR)
33710Sstevel@tonic-gate 		_canceloff();
33720Sstevel@tonic-gate 	else
33730Sstevel@tonic-gate 		_canceloff_nocancel();
33740Sstevel@tonic-gate 	return (error);
33750Sstevel@tonic-gate }
33760Sstevel@tonic-gate 
33775891Sraf /*
33785891Sraf  * pthread_cond_timedwait() is a cancellation point.
33795891Sraf  */
33800Sstevel@tonic-gate int
3381*6812Sraf pthread_cond_timedwait(pthread_cond_t *_RESTRICT_KYWD cvp,
3382*6812Sraf 	pthread_mutex_t *_RESTRICT_KYWD mp,
3383*6812Sraf 	const struct timespec *_RESTRICT_KYWD abstime)
33840Sstevel@tonic-gate {
33850Sstevel@tonic-gate 	int error;
33860Sstevel@tonic-gate 
3387*6812Sraf 	error = cond_timedwait((cond_t *)cvp, (mutex_t *)mp, abstime);
33880Sstevel@tonic-gate 	if (error == ETIME)
33890Sstevel@tonic-gate 		error = ETIMEDOUT;
33900Sstevel@tonic-gate 	else if (error == EINTR)
33910Sstevel@tonic-gate 		error = 0;
33920Sstevel@tonic-gate 	return (error);
33930Sstevel@tonic-gate }
33940Sstevel@tonic-gate 
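/*
 * Timed-wait sketch (editorial example): pthread_cond_timedwait()
 * takes an absolute timeout, so callers typically add a relative
 * interval to the current time on the condvar's clock.  The helper
 * below is hypothetical and assumes the default CLOCK_REALTIME.
 */
static int
example_wait_msec(pthread_cond_t *cvp, pthread_mutex_t *mp, long msec)
{
	struct timespec abstime;

	(void) clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += msec / 1000;
	abstime.tv_nsec += (msec % 1000) * 1000000;
	if (abstime.tv_nsec >= NANOSEC) {
		abstime.tv_sec++;
		abstime.tv_nsec -= NANOSEC;
	}
	return (pthread_cond_timedwait(cvp, mp, &abstime));
}
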
33950Sstevel@tonic-gate /*
3396*6812Sraf  * cond_reltimedwait() is a cancellation point but __cond_reltimedwait() is not.
33970Sstevel@tonic-gate  */
33980Sstevel@tonic-gate int
33995891Sraf __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
34000Sstevel@tonic-gate {
34010Sstevel@tonic-gate 	timespec_t tslocal = *reltime;
34020Sstevel@tonic-gate 
34030Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, &tslocal));
34040Sstevel@tonic-gate }
34050Sstevel@tonic-gate 
34060Sstevel@tonic-gate int
3407*6812Sraf cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
34080Sstevel@tonic-gate {
34090Sstevel@tonic-gate 	int error;
34100Sstevel@tonic-gate 
34110Sstevel@tonic-gate 	_cancelon();
34125891Sraf 	error = __cond_reltimedwait(cvp, mp, reltime);
34130Sstevel@tonic-gate 	if (error == EINTR)
34140Sstevel@tonic-gate 		_canceloff();
34150Sstevel@tonic-gate 	else
34160Sstevel@tonic-gate 		_canceloff_nocancel();
34170Sstevel@tonic-gate 	return (error);
34180Sstevel@tonic-gate }
34190Sstevel@tonic-gate 
34200Sstevel@tonic-gate int
3421*6812Sraf pthread_cond_reltimedwait_np(pthread_cond_t *_RESTRICT_KYWD cvp,
3422*6812Sraf 	pthread_mutex_t *_RESTRICT_KYWD mp,
3423*6812Sraf 	const struct timespec *_RESTRICT_KYWD reltime)
34240Sstevel@tonic-gate {
34250Sstevel@tonic-gate 	int error;
34260Sstevel@tonic-gate 
3427*6812Sraf 	error = cond_reltimedwait((cond_t *)cvp, (mutex_t *)mp, reltime);
34280Sstevel@tonic-gate 	if (error == ETIME)
34290Sstevel@tonic-gate 		error = ETIMEDOUT;
34300Sstevel@tonic-gate 	else if (error == EINTR)
34310Sstevel@tonic-gate 		error = 0;
34320Sstevel@tonic-gate 	return (error);
34330Sstevel@tonic-gate }
34340Sstevel@tonic-gate 
3435*6812Sraf #pragma weak pthread_cond_signal = cond_signal
3436*6812Sraf #pragma weak _cond_signal = cond_signal
34370Sstevel@tonic-gate int
3438*6812Sraf cond_signal(cond_t *cvp)
34390Sstevel@tonic-gate {
34400Sstevel@tonic-gate 	ulwp_t *self = curthread;
34410Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
34420Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
34430Sstevel@tonic-gate 	int error = 0;
34446247Sraf 	int more;
34456247Sraf 	lwpid_t lwpid;
34460Sstevel@tonic-gate 	queue_head_t *qp;
34470Sstevel@tonic-gate 	mutex_t *mp;
34480Sstevel@tonic-gate 	queue_head_t *mqp;
34490Sstevel@tonic-gate 	ulwp_t **ulwpp;
34500Sstevel@tonic-gate 	ulwp_t *ulwp;
34516247Sraf 	ulwp_t *prev;
34520Sstevel@tonic-gate 
34530Sstevel@tonic-gate 	if (csp)
34540Sstevel@tonic-gate 		tdb_incr(csp->cond_signal);
34550Sstevel@tonic-gate 
34560Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
3457*6812Sraf 		error = _lwp_cond_signal(cvp);
34580Sstevel@tonic-gate 
34590Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
34600Sstevel@tonic-gate 		return (error);
34610Sstevel@tonic-gate 
34620Sstevel@tonic-gate 	/*
34630Sstevel@tonic-gate 	 * Move someone from the condvar sleep queue to the mutex sleep
34640Sstevel@tonic-gate 	 * queue for the mutex that he will acquire on being woken up.
34650Sstevel@tonic-gate 	 * We can do this only if we own the mutex he will acquire.
34660Sstevel@tonic-gate 	 * If we do not own the mutex, or if his ul_cv_wake flag
34670Sstevel@tonic-gate 	 * is set, just dequeue and unpark him.
34680Sstevel@tonic-gate 	 */
34690Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
34706247Sraf 	ulwpp = queue_slot(qp, &prev, &more);
34716247Sraf 	cvp->cond_waiters_user = more;
34726247Sraf 	if (ulwpp == NULL) {	/* no one on the sleep queue */
34730Sstevel@tonic-gate 		queue_unlock(qp);
34740Sstevel@tonic-gate 		return (error);
34750Sstevel@tonic-gate 	}
34766247Sraf 	ulwp = *ulwpp;
34770Sstevel@tonic-gate 
34780Sstevel@tonic-gate 	/*
34790Sstevel@tonic-gate 	 * Inform the thread that he was the recipient of a cond_signal().
34800Sstevel@tonic-gate 	 * This lets him deal with cond_signal() and, concurrently,
34810Sstevel@tonic-gate 	 * one or more of a cancellation, a UNIX signal, or a timeout.
34820Sstevel@tonic-gate 	 * These latter conditions must not consume a cond_signal().
34830Sstevel@tonic-gate 	 */
34840Sstevel@tonic-gate 	ulwp->ul_signalled = 1;
34850Sstevel@tonic-gate 
34860Sstevel@tonic-gate 	/*
34870Sstevel@tonic-gate 	 * Dequeue the waiter but leave his ul_sleepq non-NULL
34880Sstevel@tonic-gate 	 * while we move him to the mutex queue so that he can
34890Sstevel@tonic-gate 	 * deal properly with spurious wakeups.
34900Sstevel@tonic-gate 	 */
34916247Sraf 	queue_unlink(qp, ulwpp, prev);
34920Sstevel@tonic-gate 
34930Sstevel@tonic-gate 	mp = ulwp->ul_cvmutex;		/* the mutex he will acquire */
34940Sstevel@tonic-gate 	ulwp->ul_cvmutex = NULL;
34950Sstevel@tonic-gate 	ASSERT(mp != NULL);
34960Sstevel@tonic-gate 
34970Sstevel@tonic-gate 	if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
34986247Sraf 		/* just wake him up */
34996247Sraf 		lwpid = ulwp->ul_lwpid;
35000Sstevel@tonic-gate 		no_preempt(self);
35010Sstevel@tonic-gate 		ulwp->ul_sleepq = NULL;
35020Sstevel@tonic-gate 		ulwp->ul_wchan = NULL;
35030Sstevel@tonic-gate 		queue_unlock(qp);
35040Sstevel@tonic-gate 		(void) __lwp_unpark(lwpid);
35050Sstevel@tonic-gate 		preempt(self);
35060Sstevel@tonic-gate 	} else {
35076247Sraf 		/* move him to the mutex queue */
35080Sstevel@tonic-gate 		mqp = queue_lock(mp, MX);
35096247Sraf 		enqueue(mqp, ulwp, 0);
35100Sstevel@tonic-gate 		mp->mutex_waiters = 1;
35110Sstevel@tonic-gate 		queue_unlock(mqp);
35120Sstevel@tonic-gate 		queue_unlock(qp);
35130Sstevel@tonic-gate 	}
35140Sstevel@tonic-gate 
35150Sstevel@tonic-gate 	return (error);
35160Sstevel@tonic-gate }
35170Sstevel@tonic-gate 
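/*
 * Editorial sketch: the hand-off above (moving the waiter straight to
 * the mutex sleep queue) applies when the signalling thread holds the
 * mutex, as in this hypothetical caller:
 */
static void
example_signal_holding_mutex(mutex_t *mp, cond_t *cvp, int *predicate)
{
	(void) mutex_lock(mp);
	*predicate = 1;
	(void) cond_signal(cvp);	/* waiter moves to the mutex queue */
	(void) mutex_unlock(mp);	/* and is set running here */
}
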
35184570Sraf /*
35194574Sraf  * Utility function called by mutex_wakeup_all(), cond_broadcast(),
35204574Sraf  * and rw_queue_release() to (re)allocate a big buffer to hold the
35214574Sraf  * lwpids of all the threads to be set running after they are removed
35224574Sraf  * from their sleep queues.  Since we are holding a queue lock, we
35234574Sraf  * cannot call any function that might acquire a lock.  mmap(), munmap(),
35244574Sraf  * lwp_unpark_all() are simple system calls and are safe in this regard.
35254570Sraf  */
35264570Sraf lwpid_t *
35274570Sraf alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr)
35284570Sraf {
35294570Sraf 	/*
35304570Sraf 	 * Allocate NEWLWPS ids on the first overflow.
35314570Sraf 	 * Double the allocation each time after that.
35324570Sraf 	 */
35334570Sraf 	int nlwpid = *nlwpid_ptr;
35344570Sraf 	int maxlwps = *maxlwps_ptr;
35354570Sraf 	int first_allocation;
35364570Sraf 	int newlwps;
35374570Sraf 	void *vaddr;
35384570Sraf 
35394570Sraf 	ASSERT(nlwpid == maxlwps);
35404570Sraf 
35414570Sraf 	first_allocation = (maxlwps == MAXLWPS);
35424570Sraf 	newlwps = first_allocation? NEWLWPS : 2 * maxlwps;
35436515Sraf 	vaddr = mmap(NULL, newlwps * sizeof (lwpid_t),
35444570Sraf 	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
35454570Sraf 
35464570Sraf 	if (vaddr == MAP_FAILED) {
35474570Sraf 		/*
35484570Sraf 		 * Let's hope this never happens.
35494570Sraf 		 * If it does, then we have a terrible
35504570Sraf 		 * thundering herd on our hands.
35514570Sraf 		 */
35524570Sraf 		(void) __lwp_unpark_all(lwpid, nlwpid);
35534570Sraf 		*nlwpid_ptr = 0;
35544570Sraf 	} else {
35556515Sraf 		(void) memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t));
35564570Sraf 		if (!first_allocation)
35576515Sraf 			(void) munmap((caddr_t)lwpid,
35584570Sraf 			    maxlwps * sizeof (lwpid_t));
35594570Sraf 		lwpid = vaddr;
35604570Sraf 		*maxlwps_ptr = newlwps;
35614570Sraf 	}
35624570Sraf 
35634570Sraf 	return (lwpid);
35644570Sraf }
35650Sstevel@tonic-gate 
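/*
 * Growth example (editorial note): the caller's on-stack buffer of
 * MAXLWPS ids is replaced by an mmap()'d buffer of NEWLWPS ids on the
 * first overflow; each later overflow doubles the mmap()'d buffer and
 * munmap()s the old one after copying the ids across.
 */
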
3566*6812Sraf #pragma weak pthread_cond_broadcast = cond_broadcast
3567*6812Sraf #pragma weak _cond_broadcast = cond_broadcast
35680Sstevel@tonic-gate int
3569*6812Sraf cond_broadcast(cond_t *cvp)
35700Sstevel@tonic-gate {
35710Sstevel@tonic-gate 	ulwp_t *self = curthread;
35720Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
35730Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
35740Sstevel@tonic-gate 	int error = 0;
35750Sstevel@tonic-gate 	queue_head_t *qp;
35766247Sraf 	queue_root_t *qrp;
35770Sstevel@tonic-gate 	mutex_t *mp;
35780Sstevel@tonic-gate 	mutex_t *mp_cache = NULL;
35794570Sraf 	queue_head_t *mqp = NULL;
35800Sstevel@tonic-gate 	ulwp_t *ulwp;
35814570Sraf 	int nlwpid = 0;
35824570Sraf 	int maxlwps = MAXLWPS;
35830Sstevel@tonic-gate 	lwpid_t buffer[MAXLWPS];
35840Sstevel@tonic-gate 	lwpid_t *lwpid = buffer;
35850Sstevel@tonic-gate 
35860Sstevel@tonic-gate 	if (csp)
35870Sstevel@tonic-gate 		tdb_incr(csp->cond_broadcast);
35880Sstevel@tonic-gate 
35890Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
3590*6812Sraf 		error = _lwp_cond_broadcast(cvp);
35910Sstevel@tonic-gate 
35920Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
35930Sstevel@tonic-gate 		return (error);
35940Sstevel@tonic-gate 
35950Sstevel@tonic-gate 	/*
35960Sstevel@tonic-gate 	 * Move everyone from the condvar sleep queue to the mutex sleep
35970Sstevel@tonic-gate 	 * queue for the mutex that they will acquire on being woken up.
35980Sstevel@tonic-gate 	 * We can do this only if we own the mutex they will acquire.
35990Sstevel@tonic-gate 	 * If we do not own the mutex, or if their ul_cv_wake flag
36000Sstevel@tonic-gate 	 * is set, just dequeue and unpark them.
36010Sstevel@tonic-gate 	 *
36020Sstevel@tonic-gate 	 * We keep track of lwpids that are to be unparked in lwpid[].
36030Sstevel@tonic-gate 	 * __lwp_unpark_all() is called to unpark all of them after
36040Sstevel@tonic-gate 	 * they have been removed from the sleep queue and the sleep
36050Sstevel@tonic-gate 	 * queue lock has been dropped.  If we run out of space in our
36060Sstevel@tonic-gate 	 * on-stack buffer, we need to allocate more but we can't call
36070Sstevel@tonic-gate 	 * lmalloc() because we are holding a queue lock when the overflow
36080Sstevel@tonic-gate 	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
36094570Sraf 	 * either because the application may have allocated a small
36104570Sraf 	 * stack and we don't want to overrun the stack.  So we call
36114570Sraf 	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
36120Sstevel@tonic-gate 	 * system call directly since that path acquires no locks.
36130Sstevel@tonic-gate 	 */
36140Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
36150Sstevel@tonic-gate 	cvp->cond_waiters_user = 0;
36166247Sraf 	for (;;) {
36176247Sraf 		if ((qrp = qp->qh_root) == NULL ||
36186247Sraf 		    (ulwp = qrp->qr_head) == NULL)
36196247Sraf 			break;
36206247Sraf 		ASSERT(ulwp->ul_wchan == cvp);
36216247Sraf 		queue_unlink(qp, &qrp->qr_head, NULL);
36220Sstevel@tonic-gate 		mp = ulwp->ul_cvmutex;		/* his mutex */
36230Sstevel@tonic-gate 		ulwp->ul_cvmutex = NULL;
36240Sstevel@tonic-gate 		ASSERT(mp != NULL);
36250Sstevel@tonic-gate 		if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
36266247Sraf 			/* just wake him up */
36270Sstevel@tonic-gate 			ulwp->ul_sleepq = NULL;
36280Sstevel@tonic-gate 			ulwp->ul_wchan = NULL;
36294570Sraf 			if (nlwpid == maxlwps)
36304570Sraf 				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
36310Sstevel@tonic-gate 			lwpid[nlwpid++] = ulwp->ul_lwpid;
36320Sstevel@tonic-gate 		} else {
36336247Sraf 			/* move him to the mutex queue */
36340Sstevel@tonic-gate 			if (mp != mp_cache) {
36350Sstevel@tonic-gate 				mp_cache = mp;
36364570Sraf 				if (mqp != NULL)
36374570Sraf 					queue_unlock(mqp);
36384570Sraf 				mqp = queue_lock(mp, MX);
36390Sstevel@tonic-gate 			}
36406247Sraf 			enqueue(mqp, ulwp, 0);
36410Sstevel@tonic-gate 			mp->mutex_waiters = 1;
36420Sstevel@tonic-gate 		}
36430Sstevel@tonic-gate 	}
36444570Sraf 	if (mqp != NULL)
36454570Sraf 		queue_unlock(mqp);
36464570Sraf 	if (nlwpid == 0) {
36474570Sraf 		queue_unlock(qp);
36484570Sraf 	} else {
36494570Sraf 		no_preempt(self);
36504570Sraf 		queue_unlock(qp);
36510Sstevel@tonic-gate 		if (nlwpid == 1)
36520Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid[0]);
36530Sstevel@tonic-gate 		else
36540Sstevel@tonic-gate 			(void) __lwp_unpark_all(lwpid, nlwpid);
36554570Sraf 		preempt(self);
36560Sstevel@tonic-gate 	}
36570Sstevel@tonic-gate 	if (lwpid != buffer)
36586515Sraf 		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
36590Sstevel@tonic-gate 	return (error);
36600Sstevel@tonic-gate }
36610Sstevel@tonic-gate 
3662*6812Sraf #pragma weak pthread_cond_destroy = cond_destroy
36630Sstevel@tonic-gate int
3664*6812Sraf cond_destroy(cond_t *cvp)
36650Sstevel@tonic-gate {
36660Sstevel@tonic-gate 	cvp->cond_magic = 0;
36670Sstevel@tonic-gate 	tdb_sync_obj_deregister(cvp);
36680Sstevel@tonic-gate 	return (0);
36690Sstevel@tonic-gate }
36700Sstevel@tonic-gate 
36710Sstevel@tonic-gate #if defined(THREAD_DEBUG)
36720Sstevel@tonic-gate void
36730Sstevel@tonic-gate assert_no_libc_locks_held(void)
36740Sstevel@tonic-gate {
36750Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
36760Sstevel@tonic-gate }
36770Sstevel@tonic-gate 
36780Sstevel@tonic-gate /* protected by link_lock */
36790Sstevel@tonic-gate uint64_t spin_lock_spin;
36800Sstevel@tonic-gate uint64_t spin_lock_spin2;
36810Sstevel@tonic-gate uint64_t spin_lock_sleep;
36820Sstevel@tonic-gate uint64_t spin_lock_wakeup;
36830Sstevel@tonic-gate 
36840Sstevel@tonic-gate /*
36850Sstevel@tonic-gate  * Record spin lock statistics.
36860Sstevel@tonic-gate  * Called by a thread exiting itself in thrp_exit().
36870Sstevel@tonic-gate  * Also called via atexit() from the thread calling
36880Sstevel@tonic-gate  * exit() to do all the other threads as well.
36890Sstevel@tonic-gate  */
36900Sstevel@tonic-gate void
36910Sstevel@tonic-gate record_spin_locks(ulwp_t *ulwp)
36920Sstevel@tonic-gate {
36930Sstevel@tonic-gate 	spin_lock_spin += ulwp->ul_spin_lock_spin;
36940Sstevel@tonic-gate 	spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
36950Sstevel@tonic-gate 	spin_lock_sleep += ulwp->ul_spin_lock_sleep;
36960Sstevel@tonic-gate 	spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
36970Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin = 0;
36980Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin2 = 0;
36990Sstevel@tonic-gate 	ulwp->ul_spin_lock_sleep = 0;
37000Sstevel@tonic-gate 	ulwp->ul_spin_lock_wakeup = 0;
37010Sstevel@tonic-gate }
37020Sstevel@tonic-gate 
37030Sstevel@tonic-gate /*
37040Sstevel@tonic-gate  * atexit function:  dump the queue statistics to stderr.
37050Sstevel@tonic-gate  */
37060Sstevel@tonic-gate #include <stdio.h>
37070Sstevel@tonic-gate void
37080Sstevel@tonic-gate dump_queue_statistics(void)
37090Sstevel@tonic-gate {
37100Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
37110Sstevel@tonic-gate 	queue_head_t *qp;
37120Sstevel@tonic-gate 	int qn;
37130Sstevel@tonic-gate 	uint64_t spin_lock_total = 0;
37140Sstevel@tonic-gate 
37150Sstevel@tonic-gate 	if (udp->queue_head == NULL || thread_queue_dump == 0)
37160Sstevel@tonic-gate 		return;
37170Sstevel@tonic-gate 
37180Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
37196247Sraf 	    fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
37200Sstevel@tonic-gate 		return;
37210Sstevel@tonic-gate 	for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
37220Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
37230Sstevel@tonic-gate 			continue;
37240Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
37256247Sraf 		if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
37266247Sraf 		    (u_longlong_t)qp->qh_lockcount,
37276247Sraf 		    qp->qh_qmax, qp->qh_hmax) < 0)
37285629Sraf 			return;
37290Sstevel@tonic-gate 	}
37300Sstevel@tonic-gate 
37310Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
37326247Sraf 	    fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
37330Sstevel@tonic-gate 		return;
37340Sstevel@tonic-gate 	for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
37350Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
37360Sstevel@tonic-gate 			continue;
37370Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
37386247Sraf 		if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
37396247Sraf 		    (u_longlong_t)qp->qh_lockcount,
37406247Sraf 		    qp->qh_qmax, qp->qh_hmax) < 0)
37415629Sraf 			return;
37420Sstevel@tonic-gate 	}
37430Sstevel@tonic-gate 
37440Sstevel@tonic-gate 	(void) fprintf(stderr, "\n  spin_lock_total  = %10llu\n",
37455629Sraf 	    (u_longlong_t)spin_lock_total);
37460Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin   = %10llu\n",
37475629Sraf 	    (u_longlong_t)spin_lock_spin);
37480Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin2  = %10llu\n",
37495629Sraf 	    (u_longlong_t)spin_lock_spin2);
37500Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_sleep  = %10llu\n",
37515629Sraf 	    (u_longlong_t)spin_lock_sleep);
37520Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_wakeup = %10llu\n",
37535629Sraf 	    (u_longlong_t)spin_lock_wakeup);
37540Sstevel@tonic-gate }
37556247Sraf #endif
3756