xref: /onnv-gate/usr/src/lib/libc/port/threads/synch.c (revision 6247:ad4c702ff226)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
51893Sraf  * Common Development and Distribution License (the "License").
61893Sraf  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
211219Sraf 
220Sstevel@tonic-gate /*
235891Sraf  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
240Sstevel@tonic-gate  * Use is subject to license terms.
250Sstevel@tonic-gate  */
260Sstevel@tonic-gate 
270Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
280Sstevel@tonic-gate 
296057Sraf #define	atomic_cas_64	_atomic_cas_64
300Sstevel@tonic-gate 
310Sstevel@tonic-gate #include "lint.h"
320Sstevel@tonic-gate #include "thr_uberdata.h"
33*6247Sraf #include <sys/rtpriocntl.h>
346057Sraf #include <sys/sdt.h>
356057Sraf #include <atomic.h>
360Sstevel@tonic-gate 
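/*
 * Lightweight statistics macros, compiled away unless THREAD_DEBUG
 * is defined: INCR32() is a saturating increment (it sticks at
 * UINT32_MAX), INCR() and DECR() are unchecked, and MAXINCR(m, x)
 * increments x while recording its high-water mark in m.
 */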
37*6247Sraf #if defined(THREAD_DEBUG)
38*6247Sraf #define	INCR32(x)	(((x) != UINT32_MAX)? (x)++ : 0)
39*6247Sraf #define	INCR(x)		((x)++)
40*6247Sraf #define	DECR(x)		((x)--)
41*6247Sraf #define	MAXINCR(m, x)	((m < ++x)? (m = x) : 0)
42*6247Sraf #else
43*6247Sraf #define	INCR32(x)
44*6247Sraf #define	INCR(x)
45*6247Sraf #define	DECR(x)
46*6247Sraf #define	MAXINCR(m, x)
47*6247Sraf #endif
48*6247Sraf 
490Sstevel@tonic-gate /*
500Sstevel@tonic-gate  * This mutex is initialized to be held by lwp#1.
510Sstevel@tonic-gate  * It is used to block a thread that has returned from a mutex_lock()
524574Sraf  * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
530Sstevel@tonic-gate  */
540Sstevel@tonic-gate mutex_t	stall_mutex = DEFAULTMUTEX;
550Sstevel@tonic-gate 
560Sstevel@tonic-gate static int shared_mutex_held(mutex_t *);
574574Sraf static int mutex_queuelock_adaptive(mutex_t *);
584574Sraf static void mutex_wakeup_all(mutex_t *);
590Sstevel@tonic-gate 
600Sstevel@tonic-gate /*
610Sstevel@tonic-gate  * Lock statistics support functions.
620Sstevel@tonic-gate  */
630Sstevel@tonic-gate void
640Sstevel@tonic-gate record_begin_hold(tdb_mutex_stats_t *msp)
650Sstevel@tonic-gate {
660Sstevel@tonic-gate 	tdb_incr(msp->mutex_lock);
670Sstevel@tonic-gate 	msp->mutex_begin_hold = gethrtime();
680Sstevel@tonic-gate }
690Sstevel@tonic-gate 
700Sstevel@tonic-gate hrtime_t
710Sstevel@tonic-gate record_hold_time(tdb_mutex_stats_t *msp)
720Sstevel@tonic-gate {
730Sstevel@tonic-gate 	hrtime_t now = gethrtime();
740Sstevel@tonic-gate 
750Sstevel@tonic-gate 	if (msp->mutex_begin_hold)
760Sstevel@tonic-gate 		msp->mutex_hold_time += now - msp->mutex_begin_hold;
770Sstevel@tonic-gate 	msp->mutex_begin_hold = 0;
780Sstevel@tonic-gate 	return (now);
790Sstevel@tonic-gate }
800Sstevel@tonic-gate 
810Sstevel@tonic-gate /*
820Sstevel@tonic-gate  * Called once at library initialization.
830Sstevel@tonic-gate  */
840Sstevel@tonic-gate void
850Sstevel@tonic-gate mutex_setup(void)
860Sstevel@tonic-gate {
870Sstevel@tonic-gate 	if (set_lock_byte(&stall_mutex.mutex_lockw))
880Sstevel@tonic-gate 		thr_panic("mutex_setup() cannot acquire stall_mutex");
890Sstevel@tonic-gate 	stall_mutex.mutex_owner = (uintptr_t)curthread;
900Sstevel@tonic-gate }
910Sstevel@tonic-gate 
920Sstevel@tonic-gate /*
935629Sraf  * The default spin count of 1000 is experimentally determined.
945629Sraf  * On sun4u machines with any number of processors it could be raised
950Sstevel@tonic-gate  * to 10,000 but that (experimentally) makes almost no difference.
965629Sraf  * The environment variable:
970Sstevel@tonic-gate  *	_THREAD_ADAPTIVE_SPIN=count
985629Sraf  * can be used to override and set the count in the range [0 .. 1,000,000].
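 * For example, running with _THREAD_ADAPTIVE_SPIN=5000 in the
 * environment selects a spin count of 5000.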
990Sstevel@tonic-gate  */
1000Sstevel@tonic-gate int	thread_adaptive_spin = 1000;
1010Sstevel@tonic-gate uint_t	thread_max_spinners = 100;
1020Sstevel@tonic-gate int	thread_queue_verify = 0;
1030Sstevel@tonic-gate static	int	ncpus;
1040Sstevel@tonic-gate 
1050Sstevel@tonic-gate /*
1060Sstevel@tonic-gate  * Distinguish spinning for queue locks from spinning for regular locks.
1075629Sraf  * We try harder to acquire queue locks by spinning.
1080Sstevel@tonic-gate  * The environment variable:
1090Sstevel@tonic-gate  *	_THREAD_QUEUE_SPIN=count
1100Sstevel@tonic-gate  * can be used to override and set the count in the range [0 .. 1,000,000].
1110Sstevel@tonic-gate  */
1125629Sraf int	thread_queue_spin = 10000;
1130Sstevel@tonic-gate 
1144574Sraf #define	ALL_ATTRIBUTES				\
1154574Sraf 	(LOCK_RECURSIVE | LOCK_ERRORCHECK |	\
1164574Sraf 	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT |	\
1174574Sraf 	LOCK_ROBUST)
1180Sstevel@tonic-gate 
1190Sstevel@tonic-gate /*
1204574Sraf  * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
1214574Sraf  * augmented by zero or more of the flags:
1224574Sraf  *	LOCK_RECURSIVE
1234574Sraf  *	LOCK_ERRORCHECK
1244574Sraf  *	LOCK_PRIO_INHERIT
1254574Sraf  *	LOCK_PRIO_PROTECT
1264574Sraf  *	LOCK_ROBUST
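 *
 * For example (illustrative), mutex_init(&m, USYNC_PROCESS | LOCK_ROBUST,
 * NULL) initializes a process-shared robust mutex; 'arg' is examined
 * only for LOCK_PRIO_PROTECT, where it supplies the priority ceiling.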
1270Sstevel@tonic-gate  */
1280Sstevel@tonic-gate #pragma weak _private_mutex_init = __mutex_init
1290Sstevel@tonic-gate #pragma weak mutex_init = __mutex_init
1300Sstevel@tonic-gate #pragma weak _mutex_init = __mutex_init
1310Sstevel@tonic-gate /* ARGSUSED2 */
1320Sstevel@tonic-gate int
1330Sstevel@tonic-gate __mutex_init(mutex_t *mp, int type, void *arg)
1340Sstevel@tonic-gate {
1354574Sraf 	int basetype = (type & ~ALL_ATTRIBUTES);
136*6247Sraf 	const pcclass_t *pccp;
1374574Sraf 	int error = 0;
138*6247Sraf 	int ceil;
1394574Sraf 
1404574Sraf 	if (basetype == USYNC_PROCESS_ROBUST) {
1414574Sraf 		/*
1424574Sraf 		 * USYNC_PROCESS_ROBUST is a deprecated historical type.
1434574Sraf 		 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
1444574Sraf 		 * retain the USYNC_PROCESS_ROBUST flag so we can return
1454574Sraf 		 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
1464574Sraf 		 * mutexes will ever draw ELOCKUNMAPPED).
1474574Sraf 		 */
1484574Sraf 		type |= (USYNC_PROCESS | LOCK_ROBUST);
1494574Sraf 		basetype = USYNC_PROCESS;
1504574Sraf 	}
1514574Sraf 
152*6247Sraf 	if (type & LOCK_PRIO_PROTECT)
153*6247Sraf 		pccp = get_info_by_policy(SCHED_FIFO);
154*6247Sraf 	if ((basetype != USYNC_THREAD && basetype != USYNC_PROCESS) ||
1554574Sraf 	    (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
156*6247Sraf 	    == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
157*6247Sraf 	    ((type & LOCK_PRIO_PROTECT) &&
158*6247Sraf 	    ((ceil = *(int *)arg) < pccp->pcc_primin ||
159*6247Sraf 	    ceil > pccp->pcc_primax))) {
1604574Sraf 		error = EINVAL;
1614574Sraf 	} else if (type & LOCK_ROBUST) {
1624574Sraf 		/*
1634574Sraf 		 * Callers of mutex_init() with the LOCK_ROBUST attribute
1644574Sraf 		 * are required to pass an initially all-zero mutex.
1654574Sraf 		 * Multiple calls to mutex_init() are allowed; all but
1664574Sraf 		 * the first return EBUSY.  A call to mutex_init() is
1674574Sraf 		 * allowed to make an inconsistent robust lock consistent
1684574Sraf 		 * (for historical usage, even though the proper interface
1694574Sraf 		 * for this is mutex_consistent()).  Note that we use
1704574Sraf 		 * atomic_or_16() to set the LOCK_INITED flag so as
1714574Sraf 		 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
1724574Sraf 		 */
1734574Sraf 		extern void _atomic_or_16(volatile uint16_t *, uint16_t);
1744574Sraf 		if (!(mp->mutex_flag & LOCK_INITED)) {
1754574Sraf 			mp->mutex_type = (uint8_t)type;
1764574Sraf 			_atomic_or_16(&mp->mutex_flag, LOCK_INITED);
1774574Sraf 			mp->mutex_magic = MUTEX_MAGIC;
1784574Sraf 		} else if (type != mp->mutex_type ||
179*6247Sraf 		    ((type & LOCK_PRIO_PROTECT) && mp->mutex_ceiling != ceil)) {
1804574Sraf 			error = EINVAL;
1814574Sraf 		} else if (__mutex_consistent(mp) != 0) {
1824574Sraf 			error = EBUSY;
1834574Sraf 		}
1844574Sraf 		/* register a process robust mutex with the kernel */
1854574Sraf 		if (basetype == USYNC_PROCESS)
1864574Sraf 			register_lock(mp);
1874574Sraf 	} else {
1880Sstevel@tonic-gate 		(void) _memset(mp, 0, sizeof (*mp));
1890Sstevel@tonic-gate 		mp->mutex_type = (uint8_t)type;
1900Sstevel@tonic-gate 		mp->mutex_flag = LOCK_INITED;
1914574Sraf 		mp->mutex_magic = MUTEX_MAGIC;
1920Sstevel@tonic-gate 	}
1934574Sraf 
194*6247Sraf 	if (error == 0 && (type & LOCK_PRIO_PROTECT)) {
195*6247Sraf 		mp->mutex_ceiling = ceil;
196*6247Sraf 	}
1974574Sraf 
1980Sstevel@tonic-gate 	return (error);
1990Sstevel@tonic-gate }
2000Sstevel@tonic-gate 
2010Sstevel@tonic-gate /*
202*6247Sraf  * Delete mp from the list of ceiling mutexes owned by curthread.
2030Sstevel@tonic-gate  * Return 1 if the head of the chain was updated.
2040Sstevel@tonic-gate  */
2050Sstevel@tonic-gate int
2060Sstevel@tonic-gate _ceil_mylist_del(mutex_t *mp)
2070Sstevel@tonic-gate {
2080Sstevel@tonic-gate 	ulwp_t *self = curthread;
2090Sstevel@tonic-gate 	mxchain_t **mcpp;
2100Sstevel@tonic-gate 	mxchain_t *mcp;
2110Sstevel@tonic-gate 
212*6247Sraf 	for (mcpp = &self->ul_mxchain;
213*6247Sraf 	    (mcp = *mcpp) != NULL;
214*6247Sraf 	    mcpp = &mcp->mxchain_next) {
215*6247Sraf 		if (mcp->mxchain_mx == mp) {
216*6247Sraf 			*mcpp = mcp->mxchain_next;
217*6247Sraf 			lfree(mcp, sizeof (*mcp));
218*6247Sraf 			return (mcpp == &self->ul_mxchain);
219*6247Sraf 		}
220*6247Sraf 	}
221*6247Sraf 	return (0);
2220Sstevel@tonic-gate }
2230Sstevel@tonic-gate 
2240Sstevel@tonic-gate /*
225*6247Sraf  * Add mp to the list of ceiling mutexes owned by curthread.
2260Sstevel@tonic-gate  * Return ENOMEM if no memory could be allocated.
2270Sstevel@tonic-gate  */
2280Sstevel@tonic-gate int
2290Sstevel@tonic-gate _ceil_mylist_add(mutex_t *mp)
2300Sstevel@tonic-gate {
2310Sstevel@tonic-gate 	ulwp_t *self = curthread;
2320Sstevel@tonic-gate 	mxchain_t *mcp;
2330Sstevel@tonic-gate 
2340Sstevel@tonic-gate 	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
2350Sstevel@tonic-gate 		return (ENOMEM);
2360Sstevel@tonic-gate 	mcp->mxchain_mx = mp;
2370Sstevel@tonic-gate 	mcp->mxchain_next = self->ul_mxchain;
2380Sstevel@tonic-gate 	self->ul_mxchain = mcp;
2390Sstevel@tonic-gate 	return (0);
2400Sstevel@tonic-gate }
2410Sstevel@tonic-gate 
2420Sstevel@tonic-gate /*
243*6247Sraf  * Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
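 * Set the calling lwp's real-time priority to 'prio' with PC_SETPARMS,
 * leaving its time quantum unchanged (RT_NOCHANGE).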
244*6247Sraf  */
245*6247Sraf static void
246*6247Sraf set_rt_priority(ulwp_t *self, int prio)
247*6247Sraf {
248*6247Sraf 	pcparms_t pcparm;
249*6247Sraf 
250*6247Sraf 	pcparm.pc_cid = self->ul_rtclassid;
251*6247Sraf 	((rtparms_t *)pcparm.pc_clparms)->rt_tqnsecs = RT_NOCHANGE;
252*6247Sraf 	((rtparms_t *)pcparm.pc_clparms)->rt_pri = prio;
253*6247Sraf 	(void) _private_priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm);
254*6247Sraf }
255*6247Sraf 
256*6247Sraf /*
257*6247Sraf  * Inherit priority from ceiling.
258*6247Sraf  * This changes the effective priority, not the assigned priority.
2590Sstevel@tonic-gate  */
2600Sstevel@tonic-gate void
261*6247Sraf _ceil_prio_inherit(int prio)
2620Sstevel@tonic-gate {
2630Sstevel@tonic-gate 	ulwp_t *self = curthread;
264*6247Sraf 
265*6247Sraf 	self->ul_epri = prio;
266*6247Sraf 	set_rt_priority(self, prio);
2670Sstevel@tonic-gate }
2680Sstevel@tonic-gate 
2690Sstevel@tonic-gate /*
2700Sstevel@tonic-gate  * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
2710Sstevel@tonic-gate  * if holding at least one ceiling lock.  If no ceiling locks are held at this
2720Sstevel@tonic-gate  * point, disinherit completely, reverting to the assigned priority.
2730Sstevel@tonic-gate  */
2740Sstevel@tonic-gate void
2750Sstevel@tonic-gate _ceil_prio_waive(void)
2760Sstevel@tonic-gate {
2770Sstevel@tonic-gate 	ulwp_t *self = curthread;
278*6247Sraf 	mxchain_t *mcp = self->ul_mxchain;
279*6247Sraf 	int prio;
280*6247Sraf 
281*6247Sraf 	if (mcp == NULL) {
282*6247Sraf 		prio = self->ul_pri;
283*6247Sraf 		self->ul_epri = 0;
2840Sstevel@tonic-gate 	} else {
285*6247Sraf 		prio = mcp->mxchain_mx->mutex_ceiling;
286*6247Sraf 		self->ul_epri = prio;
2870Sstevel@tonic-gate 	}
288*6247Sraf 	set_rt_priority(self, prio);
2890Sstevel@tonic-gate }
2900Sstevel@tonic-gate 
2910Sstevel@tonic-gate /*
2925629Sraf  * Clear the lock byte.  Retain the waiters byte and the spinners byte.
2935629Sraf  * Return the old value of the lock word.
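 * (The lock, waiters and spinners bytes all share the one 32-bit
 * mutex_lockword, so the update must be done with an atomic
 * compare-and-swap loop.)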
2945629Sraf  */
2955629Sraf static uint32_t
2965629Sraf clear_lockbyte(volatile uint32_t *lockword)
2975629Sraf {
2985629Sraf 	uint32_t old;
2995629Sraf 	uint32_t new;
3005629Sraf 
3015629Sraf 	do {
3025629Sraf 		old = *lockword;
3035629Sraf 		new = old & ~LOCKMASK;
3045629Sraf 	} while (atomic_cas_32(lockword, old, new) != old);
3055629Sraf 
3065629Sraf 	return (old);
3075629Sraf }
3085629Sraf 
3095629Sraf /*
3106057Sraf  * Same as clear_lockbyte(), but operates on mutex_lockword64.
3116057Sraf  * The mutex_ownerpid field is cleared along with the lock byte.
3126057Sraf  */
3136057Sraf static uint64_t
3146057Sraf clear_lockbyte64(volatile uint64_t *lockword64)
3156057Sraf {
3166057Sraf 	uint64_t old;
3176057Sraf 	uint64_t new;
3186057Sraf 
3196057Sraf 	do {
3206057Sraf 		old = *lockword64;
3216057Sraf 		new = old & ~LOCKMASK64;
3226057Sraf 	} while (atomic_cas_64(lockword64, old, new) != old);
3236057Sraf 
3246057Sraf 	return (old);
3256057Sraf }
3266057Sraf 
3276057Sraf /*
3286057Sraf  * Similar to set_lock_byte(), which only tries to set the lock byte.
3296057Sraf  * Here, we attempt to set the lock byte AND the mutex_ownerpid,
3306057Sraf  * keeping the remaining bytes constant.
3316057Sraf  */
3326057Sraf static int
3336057Sraf set_lock_byte64(volatile uint64_t *lockword64, pid_t ownerpid)
3346057Sraf {
3356057Sraf 	uint64_t old;
3366057Sraf 	uint64_t new;
3376057Sraf 
3386057Sraf 	old = *lockword64 & ~LOCKMASK64;
3396057Sraf 	new = old | ((uint64_t)(uint_t)ownerpid << PIDSHIFT) | LOCKBYTE64;
3406057Sraf 	if (atomic_cas_64(lockword64, old, new) == old)
3416057Sraf 		return (LOCKCLEAR);
3426057Sraf 
3436057Sraf 	return (LOCKSET);
3446057Sraf }
3456057Sraf 
3466057Sraf /*
3475629Sraf  * Increment the spinners count in the mutex lock word.
3485629Sraf  * Return 0 on success.  Return -1 if the count would exceed max_spinners.
3495629Sraf  */
3505629Sraf static int
3515629Sraf spinners_incr(volatile uint32_t *lockword, uint8_t max_spinners)
3525629Sraf {
3535629Sraf 	uint32_t old;
3545629Sraf 	uint32_t new;
3555629Sraf 
3565629Sraf 	do {
3575629Sraf 		old = *lockword;
3585629Sraf 		if (((old & SPINNERMASK) >> SPINNERSHIFT) >= max_spinners)
3595629Sraf 			return (-1);
3605629Sraf 		new = old + (1 << SPINNERSHIFT);
3615629Sraf 	} while (atomic_cas_32(lockword, old, new) != old);
3625629Sraf 
3635629Sraf 	return (0);
3645629Sraf }
3655629Sraf 
3665629Sraf /*
3675629Sraf  * Decrement the spinners count in the mutex lock word.
3685629Sraf  * Return the new value of the lock word.
3695629Sraf  */
3705629Sraf static uint32_t
3715629Sraf spinners_decr(volatile uint32_t *lockword)
3725629Sraf {
3735629Sraf 	uint32_t old;
3745629Sraf 	uint32_t new;
3755629Sraf 
3765629Sraf 	do {
3775629Sraf 		new = old = *lockword;
3785629Sraf 		if (new & SPINNERMASK)
3795629Sraf 			new -= (1 << SPINNERSHIFT);
3805629Sraf 	} while (atomic_cas_32(lockword, old, new) != old);
3815629Sraf 
3825629Sraf 	return (new);
3835629Sraf }
3845629Sraf 
3855629Sraf /*
3860Sstevel@tonic-gate  * Non-preemptive spin locks.  Used by queue_lock().
3870Sstevel@tonic-gate  * No lock statistics are gathered for these locks.
3885629Sraf  * No DTrace probes are provided for these locks.
3890Sstevel@tonic-gate  */
3900Sstevel@tonic-gate void
3910Sstevel@tonic-gate spin_lock_set(mutex_t *mp)
3920Sstevel@tonic-gate {
3930Sstevel@tonic-gate 	ulwp_t *self = curthread;
3940Sstevel@tonic-gate 
3950Sstevel@tonic-gate 	no_preempt(self);
3960Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) == 0) {
3970Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
3980Sstevel@tonic-gate 		return;
3990Sstevel@tonic-gate 	}
4000Sstevel@tonic-gate 	/*
4010Sstevel@tonic-gate 	 * Spin for a while, attempting to acquire the lock.
4020Sstevel@tonic-gate 	 */
403*6247Sraf 	INCR32(self->ul_spin_lock_spin);
4040Sstevel@tonic-gate 	if (mutex_queuelock_adaptive(mp) == 0 ||
4050Sstevel@tonic-gate 	    set_lock_byte(&mp->mutex_lockw) == 0) {
4060Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
4070Sstevel@tonic-gate 		return;
4080Sstevel@tonic-gate 	}
4090Sstevel@tonic-gate 	/*
4100Sstevel@tonic-gate 	 * Try harder if we were previously at a no preemption level.
4110Sstevel@tonic-gate 	 */
4120Sstevel@tonic-gate 	if (self->ul_preempt > 1) {
413*6247Sraf 		INCR32(self->ul_spin_lock_spin2);
4140Sstevel@tonic-gate 		if (mutex_queuelock_adaptive(mp) == 0 ||
4150Sstevel@tonic-gate 		    set_lock_byte(&mp->mutex_lockw) == 0) {
4160Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
4170Sstevel@tonic-gate 			return;
4180Sstevel@tonic-gate 		}
4190Sstevel@tonic-gate 	}
4200Sstevel@tonic-gate 	/*
4210Sstevel@tonic-gate 	 * Give up and block in the kernel for the mutex.
4220Sstevel@tonic-gate 	 */
423*6247Sraf 	INCR32(self->ul_spin_lock_sleep);
4240Sstevel@tonic-gate 	(void) ___lwp_mutex_timedlock(mp, NULL);
4250Sstevel@tonic-gate 	mp->mutex_owner = (uintptr_t)self;
4260Sstevel@tonic-gate }
4270Sstevel@tonic-gate 
4280Sstevel@tonic-gate void
4290Sstevel@tonic-gate spin_lock_clear(mutex_t *mp)
4300Sstevel@tonic-gate {
4310Sstevel@tonic-gate 	ulwp_t *self = curthread;
4320Sstevel@tonic-gate 
4330Sstevel@tonic-gate 	mp->mutex_owner = 0;
4344570Sraf 	if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
4354574Sraf 		(void) ___lwp_mutex_wakeup(mp, 0);
436*6247Sraf 		INCR32(self->ul_spin_lock_wakeup);
4370Sstevel@tonic-gate 	}
4380Sstevel@tonic-gate 	preempt(self);
4390Sstevel@tonic-gate }
4400Sstevel@tonic-gate 
4410Sstevel@tonic-gate /*
4420Sstevel@tonic-gate  * Allocate the sleep queue hash table.
4430Sstevel@tonic-gate  */
4440Sstevel@tonic-gate void
4450Sstevel@tonic-gate queue_alloc(void)
4460Sstevel@tonic-gate {
4470Sstevel@tonic-gate 	ulwp_t *self = curthread;
4480Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
449*6247Sraf 	queue_head_t *qp;
4500Sstevel@tonic-gate 	void *data;
4510Sstevel@tonic-gate 	int i;
4520Sstevel@tonic-gate 
4530Sstevel@tonic-gate 	/*
4540Sstevel@tonic-gate 	 * No locks are needed; we call here only when single-threaded.
4550Sstevel@tonic-gate 	 */
4560Sstevel@tonic-gate 	ASSERT(self == udp->ulwp_one);
4570Sstevel@tonic-gate 	ASSERT(!udp->uberflags.uf_mt);
4580Sstevel@tonic-gate 	if ((data = _private_mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
4590Sstevel@tonic-gate 	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
4600Sstevel@tonic-gate 	    == MAP_FAILED)
4610Sstevel@tonic-gate 		thr_panic("cannot allocate thread queue_head table");
462*6247Sraf 	udp->queue_head = qp = (queue_head_t *)data;
463*6247Sraf 	for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
464*6247Sraf 		qp->qh_type = (i < QHASHSIZE)? MX : CV;
465*6247Sraf 		qp->qh_lock.mutex_flag = LOCK_INITED;
466*6247Sraf 		qp->qh_lock.mutex_magic = MUTEX_MAGIC;
467*6247Sraf 		qp->qh_hlist = &qp->qh_def_root;
468*6247Sraf #if defined(THREAD_DEBUG)
469*6247Sraf 		qp->qh_hlen = 1;
470*6247Sraf 		qp->qh_hmax = 1;
471*6247Sraf #endif
4724574Sraf 	}
4730Sstevel@tonic-gate }
4740Sstevel@tonic-gate 
4750Sstevel@tonic-gate #if defined(THREAD_DEBUG)
4760Sstevel@tonic-gate 
4770Sstevel@tonic-gate /*
4780Sstevel@tonic-gate  * Debugging: verify correctness of a sleep queue.
4790Sstevel@tonic-gate  */
4800Sstevel@tonic-gate void
4810Sstevel@tonic-gate QVERIFY(queue_head_t *qp)
4820Sstevel@tonic-gate {
4830Sstevel@tonic-gate 	ulwp_t *self = curthread;
4840Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
485*6247Sraf 	queue_root_t *qrp;
4860Sstevel@tonic-gate 	ulwp_t *ulwp;
4870Sstevel@tonic-gate 	ulwp_t *prev;
4880Sstevel@tonic-gate 	uint_t index;
489*6247Sraf 	uint32_t cnt;
4900Sstevel@tonic-gate 	char qtype;
4910Sstevel@tonic-gate 	void *wchan;
4920Sstevel@tonic-gate 
4930Sstevel@tonic-gate 	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
4940Sstevel@tonic-gate 	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
495*6247Sraf 	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
496*6247Sraf 		cnt++;
497*6247Sraf 		ASSERT((qrp->qr_head != NULL && qrp->qr_tail != NULL) ||
498*6247Sraf 		    (qrp->qr_head == NULL && qrp->qr_tail == NULL));
499*6247Sraf 	}
500*6247Sraf 	ASSERT(qp->qh_hlen == cnt && qp->qh_hmax >= cnt);
501*6247Sraf 	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
502*6247Sraf 	ASSERT(qp->qh_type == qtype);
5030Sstevel@tonic-gate 	if (!thread_queue_verify)
5040Sstevel@tonic-gate 		return;
5050Sstevel@tonic-gate 	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
506*6247Sraf 	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
507*6247Sraf 		for (prev = NULL, ulwp = qrp->qr_head; ulwp != NULL;
508*6247Sraf 		    prev = ulwp, ulwp = ulwp->ul_link) {
509*6247Sraf 			cnt++;
510*6247Sraf 			if (ulwp->ul_writer)
511*6247Sraf 				ASSERT(prev == NULL || prev->ul_writer);
512*6247Sraf 			ASSERT(ulwp->ul_qtype == qtype);
513*6247Sraf 			ASSERT(ulwp->ul_wchan != NULL);
514*6247Sraf 			ASSERT(ulwp->ul_sleepq == qp);
515*6247Sraf 			wchan = ulwp->ul_wchan;
516*6247Sraf 			ASSERT(qrp->qr_wchan == wchan);
517*6247Sraf 			index = QUEUE_HASH(wchan, qtype);
518*6247Sraf 			ASSERT(&udp->queue_head[index] == qp);
519*6247Sraf 		}
520*6247Sraf 		ASSERT(qrp->qr_tail == prev);
5210Sstevel@tonic-gate 	}
5220Sstevel@tonic-gate 	ASSERT(qp->qh_qlen == cnt);
5230Sstevel@tonic-gate }
5240Sstevel@tonic-gate 
5250Sstevel@tonic-gate #else	/* THREAD_DEBUG */
5260Sstevel@tonic-gate 
5270Sstevel@tonic-gate #define	QVERIFY(qp)
5280Sstevel@tonic-gate 
5290Sstevel@tonic-gate #endif	/* THREAD_DEBUG */
5300Sstevel@tonic-gate 
5310Sstevel@tonic-gate /*
5320Sstevel@tonic-gate  * Acquire a queue head.
5330Sstevel@tonic-gate  */
5340Sstevel@tonic-gate queue_head_t *
5350Sstevel@tonic-gate queue_lock(void *wchan, int qtype)
5360Sstevel@tonic-gate {
5370Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
5380Sstevel@tonic-gate 	queue_head_t *qp;
539*6247Sraf 	queue_root_t *qrp;
5400Sstevel@tonic-gate 
5410Sstevel@tonic-gate 	ASSERT(qtype == MX || qtype == CV);
5420Sstevel@tonic-gate 
5430Sstevel@tonic-gate 	/*
5440Sstevel@tonic-gate 	 * It is possible that we could be called while still single-threaded.
5450Sstevel@tonic-gate 	 * If so, we call queue_alloc() to allocate the queue_head[] array.
5460Sstevel@tonic-gate 	 */
5470Sstevel@tonic-gate 	if ((qp = udp->queue_head) == NULL) {
5480Sstevel@tonic-gate 		queue_alloc();
5490Sstevel@tonic-gate 		qp = udp->queue_head;
5500Sstevel@tonic-gate 	}
5510Sstevel@tonic-gate 	qp += QUEUE_HASH(wchan, qtype);
5520Sstevel@tonic-gate 	spin_lock_set(&qp->qh_lock);
553*6247Sraf 	for (qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next)
554*6247Sraf 		if (qrp->qr_wchan == wchan)
555*6247Sraf 			break;
556*6247Sraf 	if (qrp == NULL && qp->qh_def_root.qr_head == NULL) {
557*6247Sraf 		/* the default queue root is available; use it */
558*6247Sraf 		qrp = &qp->qh_def_root;
559*6247Sraf 		qrp->qr_wchan = wchan;
560*6247Sraf 		ASSERT(qrp->qr_next == NULL);
561*6247Sraf 		ASSERT(qrp->qr_tail == NULL &&
562*6247Sraf 		    qrp->qr_rtcount == 0 && qrp->qr_qlen == 0);
563*6247Sraf 	}
564*6247Sraf 	qp->qh_wchan = wchan;	/* valid until queue_unlock() is called */
565*6247Sraf 	qp->qh_root = qrp;	/* valid until queue_unlock() is called */
566*6247Sraf 	INCR32(qp->qh_lockcount);
5670Sstevel@tonic-gate 	QVERIFY(qp);
5680Sstevel@tonic-gate 	return (qp);
5690Sstevel@tonic-gate }
5700Sstevel@tonic-gate 
5710Sstevel@tonic-gate /*
5720Sstevel@tonic-gate  * Release a queue head.
5730Sstevel@tonic-gate  */
5740Sstevel@tonic-gate void
5750Sstevel@tonic-gate queue_unlock(queue_head_t *qp)
5760Sstevel@tonic-gate {
5770Sstevel@tonic-gate 	QVERIFY(qp);
5780Sstevel@tonic-gate 	spin_lock_clear(&qp->qh_lock);
5790Sstevel@tonic-gate }
5800Sstevel@tonic-gate 
5810Sstevel@tonic-gate /*
5820Sstevel@tonic-gate  * For rwlock queueing, we must queue writers ahead of readers of the
5830Sstevel@tonic-gate  * same priority.  We do this by making writers appear to have a half
5840Sstevel@tonic-gate  * point higher priority for purposes of priority comparisons below.
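 * E.g., a reader and a writer, both at priority 10, compare as 20 and
 * 21 respectively, so the writer sorts ahead.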
5850Sstevel@tonic-gate  */
5860Sstevel@tonic-gate #define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)
5870Sstevel@tonic-gate 
5880Sstevel@tonic-gate void
589*6247Sraf enqueue(queue_head_t *qp, ulwp_t *ulwp, int force_fifo)
5900Sstevel@tonic-gate {
591*6247Sraf 	queue_root_t *qrp;
5920Sstevel@tonic-gate 	ulwp_t **ulwpp;
5930Sstevel@tonic-gate 	ulwp_t *next;
5940Sstevel@tonic-gate 	int pri = CMP_PRIO(ulwp);
595*6247Sraf 
5960Sstevel@tonic-gate 	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
5970Sstevel@tonic-gate 	ASSERT(ulwp->ul_sleepq != qp);
5980Sstevel@tonic-gate 
599*6247Sraf 	if ((qrp = qp->qh_root) == NULL) {
600*6247Sraf 		/* use the thread's queue root for the linkage */
601*6247Sraf 		qrp = &ulwp->ul_queue_root;
602*6247Sraf 		qrp->qr_next = qp->qh_hlist;
603*6247Sraf 		qrp->qr_prev = NULL;
604*6247Sraf 		qrp->qr_head = NULL;
605*6247Sraf 		qrp->qr_tail = NULL;
606*6247Sraf 		qrp->qr_wchan = qp->qh_wchan;
607*6247Sraf 		qrp->qr_rtcount = 0;
608*6247Sraf 		qrp->qr_qlen = 0;
609*6247Sraf 		qrp->qr_qmax = 0;
610*6247Sraf 		qp->qh_hlist->qr_prev = qrp;
611*6247Sraf 		qp->qh_hlist = qrp;
612*6247Sraf 		qp->qh_root = qrp;
613*6247Sraf 		MAXINCR(qp->qh_hmax, qp->qh_hlen);
614*6247Sraf 	}
615*6247Sraf 
6160Sstevel@tonic-gate 	/*
6170Sstevel@tonic-gate 	 * LIFO queue ordering is unfair and can lead to starvation,
6180Sstevel@tonic-gate 	 * but it gives better performance for heavily contended locks.
6190Sstevel@tonic-gate 	 * We use thread_queue_fifo (range is 0..8) to determine
6200Sstevel@tonic-gate 	 * the frequency of FIFO vs LIFO queuing:
6210Sstevel@tonic-gate 	 *	0 : every 256th time	(almost always LIFO)
6220Sstevel@tonic-gate 	 *	1 : every 128th time
6230Sstevel@tonic-gate 	 *	2 : every 64th  time
6240Sstevel@tonic-gate 	 *	3 : every 32nd  time
6250Sstevel@tonic-gate 	 *	4 : every 16th  time	(the default value, mostly LIFO)
6260Sstevel@tonic-gate 	 *	5 : every 8th   time
6270Sstevel@tonic-gate 	 *	6 : every 4th   time
6280Sstevel@tonic-gate 	 *	7 : every 2nd   time
6290Sstevel@tonic-gate 	 *	8 : every time		(never LIFO, always FIFO)
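	 *	(E.g., with the default value 4, (++qh_qcnt << 4) & 0xff
	 *	is zero on every 16th enqueue, giving the one-in-16 FIFO
	 *	rate shown above.)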
6300Sstevel@tonic-gate 	 * Note that there is always some degree of FIFO ordering.
6310Sstevel@tonic-gate 	 * This breaks livelock conditions that occur in applications
6320Sstevel@tonic-gate 	 * that are written assuming (incorrectly) that threads acquire
6330Sstevel@tonic-gate 	 * locks fairly, that is, in roughly round-robin order.
634*6247Sraf 	 * In any event, the queue is maintained in kernel priority order.
6350Sstevel@tonic-gate 	 *
636*6247Sraf 	 * If force_fifo is non-zero, fifo queueing is forced.
6370Sstevel@tonic-gate 	 * SUSV3 requires this for semaphores.
6380Sstevel@tonic-gate 	 */
639*6247Sraf 	if (qrp->qr_head == NULL) {
6400Sstevel@tonic-gate 		/*
6410Sstevel@tonic-gate 		 * The queue is empty.  LIFO/FIFO doesn't matter.
6420Sstevel@tonic-gate 		 */
643*6247Sraf 		ASSERT(qrp->qr_tail == NULL);
644*6247Sraf 		ulwpp = &qrp->qr_head;
645*6247Sraf 	} else if (force_fifo |
646*6247Sraf 	    (((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0)) {
6470Sstevel@tonic-gate 		/*
6480Sstevel@tonic-gate 		 * Enqueue after the last thread whose priority is greater
6490Sstevel@tonic-gate 		 * than or equal to the priority of the thread being queued.
6500Sstevel@tonic-gate 		 * Attempt first to go directly onto the tail of the queue.
6510Sstevel@tonic-gate 		 */
652*6247Sraf 		if (pri <= CMP_PRIO(qrp->qr_tail))
653*6247Sraf 			ulwpp = &qrp->qr_tail->ul_link;
6540Sstevel@tonic-gate 		else {
655*6247Sraf 			for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
6560Sstevel@tonic-gate 			    ulwpp = &next->ul_link)
6570Sstevel@tonic-gate 				if (pri > CMP_PRIO(next))
6580Sstevel@tonic-gate 					break;
6590Sstevel@tonic-gate 		}
6600Sstevel@tonic-gate 	} else {
6610Sstevel@tonic-gate 		/*
6620Sstevel@tonic-gate 		 * Enqueue before the first thread whose priority is less
6630Sstevel@tonic-gate 		 * than or equal to the priority of the thread being queued.
6640Sstevel@tonic-gate 		 * Hopefully we can go directly onto the head of the queue.
6650Sstevel@tonic-gate 		 */
666*6247Sraf 		for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
6670Sstevel@tonic-gate 		    ulwpp = &next->ul_link)
6680Sstevel@tonic-gate 			if (pri >= CMP_PRIO(next))
6690Sstevel@tonic-gate 				break;
6700Sstevel@tonic-gate 	}
6710Sstevel@tonic-gate 	if ((ulwp->ul_link = *ulwpp) == NULL)
672*6247Sraf 		qrp->qr_tail = ulwp;
6730Sstevel@tonic-gate 	*ulwpp = ulwp;
6740Sstevel@tonic-gate 
6750Sstevel@tonic-gate 	ulwp->ul_sleepq = qp;
676*6247Sraf 	ulwp->ul_wchan = qp->qh_wchan;
677*6247Sraf 	ulwp->ul_qtype = qp->qh_type;
678*6247Sraf 	if ((ulwp->ul_schedctl != NULL &&
679*6247Sraf 	    ulwp->ul_schedctl->sc_cid == ulwp->ul_rtclassid) |
680*6247Sraf 	    ulwp->ul_pilocks) {
681*6247Sraf 		ulwp->ul_rtqueued = 1;
682*6247Sraf 		qrp->qr_rtcount++;
683*6247Sraf 	}
684*6247Sraf 	MAXINCR(qrp->qr_qmax, qrp->qr_qlen);
685*6247Sraf 	MAXINCR(qp->qh_qmax, qp->qh_qlen);
686*6247Sraf }
687*6247Sraf 
688*6247Sraf /*
689*6247Sraf  * Helper function for queue_slot() and queue_slot_rt().
690*6247Sraf  * Try to find a non-suspended thread on the queue.
691*6247Sraf  */
692*6247Sraf static ulwp_t **
693*6247Sraf queue_slot_runnable(ulwp_t **ulwpp, ulwp_t **prevp, int rt)
694*6247Sraf {
695*6247Sraf 	ulwp_t *ulwp;
696*6247Sraf 	ulwp_t **foundpp = NULL;
697*6247Sraf 	int priority = -1;
698*6247Sraf 	ulwp_t *prev;
699*6247Sraf 	int tpri;
700*6247Sraf 
701*6247Sraf 	for (prev = NULL;
702*6247Sraf 	    (ulwp = *ulwpp) != NULL;
703*6247Sraf 	    prev = ulwp, ulwpp = &ulwp->ul_link) {
704*6247Sraf 		if (ulwp->ul_stop)	/* skip suspended threads */
705*6247Sraf 			continue;
706*6247Sraf 		tpri = rt? CMP_PRIO(ulwp) : 0;
707*6247Sraf 		if (tpri > priority) {
708*6247Sraf 			foundpp = ulwpp;
709*6247Sraf 			*prevp = prev;
710*6247Sraf 			priority = tpri;
711*6247Sraf 			if (!rt)
712*6247Sraf 				break;
713*6247Sraf 		}
714*6247Sraf 	}
715*6247Sraf 	return (foundpp);
7160Sstevel@tonic-gate }
7170Sstevel@tonic-gate 
7180Sstevel@tonic-gate /*
719*6247Sraf  * For real-time, we search the entire queue because the dispatch
720*6247Sraf  * (kernel) priorities may have changed since enqueueing.
7210Sstevel@tonic-gate  */
7220Sstevel@tonic-gate static ulwp_t **
723*6247Sraf queue_slot_rt(ulwp_t **ulwpp_org, ulwp_t **prevp)
724*6247Sraf {
725*6247Sraf 	ulwp_t **ulwpp = ulwpp_org;
726*6247Sraf 	ulwp_t *ulwp = *ulwpp;
727*6247Sraf 	ulwp_t **foundpp = ulwpp;
728*6247Sraf 	int priority = CMP_PRIO(ulwp);
729*6247Sraf 	ulwp_t *prev;
730*6247Sraf 	int tpri;
731*6247Sraf 
732*6247Sraf 	for (prev = ulwp, ulwpp = &ulwp->ul_link;
733*6247Sraf 	    (ulwp = *ulwpp) != NULL;
734*6247Sraf 	    prev = ulwp, ulwpp = &ulwp->ul_link) {
735*6247Sraf 		tpri = CMP_PRIO(ulwp);
736*6247Sraf 		if (tpri > priority) {
737*6247Sraf 			foundpp = ulwpp;
738*6247Sraf 			*prevp = prev;
739*6247Sraf 			priority = tpri;
740*6247Sraf 		}
741*6247Sraf 	}
742*6247Sraf 	ulwp = *foundpp;
743*6247Sraf 
744*6247Sraf 	/*
745*6247Sraf 	 * Try not to return a suspended thread.
746*6247Sraf 	 * This mimics the old libthread's behavior.
747*6247Sraf 	 */
748*6247Sraf 	if (ulwp->ul_stop &&
749*6247Sraf 	    (ulwpp = queue_slot_runnable(ulwpp_org, prevp, 1)) != NULL) {
750*6247Sraf 		foundpp = ulwpp;
751*6247Sraf 		ulwp = *foundpp;
752*6247Sraf 	}
753*6247Sraf 	ulwp->ul_rt = 1;
754*6247Sraf 	return (foundpp);
755*6247Sraf }
756*6247Sraf 
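/*
 * Find the queue slot of the thread that should be woken up next.
 * Set *prevp to its predecessor on the queue and set *more to indicate
 * whether other threads remain queued after the chosen thread.
 */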
757*6247Sraf ulwp_t **
758*6247Sraf queue_slot(queue_head_t *qp, ulwp_t **prevp, int *more)
759*6247Sraf {
760*6247Sraf 	queue_root_t *qrp;
761*6247Sraf 	ulwp_t **ulwpp;
762*6247Sraf 	ulwp_t *ulwp;
763*6247Sraf 	int rt;
764*6247Sraf 
765*6247Sraf 	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
766*6247Sraf 
767*6247Sraf 	if ((qrp = qp->qh_root) == NULL || (ulwp = qrp->qr_head) == NULL) {
768*6247Sraf 		*more = 0;
769*6247Sraf 		return (NULL);		/* no lwps on the queue */
770*6247Sraf 	}
771*6247Sraf 	rt = (qrp->qr_rtcount != 0);
772*6247Sraf 	*prevp = NULL;
773*6247Sraf 	if (ulwp->ul_link == NULL) {	/* only one lwp on the queue */
774*6247Sraf 		*more = 0;
775*6247Sraf 		ulwp->ul_rt = rt;
776*6247Sraf 		return (&qrp->qr_head);
777*6247Sraf 	}
778*6247Sraf 	*more = 1;
779*6247Sraf 
780*6247Sraf 	if (rt)		/* real-time queue */
781*6247Sraf 		return (queue_slot_rt(&qrp->qr_head, prevp));
782*6247Sraf 	/*
783*6247Sraf 	 * Try not to return a suspended thread.
784*6247Sraf 	 * This mimics the old libthread's behavior.
785*6247Sraf 	 */
786*6247Sraf 	if (ulwp->ul_stop &&
787*6247Sraf 	    (ulwpp = queue_slot_runnable(&qrp->qr_head, prevp, 0)) != NULL) {
788*6247Sraf 		ulwp = *ulwpp;
789*6247Sraf 		ulwp->ul_rt = 0;
790*6247Sraf 		return (ulwpp);
791*6247Sraf 	}
792*6247Sraf 	/*
793*6247Sraf 	 * The common case; just pick the first thread on the queue.
794*6247Sraf 	 */
795*6247Sraf 	ulwp->ul_rt = 0;
796*6247Sraf 	return (&qrp->qr_head);
797*6247Sraf }
798*6247Sraf 
799*6247Sraf /*
800*6247Sraf  * Common code for unlinking an lwp from a user-level sleep queue.
801*6247Sraf  */
802*6247Sraf void
803*6247Sraf queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
804*6247Sraf {
805*6247Sraf 	queue_root_t *qrp = qp->qh_root;
806*6247Sraf 	queue_root_t *nqrp;
807*6247Sraf 	ulwp_t *ulwp = *ulwpp;
808*6247Sraf 	ulwp_t *next;
809*6247Sraf 
810*6247Sraf 	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
811*6247Sraf 	ASSERT(qp->qh_wchan != NULL && ulwp->ul_wchan == qp->qh_wchan);
812*6247Sraf 
813*6247Sraf 	DECR(qp->qh_qlen);
814*6247Sraf 	DECR(qrp->qr_qlen);
815*6247Sraf 	if (ulwp->ul_rtqueued) {
816*6247Sraf 		ulwp->ul_rtqueued = 0;
817*6247Sraf 		qrp->qr_rtcount--;
818*6247Sraf 	}
819*6247Sraf 	next = ulwp->ul_link;
820*6247Sraf 	*ulwpp = next;
821*6247Sraf 	ulwp->ul_link = NULL;
822*6247Sraf 	if (qrp->qr_tail == ulwp)
823*6247Sraf 		qrp->qr_tail = prev;
824*6247Sraf 	if (qrp == &ulwp->ul_queue_root) {
825*6247Sraf 		/*
826*6247Sraf 		 * We can't continue to use the unlinked thread's
827*6247Sraf 		 * queue root for the linkage.
828*6247Sraf 		 */
829*6247Sraf 		queue_root_t *qr_next = qrp->qr_next;
830*6247Sraf 		queue_root_t *qr_prev = qrp->qr_prev;
831*6247Sraf 
832*6247Sraf 		if (qrp->qr_tail) {
833*6247Sraf 			/* switch to using the last thread's queue root */
834*6247Sraf 			ASSERT(qrp->qr_qlen != 0);
835*6247Sraf 			nqrp = &qrp->qr_tail->ul_queue_root;
836*6247Sraf 			*nqrp = *qrp;
837*6247Sraf 			if (qr_next)
838*6247Sraf 				qr_next->qr_prev = nqrp;
839*6247Sraf 			if (qr_prev)
840*6247Sraf 				qr_prev->qr_next = nqrp;
841*6247Sraf 			else
842*6247Sraf 				qp->qh_hlist = nqrp;
843*6247Sraf 			qp->qh_root = nqrp;
844*6247Sraf 		} else {
845*6247Sraf 			/* empty queue root; just delete from the hash list */
846*6247Sraf 			ASSERT(qrp->qr_qlen == 0);
847*6247Sraf 			if (qr_next)
848*6247Sraf 				qr_next->qr_prev = qr_prev;
849*6247Sraf 			if (qr_prev)
850*6247Sraf 				qr_prev->qr_next = qr_next;
851*6247Sraf 			else
852*6247Sraf 				qp->qh_hlist = qr_next;
853*6247Sraf 			qp->qh_root = NULL;
854*6247Sraf 			DECR(qp->qh_hlen);
855*6247Sraf 		}
856*6247Sraf 	}
857*6247Sraf }
858*6247Sraf 
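/*
 * Remove the next eligible thread (as chosen by queue_slot()) from the
 * sleep queue and return it, or return NULL if the queue is empty.
 */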
859*6247Sraf ulwp_t *
860*6247Sraf dequeue(queue_head_t *qp, int *more)
8610Sstevel@tonic-gate {
8620Sstevel@tonic-gate 	ulwp_t **ulwpp;
8630Sstevel@tonic-gate 	ulwp_t *ulwp;
864*6247Sraf 	ulwp_t *prev;
865*6247Sraf 
866*6247Sraf 	if ((ulwpp = queue_slot(qp, &prev, more)) == NULL)
8670Sstevel@tonic-gate 		return (NULL);
8680Sstevel@tonic-gate 	ulwp = *ulwpp;
869*6247Sraf 	queue_unlink(qp, ulwpp, prev);
8700Sstevel@tonic-gate 	ulwp->ul_sleepq = NULL;
8710Sstevel@tonic-gate 	ulwp->ul_wchan = NULL;
8720Sstevel@tonic-gate 	return (ulwp);
8730Sstevel@tonic-gate }
8740Sstevel@tonic-gate 
8750Sstevel@tonic-gate /*
8760Sstevel@tonic-gate  * Return a pointer to the highest priority thread sleeping on wchan.
8770Sstevel@tonic-gate  */
8780Sstevel@tonic-gate ulwp_t *
879*6247Sraf queue_waiter(queue_head_t *qp)
8800Sstevel@tonic-gate {
8810Sstevel@tonic-gate 	ulwp_t **ulwpp;
882*6247Sraf 	ulwp_t *prev;
883*6247Sraf 	int more;
884*6247Sraf 
885*6247Sraf 	if ((ulwpp = queue_slot(qp, &prev, &more)) == NULL)
8860Sstevel@tonic-gate 		return (NULL);
8870Sstevel@tonic-gate 	return (*ulwpp);
8880Sstevel@tonic-gate }
8890Sstevel@tonic-gate 
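/*
 * Remove the current thread from its sleep queue.
 * Return non-zero if other threads remain queued on the queue root.
 */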
890*6247Sraf int
891*6247Sraf dequeue_self(queue_head_t *qp)
8920Sstevel@tonic-gate {
8930Sstevel@tonic-gate 	ulwp_t *self = curthread;
894*6247Sraf 	queue_root_t *qrp;
8950Sstevel@tonic-gate 	ulwp_t **ulwpp;
8960Sstevel@tonic-gate 	ulwp_t *ulwp;
897*6247Sraf 	ulwp_t *prev;
8980Sstevel@tonic-gate 	int found = 0;
8990Sstevel@tonic-gate 
9000Sstevel@tonic-gate 	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
9010Sstevel@tonic-gate 
9020Sstevel@tonic-gate 	/* find self on the sleep queue */
903*6247Sraf 	if ((qrp = qp->qh_root) != NULL) {
904*6247Sraf 		for (prev = NULL, ulwpp = &qrp->qr_head;
905*6247Sraf 		    (ulwp = *ulwpp) != NULL;
906*6247Sraf 		    prev = ulwp, ulwpp = &ulwp->ul_link) {
907*6247Sraf 			if (ulwp == self) {
908*6247Sraf 				queue_unlink(qp, ulwpp, prev);
909*6247Sraf 				self->ul_cvmutex = NULL;
910*6247Sraf 				self->ul_sleepq = NULL;
911*6247Sraf 				self->ul_wchan = NULL;
912*6247Sraf 				found = 1;
913*6247Sraf 				break;
914*6247Sraf 			}
9150Sstevel@tonic-gate 		}
9160Sstevel@tonic-gate 	}
9170Sstevel@tonic-gate 
9180Sstevel@tonic-gate 	if (!found)
9190Sstevel@tonic-gate 		thr_panic("dequeue_self(): curthread not found on queue");
9200Sstevel@tonic-gate 
921*6247Sraf 	return ((qrp = qp->qh_root) != NULL && qrp->qr_head != NULL);
9220Sstevel@tonic-gate }
9230Sstevel@tonic-gate 
9240Sstevel@tonic-gate /*
9250Sstevel@tonic-gate  * Called from call_user_handler() and _thrp_suspend() to take
9260Sstevel@tonic-gate  * ourselves off of our sleep queue so we can grab locks.
9270Sstevel@tonic-gate  */
9280Sstevel@tonic-gate void
9290Sstevel@tonic-gate unsleep_self(void)
9300Sstevel@tonic-gate {
9310Sstevel@tonic-gate 	ulwp_t *self = curthread;
9320Sstevel@tonic-gate 	queue_head_t *qp;
9330Sstevel@tonic-gate 
9340Sstevel@tonic-gate 	/*
9350Sstevel@tonic-gate 	 * Calling enter_critical()/exit_critical() here would lead
9360Sstevel@tonic-gate 	 * to recursion.  Just manipulate self->ul_critical directly.
9370Sstevel@tonic-gate 	 */
9380Sstevel@tonic-gate 	self->ul_critical++;
9390Sstevel@tonic-gate 	while (self->ul_sleepq != NULL) {
9400Sstevel@tonic-gate 		qp = queue_lock(self->ul_wchan, self->ul_qtype);
9410Sstevel@tonic-gate 		/*
9420Sstevel@tonic-gate 		 * We may have been moved from a CV queue to a
9430Sstevel@tonic-gate 		 * mutex queue while we were attempting queue_lock().
9440Sstevel@tonic-gate 		 * If so, just loop around and try again.
9450Sstevel@tonic-gate 		 * dequeue_self() clears self->ul_sleepq.
9460Sstevel@tonic-gate 		 */
947*6247Sraf 		if (qp == self->ul_sleepq)
948*6247Sraf 			(void) dequeue_self(qp);
9490Sstevel@tonic-gate 		queue_unlock(qp);
9500Sstevel@tonic-gate 	}
951*6247Sraf 	self->ul_writer = 0;
9520Sstevel@tonic-gate 	self->ul_critical--;
9530Sstevel@tonic-gate }
9540Sstevel@tonic-gate 
9550Sstevel@tonic-gate /*
9560Sstevel@tonic-gate  * Common code for calling the ___lwp_mutex_timedlock() system call.
9570Sstevel@tonic-gate  * Returns with mutex_owner and mutex_ownerpid set correctly.
9580Sstevel@tonic-gate  */
9594574Sraf static int
9600Sstevel@tonic-gate mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
9610Sstevel@tonic-gate {
9620Sstevel@tonic-gate 	ulwp_t *self = curthread;
9630Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
9644574Sraf 	int mtype = mp->mutex_type;
9650Sstevel@tonic-gate 	hrtime_t begin_sleep;
9664574Sraf 	int acquired;
9670Sstevel@tonic-gate 	int error;
9680Sstevel@tonic-gate 
9690Sstevel@tonic-gate 	self->ul_sp = stkptr();
9700Sstevel@tonic-gate 	self->ul_wchan = mp;
9710Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
9720Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
9730Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = mp;
9740Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
9750Sstevel@tonic-gate 	}
9760Sstevel@tonic-gate 	if (msp) {
9770Sstevel@tonic-gate 		tdb_incr(msp->mutex_sleep);
9780Sstevel@tonic-gate 		begin_sleep = gethrtime();
9790Sstevel@tonic-gate 	}
9800Sstevel@tonic-gate 
9810Sstevel@tonic-gate 	DTRACE_PROBE1(plockstat, mutex__block, mp);
9820Sstevel@tonic-gate 
9830Sstevel@tonic-gate 	for (;;) {
9844574Sraf 		/*
9854574Sraf 		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
9864574Sraf 		 * means we successfully acquired the lock.
9874574Sraf 		 */
9884574Sraf 		if ((error = ___lwp_mutex_timedlock(mp, tsp)) != 0 &&
9894574Sraf 		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
9904574Sraf 			acquired = 0;
9910Sstevel@tonic-gate 			break;
9920Sstevel@tonic-gate 		}
9930Sstevel@tonic-gate 
9944574Sraf 		if (mtype & USYNC_PROCESS) {
9950Sstevel@tonic-gate 			/*
9960Sstevel@tonic-gate 			 * Defend against forkall().  We may be the child,
9970Sstevel@tonic-gate 			 * in which case we don't actually own the mutex.
9980Sstevel@tonic-gate 			 */
9990Sstevel@tonic-gate 			enter_critical(self);
10000Sstevel@tonic-gate 			if (mp->mutex_ownerpid == udp->pid) {
10010Sstevel@tonic-gate 				mp->mutex_owner = (uintptr_t)self;
10020Sstevel@tonic-gate 				exit_critical(self);
10034574Sraf 				acquired = 1;
10040Sstevel@tonic-gate 				break;
10050Sstevel@tonic-gate 			}
10060Sstevel@tonic-gate 			exit_critical(self);
10070Sstevel@tonic-gate 		} else {
10080Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
10094574Sraf 			acquired = 1;
10100Sstevel@tonic-gate 			break;
10110Sstevel@tonic-gate 		}
10120Sstevel@tonic-gate 	}
10130Sstevel@tonic-gate 	if (msp)
10140Sstevel@tonic-gate 		msp->mutex_sleep_time += gethrtime() - begin_sleep;
10150Sstevel@tonic-gate 	self->ul_wchan = NULL;
10160Sstevel@tonic-gate 	self->ul_sp = 0;
10170Sstevel@tonic-gate 
10184574Sraf 	if (acquired) {
10194574Sraf 		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
10204574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
10214574Sraf 	} else {
10224574Sraf 		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
10234574Sraf 		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
10244574Sraf 	}
10254574Sraf 
10260Sstevel@tonic-gate 	return (error);
10270Sstevel@tonic-gate }
10280Sstevel@tonic-gate 
10290Sstevel@tonic-gate /*
10300Sstevel@tonic-gate  * Common code for calling the ___lwp_mutex_trylock() system call.
10310Sstevel@tonic-gate  * Returns with mutex_owner and mutex_ownerpid set correctly.
10320Sstevel@tonic-gate  */
10330Sstevel@tonic-gate int
10340Sstevel@tonic-gate mutex_trylock_kernel(mutex_t *mp)
10350Sstevel@tonic-gate {
10360Sstevel@tonic-gate 	ulwp_t *self = curthread;
10370Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
10384574Sraf 	int mtype = mp->mutex_type;
10390Sstevel@tonic-gate 	int error;
10404574Sraf 	int acquired;
10410Sstevel@tonic-gate 
10420Sstevel@tonic-gate 	for (;;) {
10434574Sraf 		/*
10444574Sraf 		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
10454574Sraf 		 * means we successfully acquired the lock.
10464574Sraf 		 */
10474574Sraf 		if ((error = ___lwp_mutex_trylock(mp)) != 0 &&
10484574Sraf 		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
10494574Sraf 			acquired = 0;
10500Sstevel@tonic-gate 			break;
10510Sstevel@tonic-gate 		}
10520Sstevel@tonic-gate 
10534574Sraf 		if (mtype & USYNC_PROCESS) {
10540Sstevel@tonic-gate 			/*
10550Sstevel@tonic-gate 			 * Defend against forkall().  We may be the child,
10560Sstevel@tonic-gate 			 * in which case we don't actually own the mutex.
10570Sstevel@tonic-gate 			 */
10580Sstevel@tonic-gate 			enter_critical(self);
10590Sstevel@tonic-gate 			if (mp->mutex_ownerpid == udp->pid) {
10600Sstevel@tonic-gate 				mp->mutex_owner = (uintptr_t)self;
10610Sstevel@tonic-gate 				exit_critical(self);
10624574Sraf 				acquired = 1;
10630Sstevel@tonic-gate 				break;
10640Sstevel@tonic-gate 			}
10650Sstevel@tonic-gate 			exit_critical(self);
10660Sstevel@tonic-gate 		} else {
10670Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
10684574Sraf 			acquired = 1;
10690Sstevel@tonic-gate 			break;
10700Sstevel@tonic-gate 		}
10710Sstevel@tonic-gate 	}
10720Sstevel@tonic-gate 
10734574Sraf 	if (acquired) {
10744574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
10754574Sraf 	} else if (error != EBUSY) {
10764574Sraf 		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
10774574Sraf 	}
10784574Sraf 
10790Sstevel@tonic-gate 	return (error);
10800Sstevel@tonic-gate }
10810Sstevel@tonic-gate 
10820Sstevel@tonic-gate volatile sc_shared_t *
10830Sstevel@tonic-gate setup_schedctl(void)
10840Sstevel@tonic-gate {
10850Sstevel@tonic-gate 	ulwp_t *self = curthread;
10860Sstevel@tonic-gate 	volatile sc_shared_t *scp;
10870Sstevel@tonic-gate 	sc_shared_t *tmp;
10880Sstevel@tonic-gate 
10890Sstevel@tonic-gate 	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
10900Sstevel@tonic-gate 	    !self->ul_vfork &&			/* not a child of vfork() */
10910Sstevel@tonic-gate 	    !self->ul_schedctl_called) {	/* haven't been called before */
10920Sstevel@tonic-gate 		enter_critical(self);
10930Sstevel@tonic-gate 		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
10940Sstevel@tonic-gate 		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
10950Sstevel@tonic-gate 			self->ul_schedctl = scp = tmp;
10960Sstevel@tonic-gate 		exit_critical(self);
10970Sstevel@tonic-gate 	}
10980Sstevel@tonic-gate 	/*
10990Sstevel@tonic-gate 	 * Unless the call to setup_schedctl() is surrounded
11000Sstevel@tonic-gate 	 * by enter_critical()/exit_critical(), the address
11010Sstevel@tonic-gate 	 * we are returning could be invalid due to a forkall()
11020Sstevel@tonic-gate 	 * having occurred in another thread.
11030Sstevel@tonic-gate 	 */
11040Sstevel@tonic-gate 	return (scp);
11050Sstevel@tonic-gate }
11060Sstevel@tonic-gate 
11070Sstevel@tonic-gate /*
11080Sstevel@tonic-gate  * Interfaces from libsched, incorporated into libc.
11090Sstevel@tonic-gate  * libsched.so.1 is now a filter library onto libc.
11100Sstevel@tonic-gate  */
11110Sstevel@tonic-gate #pragma weak schedctl_lookup = _schedctl_init
11120Sstevel@tonic-gate #pragma weak _schedctl_lookup = _schedctl_init
11130Sstevel@tonic-gate #pragma weak schedctl_init = _schedctl_init
11140Sstevel@tonic-gate schedctl_t *
11150Sstevel@tonic-gate _schedctl_init(void)
11160Sstevel@tonic-gate {
11170Sstevel@tonic-gate 	volatile sc_shared_t *scp = setup_schedctl();
11180Sstevel@tonic-gate 	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
11190Sstevel@tonic-gate }
11200Sstevel@tonic-gate 
11210Sstevel@tonic-gate #pragma weak schedctl_exit = _schedctl_exit
11220Sstevel@tonic-gate void
11230Sstevel@tonic-gate _schedctl_exit(void)
11240Sstevel@tonic-gate {
11250Sstevel@tonic-gate }
11260Sstevel@tonic-gate 
11270Sstevel@tonic-gate /*
11280Sstevel@tonic-gate  * Contract private interface for Java.
11290Sstevel@tonic-gate  * Set up the schedctl data if it doesn't exist yet.
11300Sstevel@tonic-gate  * Return a pointer to the pointer to the schedctl data.
11310Sstevel@tonic-gate  */
11320Sstevel@tonic-gate volatile sc_shared_t *volatile *
11330Sstevel@tonic-gate _thr_schedctl(void)
11340Sstevel@tonic-gate {
11350Sstevel@tonic-gate 	ulwp_t *self = curthread;
11360Sstevel@tonic-gate 	volatile sc_shared_t *volatile *ptr;
11370Sstevel@tonic-gate 
11380Sstevel@tonic-gate 	if (self->ul_vfork)
11390Sstevel@tonic-gate 		return (NULL);
11400Sstevel@tonic-gate 	if (*(ptr = &self->ul_schedctl) == NULL)
11410Sstevel@tonic-gate 		(void) setup_schedctl();
11420Sstevel@tonic-gate 	return (ptr);
11430Sstevel@tonic-gate }
11440Sstevel@tonic-gate 
11450Sstevel@tonic-gate /*
11460Sstevel@tonic-gate  * Block signals and attempt to block preemption.
11470Sstevel@tonic-gate  * no_preempt()/preempt() must be used in pairs but can be nested.
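 * Typical usage, as in spin_lock_set()/spin_lock_clear() above:
 *	no_preempt(self);
 *	... non-preemptible critical work ...
 *	preempt(self);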
11480Sstevel@tonic-gate  */
11490Sstevel@tonic-gate void
11500Sstevel@tonic-gate no_preempt(ulwp_t *self)
11510Sstevel@tonic-gate {
11520Sstevel@tonic-gate 	volatile sc_shared_t *scp;
11530Sstevel@tonic-gate 
11540Sstevel@tonic-gate 	if (self->ul_preempt++ == 0) {
11550Sstevel@tonic-gate 		enter_critical(self);
11560Sstevel@tonic-gate 		if ((scp = self->ul_schedctl) != NULL ||
11570Sstevel@tonic-gate 		    (scp = setup_schedctl()) != NULL) {
11580Sstevel@tonic-gate 			/*
11590Sstevel@tonic-gate 			 * Save the pre-existing preempt value.
11600Sstevel@tonic-gate 			 */
11610Sstevel@tonic-gate 			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
11620Sstevel@tonic-gate 			scp->sc_preemptctl.sc_nopreempt = 1;
11630Sstevel@tonic-gate 		}
11640Sstevel@tonic-gate 	}
11650Sstevel@tonic-gate }
11660Sstevel@tonic-gate 
11670Sstevel@tonic-gate /*
11680Sstevel@tonic-gate  * Undo the effects of no_preempt().
11690Sstevel@tonic-gate  */
11700Sstevel@tonic-gate void
11710Sstevel@tonic-gate preempt(ulwp_t *self)
11720Sstevel@tonic-gate {
11730Sstevel@tonic-gate 	volatile sc_shared_t *scp;
11740Sstevel@tonic-gate 
11750Sstevel@tonic-gate 	ASSERT(self->ul_preempt > 0);
11760Sstevel@tonic-gate 	if (--self->ul_preempt == 0) {
11770Sstevel@tonic-gate 		if ((scp = self->ul_schedctl) != NULL) {
11780Sstevel@tonic-gate 			/*
11790Sstevel@tonic-gate 			 * Restore the pre-existing preempt value.
11800Sstevel@tonic-gate 			 */
11810Sstevel@tonic-gate 			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
11820Sstevel@tonic-gate 			if (scp->sc_preemptctl.sc_yield &&
11830Sstevel@tonic-gate 			    scp->sc_preemptctl.sc_nopreempt == 0) {
11840Sstevel@tonic-gate 				lwp_yield();
11850Sstevel@tonic-gate 				if (scp->sc_preemptctl.sc_yield) {
11860Sstevel@tonic-gate 					/*
11870Sstevel@tonic-gate 					 * Shouldn't happen.  This is either
11880Sstevel@tonic-gate 					 * a race condition or the thread
11890Sstevel@tonic-gate 					 * just entered the real-time class.
11900Sstevel@tonic-gate 					 */
11910Sstevel@tonic-gate 					lwp_yield();
11920Sstevel@tonic-gate 					scp->sc_preemptctl.sc_yield = 0;
11930Sstevel@tonic-gate 				}
11940Sstevel@tonic-gate 			}
11950Sstevel@tonic-gate 		}
11960Sstevel@tonic-gate 		exit_critical(self);
11970Sstevel@tonic-gate 	}
11980Sstevel@tonic-gate }
11990Sstevel@tonic-gate 
12000Sstevel@tonic-gate /*
12010Sstevel@tonic-gate  * If a call to preempt() would cause the current thread to yield or to
12020Sstevel@tonic-gate  * take deferred actions in exit_critical(), then unpark the specified
12030Sstevel@tonic-gate  * lwp so it can run while we delay.  Return the original lwpid if the
12040Sstevel@tonic-gate  * unpark was not performed, else return zero.  The tests are a repeat
12050Sstevel@tonic-gate  * of some of the tests in preempt(), above.  This is a statistical
12060Sstevel@tonic-gate  * optimization solely for cond_sleep_queue(), below.
12070Sstevel@tonic-gate  */
12080Sstevel@tonic-gate static lwpid_t
12090Sstevel@tonic-gate preempt_unpark(ulwp_t *self, lwpid_t lwpid)
12100Sstevel@tonic-gate {
12110Sstevel@tonic-gate 	volatile sc_shared_t *scp = self->ul_schedctl;
12120Sstevel@tonic-gate 
12130Sstevel@tonic-gate 	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
12140Sstevel@tonic-gate 	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
12150Sstevel@tonic-gate 	    (self->ul_curplease && self->ul_critical == 1)) {
12160Sstevel@tonic-gate 		(void) __lwp_unpark(lwpid);
12170Sstevel@tonic-gate 		lwpid = 0;
12180Sstevel@tonic-gate 	}
12190Sstevel@tonic-gate 	return (lwpid);
12200Sstevel@tonic-gate }
12210Sstevel@tonic-gate 
12220Sstevel@tonic-gate /*
12234613Sraf  * Spin for a while (if 'tryhard' is true), trying to grab the lock.
12240Sstevel@tonic-gate  * If this fails, return EBUSY and let the caller deal with it.
12250Sstevel@tonic-gate  * If this succeeds, return 0 with mutex_owner set to curthread.
12260Sstevel@tonic-gate  */
12274574Sraf static int
12284613Sraf mutex_trylock_adaptive(mutex_t *mp, int tryhard)
12290Sstevel@tonic-gate {
12300Sstevel@tonic-gate 	ulwp_t *self = curthread;
12314574Sraf 	int error = EBUSY;
12320Sstevel@tonic-gate 	ulwp_t *ulwp;
12330Sstevel@tonic-gate 	volatile sc_shared_t *scp;
12345629Sraf 	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
12355629Sraf 	volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
12365629Sraf 	uint32_t new_lockword;
12375629Sraf 	int count = 0;
12385629Sraf 	int max_count;
12395629Sraf 	uint8_t max_spinners;
12404574Sraf 
12414574Sraf 	ASSERT(!(mp->mutex_type & USYNC_PROCESS));
12424574Sraf 
12434574Sraf 	if (MUTEX_OWNER(mp) == self)
12440Sstevel@tonic-gate 		return (EBUSY);
12450Sstevel@tonic-gate 
12464574Sraf 	/* short-cut, not definitive (see below) */
12474574Sraf 	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
12484574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
12495629Sraf 		error = ENOTRECOVERABLE;
12505629Sraf 		goto done;
12514574Sraf 	}
12524574Sraf 
12535629Sraf 	/*
12545629Sraf 	 * Make one attempt to acquire the lock before
12555629Sraf 	 * incurring the overhead of the spin loop.
12565629Sraf 	 */
12575629Sraf 	if (set_lock_byte(lockp) == 0) {
12585629Sraf 		*ownerp = (uintptr_t)self;
12595629Sraf 		error = 0;
12605629Sraf 		goto done;
12615629Sraf 	}
12625629Sraf 	if (!tryhard)
12635629Sraf 		goto done;
12645629Sraf 	if (ncpus == 0)
12655629Sraf 		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
12665629Sraf 	if ((max_spinners = self->ul_max_spinners) >= ncpus)
12675629Sraf 		max_spinners = ncpus - 1;
12685629Sraf 	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
12695629Sraf 	if (max_count == 0)
12705629Sraf 		goto done;
12715629Sraf 
12720Sstevel@tonic-gate 	/*
12730Sstevel@tonic-gate 	 * This spin loop is unfair to lwps that have already dropped into
12740Sstevel@tonic-gate 	 * the kernel to sleep.  They will starve on a highly-contended mutex.
12750Sstevel@tonic-gate 	 * This is just too bad.  The adaptive spin algorithm is intended
12760Sstevel@tonic-gate 	 * to allow programs with highly-contended locks (that is, broken
12770Sstevel@tonic-gate 	 * programs) to execute with reasonable speed despite their contention.
12780Sstevel@tonic-gate 	 * Being fair would reduce the speed of such programs and well-written
12790Sstevel@tonic-gate 	 * programs will not suffer in any case.
12800Sstevel@tonic-gate 	 */
12815629Sraf 	enter_critical(self);
12825629Sraf 	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
12835629Sraf 		exit_critical(self);
12845629Sraf 		goto done;
12855629Sraf 	}
12865629Sraf 	DTRACE_PROBE1(plockstat, mutex__spin, mp);
12875629Sraf 	for (count = 1; ; count++) {
12880Sstevel@tonic-gate 		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
12890Sstevel@tonic-gate 			*ownerp = (uintptr_t)self;
12904574Sraf 			error = 0;
12914574Sraf 			break;
12920Sstevel@tonic-gate 		}
12935629Sraf 		if (count == max_count)
12945629Sraf 			break;
12950Sstevel@tonic-gate 		SMT_PAUSE();
12960Sstevel@tonic-gate 		/*
12970Sstevel@tonic-gate 		 * Stop spinning if the mutex owner is not running on
12980Sstevel@tonic-gate 		 * a processor; it will not drop the lock any time soon
12990Sstevel@tonic-gate 		 * and we would just be wasting time by continuing to spin.
13000Sstevel@tonic-gate 		 *
13010Sstevel@tonic-gate 		 * Note that we are looking at another thread (ulwp_t)
13020Sstevel@tonic-gate 		 * without ensuring that the other thread does not exit.
13030Sstevel@tonic-gate 		 * The scheme relies on ulwp_t structures never being
13040Sstevel@tonic-gate 		 * deallocated by the library (the library employs a free
13050Sstevel@tonic-gate 		 * list of ulwp_t structs that are reused when new threads
13060Sstevel@tonic-gate 		 * are created) and on schedctl shared memory never being
13070Sstevel@tonic-gate 		 * deallocated once created via __schedctl().
13080Sstevel@tonic-gate 		 *
13090Sstevel@tonic-gate 		 * Thus, the worst that can happen when the spinning thread
13100Sstevel@tonic-gate 		 * looks at the owner's schedctl data is that it is looking
13110Sstevel@tonic-gate 		 * at some other thread's schedctl data.  This almost never
13120Sstevel@tonic-gate 		 * happens and is benign when it does.
13130Sstevel@tonic-gate 		 */
13140Sstevel@tonic-gate 		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
13150Sstevel@tonic-gate 		    ((scp = ulwp->ul_schedctl) == NULL ||
13160Sstevel@tonic-gate 		    scp->sc_state != SC_ONPROC))
13170Sstevel@tonic-gate 			break;
13180Sstevel@tonic-gate 	}
13195629Sraf 	new_lockword = spinners_decr(&mp->mutex_lockword);
13205629Sraf 	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
13215629Sraf 		/*
13225629Sraf 		 * We haven't yet acquired the lock, the lock
13235629Sraf 		 * is free, and there are no other spinners.
13245629Sraf 		 * Make one final attempt to acquire the lock.
13255629Sraf 		 *
13265629Sraf 		 * This isn't strictly necessary since mutex_lock_queue()
13275629Sraf 		 * (the next action this thread will take if it doesn't
13285629Sraf 		 * acquire the lock here) makes one attempt to acquire
13295629Sraf 		 * the lock before putting the thread to sleep.
13305629Sraf 		 *
13315629Sraf 		 * If the next action for this thread (on failure here)
13325629Sraf 		 * were not to call mutex_lock_queue(), this would be
13335629Sraf 		 * necessary for correctness, to avoid ending up with an
13345629Sraf 		 * unheld mutex with waiters but no one to wake them up.
13355629Sraf 		 */
13365629Sraf 		if (set_lock_byte(lockp) == 0) {
13375629Sraf 			*ownerp = (uintptr_t)self;
13385629Sraf 			error = 0;
13395629Sraf 		}
13405629Sraf 		count++;
13415629Sraf 	}
13420Sstevel@tonic-gate 	exit_critical(self);
13430Sstevel@tonic-gate 
13445629Sraf done:
13454574Sraf 	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
13464574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
13474574Sraf 		/*
13486057Sraf 		 * We shouldn't own the mutex.
13496057Sraf 		 * Just clear the lock; everyone has already been woken up.
13504574Sraf 		 */
13514574Sraf 		mp->mutex_owner = 0;
13526057Sraf 		(void) clear_lockbyte(&mp->mutex_lockword);
13534574Sraf 		error = ENOTRECOVERABLE;
13544574Sraf 	}
13554574Sraf 
13564574Sraf 	if (error) {
13575629Sraf 		if (count) {
13585629Sraf 			DTRACE_PROBE2(plockstat, mutex__spun, 0, count);
13595629Sraf 		}
13604574Sraf 		if (error != EBUSY) {
13614574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
13624574Sraf 		}
13634574Sraf 	} else {
13645629Sraf 		if (count) {
13655629Sraf 			DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
13665629Sraf 		}
13674574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
13684574Sraf 		if (mp->mutex_flag & LOCK_OWNERDEAD) {
13694574Sraf 			ASSERT(mp->mutex_type & LOCK_ROBUST);
13704574Sraf 			error = EOWNERDEAD;
13714574Sraf 		}
13724574Sraf 	}
13734574Sraf 
13744574Sraf 	return (error);
13750Sstevel@tonic-gate }
13760Sstevel@tonic-gate 
13770Sstevel@tonic-gate /*
13780Sstevel@tonic-gate  * Same as mutex_trylock_adaptive(), except specifically for queue locks.
13790Sstevel@tonic-gate  * The owner field is not set here; the caller (spin_lock_set()) sets it.
13800Sstevel@tonic-gate  */
13814574Sraf static int
13820Sstevel@tonic-gate mutex_queuelock_adaptive(mutex_t *mp)
13830Sstevel@tonic-gate {
13840Sstevel@tonic-gate 	ulwp_t *ulwp;
13850Sstevel@tonic-gate 	volatile sc_shared_t *scp;
13860Sstevel@tonic-gate 	volatile uint8_t *lockp;
13870Sstevel@tonic-gate 	volatile uint64_t *ownerp;
13880Sstevel@tonic-gate 	int count = curthread->ul_queue_spin;
13890Sstevel@tonic-gate 
13900Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
13910Sstevel@tonic-gate 
13920Sstevel@tonic-gate 	if (count == 0)
13930Sstevel@tonic-gate 		return (EBUSY);
13940Sstevel@tonic-gate 
13950Sstevel@tonic-gate 	lockp = (volatile uint8_t *)&mp->mutex_lockw;
13960Sstevel@tonic-gate 	ownerp = (volatile uint64_t *)&mp->mutex_owner;
13970Sstevel@tonic-gate 	while (--count >= 0) {
13980Sstevel@tonic-gate 		if (*lockp == 0 && set_lock_byte(lockp) == 0)
13990Sstevel@tonic-gate 			return (0);
14000Sstevel@tonic-gate 		SMT_PAUSE();
14010Sstevel@tonic-gate 		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
14020Sstevel@tonic-gate 		    ((scp = ulwp->ul_schedctl) == NULL ||
14030Sstevel@tonic-gate 		    scp->sc_state != SC_ONPROC))
14040Sstevel@tonic-gate 			break;
14050Sstevel@tonic-gate 	}
14060Sstevel@tonic-gate 
14070Sstevel@tonic-gate 	return (EBUSY);
14080Sstevel@tonic-gate }
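
/*
 * A note on the spin counts used here: ul_queue_spin, ul_adaptive_spin
 * and ul_max_spinners are per-thread copies of global tunables that are
 * established at process startup.  As an illustrative sketch (the
 * variable names and values below are assumptions, offered in the same
 * spirit as the _THREAD_ASYNC_SAFE variable mentioned later in this
 * file, not a recommendation), they can be influenced from the
 * environment before the process starts:
 *
 *	export _THREAD_QUEUE_SPIN=1000		(queue-lock spin count)
 *	export _THREAD_ADAPTIVE_SPIN=500	(adaptive-mutex spin count)
 *	export _THREAD_MAX_SPINNERS=100		(concurrent-spinner limit)
 */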
14090Sstevel@tonic-gate 
14100Sstevel@tonic-gate /*
14110Sstevel@tonic-gate  * Like mutex_trylock_adaptive(), but for process-shared mutexes.
14124613Sraf  * Spin for a while (if 'tryhard' is true), trying to grab the lock.
14130Sstevel@tonic-gate  * If this fails, return EBUSY and let the caller deal with it.
14140Sstevel@tonic-gate  * If this succeeds, return 0 with mutex_owner set to curthread
14150Sstevel@tonic-gate  * and mutex_ownerpid set to the current pid.
14160Sstevel@tonic-gate  */
14174574Sraf static int
14184613Sraf mutex_trylock_process(mutex_t *mp, int tryhard)
14190Sstevel@tonic-gate {
14200Sstevel@tonic-gate 	ulwp_t *self = curthread;
14215629Sraf 	uberdata_t *udp = self->ul_uberdata;
14224574Sraf 	int error = EBUSY;
14236057Sraf 	volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64;
14245629Sraf 	uint32_t new_lockword;
14255629Sraf 	int count = 0;
14265629Sraf 	int max_count;
14275629Sraf 	uint8_t max_spinners;
14284574Sraf 
14294574Sraf 	ASSERT(mp->mutex_type & USYNC_PROCESS);
14304574Sraf 
14314574Sraf 	if (shared_mutex_held(mp))
14320Sstevel@tonic-gate 		return (EBUSY);
14330Sstevel@tonic-gate 
14344574Sraf 	/* short-cut, not definitive (see below) */
14354574Sraf 	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
14364574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
14375629Sraf 		error = ENOTRECOVERABLE;
14385629Sraf 		goto done;
14394574Sraf 	}
14404574Sraf 
14415629Sraf 	/*
14425629Sraf 	 * Make one attempt to acquire the lock before
14435629Sraf 	 * incurring the overhead of the spin loop.
14445629Sraf 	 */
14455629Sraf 	enter_critical(self);
14466057Sraf 	if (set_lock_byte64(lockp, udp->pid) == 0) {
14475629Sraf 		mp->mutex_owner = (uintptr_t)self;
14486057Sraf 		/* mp->mutex_ownerpid was set by set_lock_byte64() */
14495629Sraf 		exit_critical(self);
14505629Sraf 		error = 0;
14515629Sraf 		goto done;
14525629Sraf 	}
14535629Sraf 	exit_critical(self);
14545629Sraf 	if (!tryhard)
14555629Sraf 		goto done;
14564574Sraf 	if (ncpus == 0)
14574574Sraf 		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
14585629Sraf 	if ((max_spinners = self->ul_max_spinners) >= ncpus)
14595629Sraf 		max_spinners = ncpus - 1;
14605629Sraf 	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
14615629Sraf 	if (max_count == 0)
14625629Sraf 		goto done;
14635629Sraf 
14640Sstevel@tonic-gate 	/*
14650Sstevel@tonic-gate 	 * This is a process-shared mutex.
14660Sstevel@tonic-gate 	 * We cannot know if the owner is running on a processor.
14670Sstevel@tonic-gate 	 * We just spin and hope that it is.
14680Sstevel@tonic-gate 	 */
14694574Sraf 	enter_critical(self);
14705629Sraf 	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1) {
14715629Sraf 		exit_critical(self);
14725629Sraf 		goto done;
14735629Sraf 	}
14745629Sraf 	DTRACE_PROBE1(plockstat, mutex__spin, mp);
14755629Sraf 	for (count = 1; ; count++) {
14766057Sraf 		if ((*lockp & LOCKMASK64) == 0 &&
14776057Sraf 		    set_lock_byte64(lockp, udp->pid) == 0) {
14784574Sraf 			mp->mutex_owner = (uintptr_t)self;
14796057Sraf 			/* mp->mutex_ownerpid was set by set_lock_byte64() */
14804574Sraf 			error = 0;
14814574Sraf 			break;
14824574Sraf 		}
14835629Sraf 		if (count == max_count)
14845629Sraf 			break;
14854574Sraf 		SMT_PAUSE();
14864574Sraf 	}
14875629Sraf 	new_lockword = spinners_decr(&mp->mutex_lockword);
14885629Sraf 	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
14895629Sraf 		/*
14905629Sraf 		 * We haven't yet acquired the lock, the lock
14915629Sraf 		 * is free, and there are no other spinners.
14925629Sraf 		 * Make one final attempt to acquire the lock.
14935629Sraf 		 *
14945629Sraf 		 * This isn't strictly necessary since mutex_lock_kernel()
14955629Sraf 		 * (the next action this thread will take if it doesn't
14965629Sraf 		 * acquire the lock here) makes one attempt to acquire
14975629Sraf 		 * the lock before putting the thread to sleep.
14985629Sraf 		 *
14995629Sraf 		 * If the next action for this thread (on failure here)
15005629Sraf 		 * were not to call mutex_lock_kernel(), this would be
15015629Sraf 		 * necessary for correctness, to avoid ending up with an
15025629Sraf 		 * unheld mutex with waiters but no one to wake them up.
15035629Sraf 		 */
15046057Sraf 		if (set_lock_byte64(lockp, udp->pid) == 0) {
15055629Sraf 			mp->mutex_owner = (uintptr_t)self;
15066057Sraf 			/* mp->mutex_ownerpid was set by set_lock_byte64() */
15075629Sraf 			error = 0;
15085629Sraf 		}
15095629Sraf 		count++;
15105629Sraf 	}
15114574Sraf 	exit_critical(self);
15124574Sraf 
15135629Sraf done:
15144574Sraf 	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
15154574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
15164574Sraf 		/*
15176057Sraf 		 * We shouldn't own the mutex.
15186057Sraf 		 * Just clear the lock; everyone has already been woken up.
15194574Sraf 		 */
15204574Sraf 		mp->mutex_owner = 0;
15216057Sraf 		/* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
15226057Sraf 		(void) clear_lockbyte64(&mp->mutex_lockword64);
15234574Sraf 		error = ENOTRECOVERABLE;
15240Sstevel@tonic-gate 	}
15250Sstevel@tonic-gate 
15264574Sraf 	if (error) {
15275629Sraf 		if (count) {
15285629Sraf 			DTRACE_PROBE2(plockstat, mutex__spun, 0, count);
15295629Sraf 		}
15304574Sraf 		if (error != EBUSY) {
15314574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
15324574Sraf 		}
15334574Sraf 	} else {
15345629Sraf 		if (count) {
15355629Sraf 			DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
15365629Sraf 		}
15374574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
15384574Sraf 		if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) {
15394574Sraf 			ASSERT(mp->mutex_type & LOCK_ROBUST);
15404574Sraf 			if (mp->mutex_flag & LOCK_OWNERDEAD)
15414574Sraf 				error = EOWNERDEAD;
15424574Sraf 			else if (mp->mutex_type & USYNC_PROCESS_ROBUST)
15434574Sraf 				error = ELOCKUNMAPPED;
15444574Sraf 			else
15454574Sraf 				error = EOWNERDEAD;
15464574Sraf 		}
15474574Sraf 	}
15484574Sraf 
15494574Sraf 	return (error);
15500Sstevel@tonic-gate }
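
/*
 * For reference, a USYNC_PROCESS mutex such as the one handled above is
 * what an application creates with the standard attribute interfaces,
 * placing the mutex itself in shared memory.  This is an illustrative
 * sketch only; the file name and the absence of error checking are
 * assumptions of the example:
 *
 *	#include <pthread.h>
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tmp/example-lock", O_RDWR | O_CREAT, 0600);
 *	(void) ftruncate(fd, sizeof (pthread_mutex_t));
 *	pthread_mutex_t *mp = mmap(NULL, sizeof (*mp),
 *	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	pthread_mutexattr_t attr;
 *	(void) pthread_mutexattr_init(&attr);
 *	(void) pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	(void) pthread_mutex_init(mp, &attr);	(first process only)
 *
 *	(void) pthread_mutex_lock(mp);		(any cooperating process)
 */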
15510Sstevel@tonic-gate 
15520Sstevel@tonic-gate /*
15530Sstevel@tonic-gate  * Mutex wakeup code for releasing a USYNC_THREAD mutex.
15540Sstevel@tonic-gate  * Returns the lwpid of the thread that was dequeued, if any.
15550Sstevel@tonic-gate  * The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
15560Sstevel@tonic-gate  * to wake up the specified lwp.
15570Sstevel@tonic-gate  */
15584574Sraf static lwpid_t
15590Sstevel@tonic-gate mutex_wakeup(mutex_t *mp)
15600Sstevel@tonic-gate {
15610Sstevel@tonic-gate 	lwpid_t lwpid = 0;
1562*6247Sraf 	int more;
15630Sstevel@tonic-gate 	queue_head_t *qp;
15640Sstevel@tonic-gate 	ulwp_t *ulwp;
15650Sstevel@tonic-gate 
15660Sstevel@tonic-gate 	/*
15670Sstevel@tonic-gate 	 * Dequeue a waiter from the sleep queue.  Don't touch the mutex
15680Sstevel@tonic-gate 	 * waiters bit if no one was found on the queue because the mutex
15690Sstevel@tonic-gate 	 * might have been deallocated or reallocated for another purpose.
15700Sstevel@tonic-gate 	 */
15710Sstevel@tonic-gate 	qp = queue_lock(mp, MX);
1572*6247Sraf 	if ((ulwp = dequeue(qp, &more)) != NULL) {
15730Sstevel@tonic-gate 		lwpid = ulwp->ul_lwpid;
1574*6247Sraf 		mp->mutex_waiters = more;
15750Sstevel@tonic-gate 	}
15760Sstevel@tonic-gate 	queue_unlock(qp);
15770Sstevel@tonic-gate 	return (lwpid);
15780Sstevel@tonic-gate }
15790Sstevel@tonic-gate 
15800Sstevel@tonic-gate /*
15814574Sraf  * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex.
15824574Sraf  */
15834574Sraf static void
15844574Sraf mutex_wakeup_all(mutex_t *mp)
15854574Sraf {
15864574Sraf 	queue_head_t *qp;
1587*6247Sraf 	queue_root_t *qrp;
15884574Sraf 	int nlwpid = 0;
15894574Sraf 	int maxlwps = MAXLWPS;
15904574Sraf 	ulwp_t *ulwp;
15914574Sraf 	lwpid_t buffer[MAXLWPS];
15924574Sraf 	lwpid_t *lwpid = buffer;
15934574Sraf 
15944574Sraf 	/*
15954574Sraf 	 * Walk the list of waiters and prepare to wake up all of them.
15964574Sraf 	 * The waiters flag has already been cleared from the mutex.
15974574Sraf 	 *
15984574Sraf 	 * We keep track of lwpids that are to be unparked in lwpid[].
15994574Sraf 	 * __lwp_unpark_all() is called to unpark all of them after
16004574Sraf 	 * they have been removed from the sleep queue and the sleep
16014574Sraf 	 * queue lock has been dropped.  If we run out of space in our
16024574Sraf 	 * on-stack buffer, we need to allocate more but we can't call
16034574Sraf 	 * lmalloc() because we are holding a queue lock when the overflow
16044574Sraf 	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
16054574Sraf 	 * either because the application may have allocated a small
16064574Sraf 	 * stack and we don't want to overrun the stack.  So we call
16074574Sraf 	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
16084574Sraf 	 * system call directly since that path acquires no locks.
16094574Sraf 	 */
16104574Sraf 	qp = queue_lock(mp, MX);
1611*6247Sraf 	for (;;) {
1612*6247Sraf 		if ((qrp = qp->qh_root) == NULL ||
1613*6247Sraf 		    (ulwp = qrp->qr_head) == NULL)
1614*6247Sraf 			break;
1615*6247Sraf 		ASSERT(ulwp->ul_wchan == mp);
1616*6247Sraf 		queue_unlink(qp, &qrp->qr_head, NULL);
1617*6247Sraf 		ulwp->ul_sleepq = NULL;
1618*6247Sraf 		ulwp->ul_wchan = NULL;
1619*6247Sraf 		if (nlwpid == maxlwps)
1620*6247Sraf 			lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
1621*6247Sraf 		lwpid[nlwpid++] = ulwp->ul_lwpid;
16224574Sraf 	}
16234574Sraf 
16244574Sraf 	if (nlwpid == 0) {
16254574Sraf 		queue_unlock(qp);
16264574Sraf 	} else {
16275629Sraf 		mp->mutex_waiters = 0;
16284574Sraf 		no_preempt(curthread);
16294574Sraf 		queue_unlock(qp);
16304574Sraf 		if (nlwpid == 1)
16314574Sraf 			(void) __lwp_unpark(lwpid[0]);
16324574Sraf 		else
16334574Sraf 			(void) __lwp_unpark_all(lwpid, nlwpid);
16344574Sraf 		preempt(curthread);
16354574Sraf 	}
16364574Sraf 
16374574Sraf 	if (lwpid != buffer)
16384574Sraf 		(void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t));
16394574Sraf }
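
/*
 * The buffer-growth pattern used above (start with an on-stack array,
 * switch to a directly mmap()ed buffer only on overflow, free it with
 * munmap() at the end) is worth seeing in isolation.  In this sketch,
 * grow_ids(), have_work() and next_id() are hypothetical stand-ins for
 * alloc_lwpids() and the queue walk:
 *
 *	#define	MAXIDS	128
 *
 *	id_t buffer[MAXIDS];
 *	id_t *ids = buffer;
 *	int nids = 0;
 *	int maxids = MAXIDS;
 *
 *	while (have_work()) {
 *		if (nids == maxids)
 *			ids = grow_ids(ids, &nids, &maxids);
 *		ids[nids++] = next_id();
 *	}
 *	...
 *	if (ids != buffer)
 *		(void) munmap((void *)ids, maxids * sizeof (id_t));
 */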
16404574Sraf 
16414574Sraf /*
16425629Sraf  * Release a process-private mutex.
16435629Sraf  * As an optimization, if there are waiters but there are also spinners
16445629Sraf  * attempting to acquire the mutex, then don't bother waking up a waiter;
16455629Sraf  * one of the spinners will acquire the mutex soon and it would be a waste
16465629Sraf  * of resources to wake up some thread just to have it spin for a while
16475629Sraf  * and then possibly go back to sleep.  See mutex_trylock_adaptive().
16480Sstevel@tonic-gate  */
16494574Sraf static lwpid_t
16504574Sraf mutex_unlock_queue(mutex_t *mp, int release_all)
16510Sstevel@tonic-gate {
16525629Sraf 	lwpid_t lwpid = 0;
16535629Sraf 	uint32_t old_lockword;
16545629Sraf 
16556057Sraf 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
16565629Sraf 	mp->mutex_owner = 0;
16575629Sraf 	old_lockword = clear_lockbyte(&mp->mutex_lockword);
16585629Sraf 	if ((old_lockword & WAITERMASK) &&
16595629Sraf 	    (release_all || (old_lockword & SPINNERMASK) == 0)) {
16605629Sraf 		ulwp_t *self = curthread;
16610Sstevel@tonic-gate 		no_preempt(self);	/* ensure a prompt wakeup */
16625629Sraf 		if (release_all)
16635629Sraf 			mutex_wakeup_all(mp);
16645629Sraf 		else
16655629Sraf 			lwpid = mutex_wakeup(mp);
16665629Sraf 		if (lwpid == 0)
16675629Sraf 			preempt(self);
16684574Sraf 	}
16690Sstevel@tonic-gate 	return (lwpid);
16700Sstevel@tonic-gate }
16710Sstevel@tonic-gate 
16720Sstevel@tonic-gate /*
16730Sstevel@tonic-gate  * Like mutex_unlock_queue(), but for process-shared mutexes.
16740Sstevel@tonic-gate  */
16754574Sraf static void
16764574Sraf mutex_unlock_process(mutex_t *mp, int release_all)
16770Sstevel@tonic-gate {
16786057Sraf 	uint64_t old_lockword64;
16796057Sraf 
16806057Sraf 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
16810Sstevel@tonic-gate 	mp->mutex_owner = 0;
16826057Sraf 	/* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
16836057Sraf 	old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64);
16846057Sraf 	if ((old_lockword64 & WAITERMASK64) &&
16856057Sraf 	    (release_all || (old_lockword64 & SPINNERMASK64) == 0)) {
16865629Sraf 		ulwp_t *self = curthread;
16875629Sraf 		no_preempt(self);	/* ensure a prompt wakeup */
16885629Sraf 		(void) ___lwp_mutex_wakeup(mp, release_all);
16895629Sraf 		preempt(self);
16900Sstevel@tonic-gate 	}
16910Sstevel@tonic-gate }
16920Sstevel@tonic-gate 
16930Sstevel@tonic-gate void
16940Sstevel@tonic-gate stall(void)
16950Sstevel@tonic-gate {
16960Sstevel@tonic-gate 	for (;;)
16970Sstevel@tonic-gate 		(void) mutex_lock_kernel(&stall_mutex, NULL, NULL);
16980Sstevel@tonic-gate }
16990Sstevel@tonic-gate 
17000Sstevel@tonic-gate /*
17010Sstevel@tonic-gate  * Acquire a USYNC_THREAD mutex via user-level sleep queues.
17020Sstevel@tonic-gate  * We failed set_lock_byte(&mp->mutex_lockw) before coming here.
17034574Sraf  * If successful, returns with mutex_owner set correctly.
17040Sstevel@tonic-gate  */
17050Sstevel@tonic-gate int
17060Sstevel@tonic-gate mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp,
17070Sstevel@tonic-gate 	timespec_t *tsp)
17080Sstevel@tonic-gate {
17090Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
17100Sstevel@tonic-gate 	queue_head_t *qp;
17110Sstevel@tonic-gate 	hrtime_t begin_sleep;
17120Sstevel@tonic-gate 	int error = 0;
17130Sstevel@tonic-gate 
17140Sstevel@tonic-gate 	self->ul_sp = stkptr();
17150Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
17160Sstevel@tonic-gate 		self->ul_wchan = mp;
17170Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
17180Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = mp;
17190Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
17200Sstevel@tonic-gate 	}
17210Sstevel@tonic-gate 	if (msp) {
17220Sstevel@tonic-gate 		tdb_incr(msp->mutex_sleep);
17230Sstevel@tonic-gate 		begin_sleep = gethrtime();
17240Sstevel@tonic-gate 	}
17250Sstevel@tonic-gate 
17260Sstevel@tonic-gate 	DTRACE_PROBE1(plockstat, mutex__block, mp);
17270Sstevel@tonic-gate 
17280Sstevel@tonic-gate 	/*
17290Sstevel@tonic-gate 	 * Put ourself on the sleep queue, and while we are
17300Sstevel@tonic-gate 	 * unable to grab the lock, go park in the kernel.
17310Sstevel@tonic-gate 	 * Take ourself off the sleep queue after we acquire the lock.
17320Sstevel@tonic-gate 	 * The waiter bit can be set/cleared only while holding the queue lock.
17330Sstevel@tonic-gate 	 */
17340Sstevel@tonic-gate 	qp = queue_lock(mp, MX);
1735*6247Sraf 	enqueue(qp, self, 0);
17360Sstevel@tonic-gate 	mp->mutex_waiters = 1;
17370Sstevel@tonic-gate 	for (;;) {
17380Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
17390Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
1740*6247Sraf 			mp->mutex_waiters = dequeue_self(qp);
17410Sstevel@tonic-gate 			break;
17420Sstevel@tonic-gate 		}
17430Sstevel@tonic-gate 		set_parking_flag(self, 1);
17440Sstevel@tonic-gate 		queue_unlock(qp);
17450Sstevel@tonic-gate 		/*
17460Sstevel@tonic-gate 		 * __lwp_park() will return the residual time in tsp
17470Sstevel@tonic-gate 		 * if we are unparked before the timeout expires.
17480Sstevel@tonic-gate 		 */
17495629Sraf 		error = __lwp_park(tsp, 0);
17500Sstevel@tonic-gate 		set_parking_flag(self, 0);
17510Sstevel@tonic-gate 		/*
17520Sstevel@tonic-gate 		 * We could have taken a signal or suspended ourself.
17530Sstevel@tonic-gate 		 * If we did, then we removed ourself from the queue.
17540Sstevel@tonic-gate 		 * Someone else may have removed us from the queue
17550Sstevel@tonic-gate 		 * as a consequence of mutex_unlock().  We may have
17560Sstevel@tonic-gate 		 * gotten a timeout from __lwp_park().  Or we may still
17570Sstevel@tonic-gate 		 * be on the queue and this is just a spurious wakeup.
17580Sstevel@tonic-gate 		 */
17590Sstevel@tonic-gate 		qp = queue_lock(mp, MX);
17600Sstevel@tonic-gate 		if (self->ul_sleepq == NULL) {
17615629Sraf 			if (error) {
1762*6247Sraf 				mp->mutex_waiters = queue_waiter(qp)? 1 : 0;
17635629Sraf 				if (error != EINTR)
17645629Sraf 					break;
17655629Sraf 				error = 0;
17665629Sraf 			}
17670Sstevel@tonic-gate 			if (set_lock_byte(&mp->mutex_lockw) == 0) {
17680Sstevel@tonic-gate 				mp->mutex_owner = (uintptr_t)self;
17690Sstevel@tonic-gate 				break;
17700Sstevel@tonic-gate 			}
1771*6247Sraf 			enqueue(qp, self, 0);
17720Sstevel@tonic-gate 			mp->mutex_waiters = 1;
17730Sstevel@tonic-gate 		}
17740Sstevel@tonic-gate 		ASSERT(self->ul_sleepq == qp &&
17750Sstevel@tonic-gate 		    self->ul_qtype == MX &&
17760Sstevel@tonic-gate 		    self->ul_wchan == mp);
17770Sstevel@tonic-gate 		if (error) {
17785629Sraf 			if (error != EINTR) {
1779*6247Sraf 				mp->mutex_waiters = dequeue_self(qp);
17805629Sraf 				break;
17815629Sraf 			}
17825629Sraf 			error = 0;
17830Sstevel@tonic-gate 		}
17840Sstevel@tonic-gate 	}
17850Sstevel@tonic-gate 	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
17860Sstevel@tonic-gate 	    self->ul_wchan == NULL);
17870Sstevel@tonic-gate 	self->ul_sp = 0;
17880Sstevel@tonic-gate 	queue_unlock(qp);
17894574Sraf 
17900Sstevel@tonic-gate 	if (msp)
17910Sstevel@tonic-gate 		msp->mutex_sleep_time += gethrtime() - begin_sleep;
17920Sstevel@tonic-gate 
17930Sstevel@tonic-gate 	ASSERT(error == 0 || error == EINVAL || error == ETIME);
17944574Sraf 
17954574Sraf 	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
17964574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
17974574Sraf 		/*
17986057Sraf 		 * We shouldn't own the mutex.
17996057Sraf 		 * Just clear the lock; everyone has already been woken up.
18004574Sraf 		 */
18014574Sraf 		mp->mutex_owner = 0;
18026057Sraf 		(void) clear_lockbyte(&mp->mutex_lockword);
18034574Sraf 		error = ENOTRECOVERABLE;
18044574Sraf 	}
18054574Sraf 
18064574Sraf 	if (error) {
18074574Sraf 		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
18084574Sraf 		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
18094574Sraf 	} else {
18104574Sraf 		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
18114574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
18124574Sraf 		if (mp->mutex_flag & LOCK_OWNERDEAD) {
18134574Sraf 			ASSERT(mp->mutex_type & LOCK_ROBUST);
18144574Sraf 			error = EOWNERDEAD;
18154574Sraf 		}
18164574Sraf 	}
18174574Sraf 
18180Sstevel@tonic-gate 	return (error);
18190Sstevel@tonic-gate }
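
/*
 * Stripped of its timeout and signal handling, the sleep-queue
 * handshake in mutex_lock_queue() reduces to the following sketch
 * (not the real code):
 *
 *	qp = queue_lock(mp, MX);
 *	enqueue(qp, self, 0);
 *	mp->mutex_waiters = 1;		(set before the final trylock)
 *	while (set_lock_byte(&mp->mutex_lockw) != 0) {
 *		queue_unlock(qp);
 *		(void) __lwp_park(tsp, 0);	(we may be dequeued here)
 *		qp = queue_lock(mp, MX);
 *		if (self->ul_sleepq == NULL)
 *			enqueue(qp, self, 0);
 *		mp->mutex_waiters = 1;
 *	}
 *	mp->mutex_waiters = dequeue_self(qp);
 *	queue_unlock(qp);
 *
 * Because the waiters bit is set under the queue lock before the last
 * failed trylock, an unlocking thread must either see the bit and wake
 * us, or release the lock early enough for our trylock to succeed;
 * either way, no wakeup is lost.
 */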
18200Sstevel@tonic-gate 
18214574Sraf static int
18224574Sraf mutex_recursion(mutex_t *mp, int mtype, int try)
18234574Sraf {
18244574Sraf 	ASSERT(mutex_is_held(mp));
18254574Sraf 	ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK));
18264574Sraf 	ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);
18274574Sraf 
18284574Sraf 	if (mtype & LOCK_RECURSIVE) {
18294574Sraf 		if (mp->mutex_rcount == RECURSION_MAX) {
18304574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN);
18314574Sraf 			return (EAGAIN);
18324574Sraf 		}
18334574Sraf 		mp->mutex_rcount++;
18344574Sraf 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0);
18354574Sraf 		return (0);
18364574Sraf 	}
18374574Sraf 	if (try == MUTEX_LOCK) {
18384574Sraf 		DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
18394574Sraf 		return (EDEADLK);
18404574Sraf 	}
18414574Sraf 	return (EBUSY);
18424574Sraf }
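
/*
 * For reference, the LOCK_RECURSIVE and LOCK_ERRORCHECK behavior
 * implemented above is what applications select through the standard
 * attribute interface (an illustrative sketch, not part of libc):
 *
 *	pthread_mutex_t m;
 *	pthread_mutexattr_t attr;
 *
 *	(void) pthread_mutexattr_init(&attr);
 *	(void) pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	(void) pthread_mutex_init(&m, &attr);
 *
 *	(void) pthread_mutex_lock(&m);		(acquires; rcount == 0)
 *	(void) pthread_mutex_lock(&m);		(returns 0; rcount -> 1)
 *	(void) pthread_mutex_unlock(&m);	(rcount -> 0, still held)
 *	(void) pthread_mutex_unlock(&m);	(actually releases the lock)
 *
 * With PTHREAD_MUTEX_ERRORCHECK instead, the second lock would return
 * EDEADLK and a self-deadlocking trylock would return EBUSY, exactly
 * as in mutex_recursion().
 */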
18434574Sraf 
18444574Sraf /*
18454574Sraf  * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so
18464574Sraf  * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary.
18474574Sraf  * We use tdb_hash_lock here and in the synch object tracking code in
18484574Sraf  * the tdb_agent.c file.  There is no conflict between these two usages.
18494574Sraf  */
18504574Sraf void
18514574Sraf register_lock(mutex_t *mp)
18524574Sraf {
18534574Sraf 	uberdata_t *udp = curthread->ul_uberdata;
18544574Sraf 	uint_t hash = LOCK_HASH(mp);
18554574Sraf 	robust_t *rlp;
18564574Sraf 	robust_t **rlpp;
18574574Sraf 	robust_t **table;
18584574Sraf 
18594574Sraf 	if ((table = udp->robustlocks) == NULL) {
18604574Sraf 		lmutex_lock(&udp->tdb_hash_lock);
18614574Sraf 		if ((table = udp->robustlocks) == NULL) {
18624574Sraf 			table = lmalloc(LOCKHASHSZ * sizeof (robust_t *));
18634574Sraf 			_membar_producer();
18644574Sraf 			udp->robustlocks = table;
18654574Sraf 		}
18664574Sraf 		lmutex_unlock(&udp->tdb_hash_lock);
18674574Sraf 	}
18684574Sraf 	_membar_consumer();
18694574Sraf 
18704574Sraf 	/*
18714574Sraf 	 * First search the registration table with no locks held.
18724574Sraf 	 * This is safe because the table never shrinks
18734574Sraf 	 * and we can only get a false negative.
18744574Sraf 	 */
18754574Sraf 	for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) {
18764574Sraf 		if (rlp->robust_lock == mp)	/* already registered */
18774574Sraf 			return;
18784574Sraf 	}
18794574Sraf 
18804574Sraf 	/*
18814574Sraf 	 * The lock was not found.
18824574Sraf 	 * Repeat the operation with tdb_hash_lock held.
18834574Sraf 	 */
18844574Sraf 	lmutex_lock(&udp->tdb_hash_lock);
18854574Sraf 
18864574Sraf 	for (rlpp = &table[hash];
18874574Sraf 	    (rlp = *rlpp) != NULL;
18884574Sraf 	    rlpp = &rlp->robust_next) {
18894574Sraf 		if (rlp->robust_lock == mp) {	/* already registered */
18904574Sraf 			lmutex_unlock(&udp->tdb_hash_lock);
18914574Sraf 			return;
18924574Sraf 		}
18934574Sraf 	}
18944574Sraf 
18954574Sraf 	/*
18964574Sraf 	 * The lock has never been registered.
18974574Sraf 	 * Register it now and add it to the table.
18984574Sraf 	 */
18994574Sraf 	(void) ___lwp_mutex_register(mp);
19004574Sraf 	rlp = lmalloc(sizeof (*rlp));
19014574Sraf 	rlp->robust_lock = mp;
19024574Sraf 	_membar_producer();
19034574Sraf 	*rlpp = rlp;
19044574Sraf 
19054574Sraf 	lmutex_unlock(&udp->tdb_hash_lock);
19064574Sraf }
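
/*
 * For reference, the robust-mutex recovery protocol that this
 * registration supports looks like the following from the application
 * side.  This is an illustrative sketch; the _np interfaces shown are
 * believed to match the Solaris API of this era, and
 * repair_shared_state() is a hypothetical application function:
 *
 *	pthread_mutexattr_t attr;
 *
 *	(void) pthread_mutexattr_init(&attr);
 *	(void) pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	(void) pthread_mutexattr_setrobust_np(&attr, PTHREAD_MUTEX_ROBUST_NP);
 *	(void) pthread_mutex_init(mp, &attr);
 *
 *	switch (pthread_mutex_lock(mp)) {
 *	case 0:				(normal acquisition)
 *		break;
 *	case EOWNERDEAD:		(previous owner died holding it)
 *		repair_shared_state();
 *		(void) pthread_mutex_consistent_np(mp);
 *		break;
 *	case ENOTRECOVERABLE:		(no one repaired the state)
 *		abort();
 *	}
 *	(void) pthread_mutex_unlock(mp);
 */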
19074574Sraf 
19084574Sraf /*
19094574Sraf  * This is called in the child of fork()/forkall() to start over
19104574Sraf  * with a clean slate.  (Each process must register its own locks.)
19114574Sraf  * No locks are needed because all other threads are suspended or gone.
19124574Sraf  */
19134574Sraf void
19144574Sraf unregister_locks(void)
19154574Sraf {
19164574Sraf 	uberdata_t *udp = curthread->ul_uberdata;
19174574Sraf 	uint_t hash;
19184574Sraf 	robust_t **table;
19194574Sraf 	robust_t *rlp;
19204574Sraf 	robust_t *next;
19214574Sraf 
19224574Sraf 	if ((table = udp->robustlocks) != NULL) {
19234574Sraf 		for (hash = 0; hash < LOCKHASHSZ; hash++) {
19244574Sraf 			rlp = table[hash];
19254574Sraf 			while (rlp != NULL) {
19264574Sraf 				next = rlp->robust_next;
19274574Sraf 				lfree(rlp, sizeof (*rlp));
19284574Sraf 				rlp = next;
19294574Sraf 			}
19304574Sraf 		}
19314574Sraf 		lfree(table, LOCKHASHSZ * sizeof (robust_t *));
19324574Sraf 		udp->robustlocks = NULL;
19334574Sraf 	}
19344574Sraf }
19354574Sraf 
19360Sstevel@tonic-gate /*
19370Sstevel@tonic-gate  * Returns with mutex_owner set correctly.
19380Sstevel@tonic-gate  */
1939*6247Sraf int
19400Sstevel@tonic-gate mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try)
19410Sstevel@tonic-gate {
19420Sstevel@tonic-gate 	ulwp_t *self = curthread;
19430Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
19440Sstevel@tonic-gate 	int mtype = mp->mutex_type;
19450Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
19460Sstevel@tonic-gate 	int error = 0;
1947*6247Sraf 	int noceil = try & MUTEX_NOCEIL;
19484574Sraf 	uint8_t ceil;
19494574Sraf 	int myprio;
19500Sstevel@tonic-gate 
1951*6247Sraf 	try &= ~MUTEX_NOCEIL;
19520Sstevel@tonic-gate 	ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);
19530Sstevel@tonic-gate 
19540Sstevel@tonic-gate 	if (!self->ul_schedctl_called)
19550Sstevel@tonic-gate 		(void) setup_schedctl();
19560Sstevel@tonic-gate 
19570Sstevel@tonic-gate 	if (msp && try == MUTEX_TRY)
19580Sstevel@tonic-gate 		tdb_incr(msp->mutex_try);
19590Sstevel@tonic-gate 
19604574Sraf 	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_is_held(mp))
19614574Sraf 		return (mutex_recursion(mp, mtype, try));
19620Sstevel@tonic-gate 
19630Sstevel@tonic-gate 	if (self->ul_error_detection && try == MUTEX_LOCK &&
19640Sstevel@tonic-gate 	    tsp == NULL && mutex_is_held(mp))
19650Sstevel@tonic-gate 		lock_error(mp, "mutex_lock", NULL, NULL);
19660Sstevel@tonic-gate 
1967*6247Sraf 	if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) {
1968*6247Sraf 		update_sched(self);
1969*6247Sraf 		if (self->ul_cid != self->ul_rtclassid) {
1970*6247Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM);
1971*6247Sraf 			return (EPERM);
1972*6247Sraf 		}
19734574Sraf 		ceil = mp->mutex_ceiling;
1974*6247Sraf 		myprio = self->ul_epri? self->ul_epri : self->ul_pri;
19754574Sraf 		if (myprio > ceil) {
19764574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL);
19774574Sraf 			return (EINVAL);
19784574Sraf 		}
19794574Sraf 		if ((error = _ceil_mylist_add(mp)) != 0) {
19804574Sraf 			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
19814574Sraf 			return (error);
19820Sstevel@tonic-gate 		}
19834574Sraf 		if (myprio < ceil)
19844574Sraf 			_ceil_prio_inherit(ceil);
19854574Sraf 	}
19864574Sraf 
19874574Sraf 	if ((mtype & (USYNC_PROCESS | LOCK_ROBUST))
19884574Sraf 	    == (USYNC_PROCESS | LOCK_ROBUST))
19894574Sraf 		register_lock(mp);
19904574Sraf 
19914574Sraf 	if (mtype & LOCK_PRIO_INHERIT) {
19924574Sraf 		/* go straight to the kernel */
19934574Sraf 		if (try == MUTEX_TRY)
19944574Sraf 			error = mutex_trylock_kernel(mp);
19954574Sraf 		else	/* MUTEX_LOCK */
19964574Sraf 			error = mutex_lock_kernel(mp, tsp, msp);
19974574Sraf 		/*
19984574Sraf 		 * The kernel never sets or clears the lock byte
19994574Sraf 		 * for LOCK_PRIO_INHERIT mutexes.
20004574Sraf 		 * Set it here for consistency.
20014574Sraf 		 */
20024574Sraf 		switch (error) {
20034574Sraf 		case 0:
2004*6247Sraf 			self->ul_pilocks++;
20054574Sraf 			mp->mutex_lockw = LOCKSET;
20064574Sraf 			break;
20074574Sraf 		case EOWNERDEAD:
20084574Sraf 		case ELOCKUNMAPPED:
2009*6247Sraf 			self->ul_pilocks++;
20104574Sraf 			mp->mutex_lockw = LOCKSET;
20114574Sraf 			/* FALLTHROUGH */
20124574Sraf 		case ENOTRECOVERABLE:
20134574Sraf 			ASSERT(mtype & LOCK_ROBUST);
20144574Sraf 			break;
20154574Sraf 		case EDEADLK:
20164574Sraf 			if (try == MUTEX_LOCK)
20174574Sraf 				stall();
20184574Sraf 			error = EBUSY;
20194574Sraf 			break;
20200Sstevel@tonic-gate 		}
20210Sstevel@tonic-gate 	} else if (mtype & USYNC_PROCESS) {
20224613Sraf 		error = mutex_trylock_process(mp, try == MUTEX_LOCK);
20234574Sraf 		if (error == EBUSY && try == MUTEX_LOCK)
20240Sstevel@tonic-gate 			error = mutex_lock_kernel(mp, tsp, msp);
20255629Sraf 	} else {	/* USYNC_THREAD */
20264613Sraf 		error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK);
20274574Sraf 		if (error == EBUSY && try == MUTEX_LOCK)
20284574Sraf 			error = mutex_lock_queue(self, msp, mp, tsp);
20290Sstevel@tonic-gate 	}
20300Sstevel@tonic-gate 
20310Sstevel@tonic-gate 	switch (error) {
20324574Sraf 	case 0:
20330Sstevel@tonic-gate 	case EOWNERDEAD:
20340Sstevel@tonic-gate 	case ELOCKUNMAPPED:
20354574Sraf 		if (mtype & LOCK_ROBUST)
20364574Sraf 			remember_lock(mp);
20370Sstevel@tonic-gate 		if (msp)
20380Sstevel@tonic-gate 			record_begin_hold(msp);
20390Sstevel@tonic-gate 		break;
20400Sstevel@tonic-gate 	default:
2041*6247Sraf 		if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) {
20424574Sraf 			(void) _ceil_mylist_del(mp);
20434574Sraf 			if (myprio < ceil)
20444574Sraf 				_ceil_prio_waive();
20454574Sraf 		}
20460Sstevel@tonic-gate 		if (try == MUTEX_TRY) {
20470Sstevel@tonic-gate 			if (msp)
20480Sstevel@tonic-gate 				tdb_incr(msp->mutex_try_fail);
20490Sstevel@tonic-gate 			if (__td_event_report(self, TD_LOCK_TRY, udp)) {
20500Sstevel@tonic-gate 				self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
20510Sstevel@tonic-gate 				tdb_event(TD_LOCK_TRY, udp);
20520Sstevel@tonic-gate 			}
20530Sstevel@tonic-gate 		}
20540Sstevel@tonic-gate 		break;
20550Sstevel@tonic-gate 	}
20560Sstevel@tonic-gate 
20570Sstevel@tonic-gate 	return (error);
20580Sstevel@tonic-gate }
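
/*
 * For reference, the LOCK_PRIO_PROTECT path above implements the
 * standard priority-ceiling protocol.  An illustrative sketch (the
 * ceiling value is an arbitrary assumption):
 *
 *	pthread_mutex_t m;
 *	pthread_mutexattr_t attr;
 *
 *	(void) pthread_mutexattr_init(&attr);
 *	(void) pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	(void) pthread_mutexattr_setprioceiling(&attr, 20);
 *	(void) pthread_mutex_init(&m, &attr);
 *
 * A thread whose priority exceeds the ceiling gets EINVAL from
 * pthread_mutex_lock() (and, per the code above, a thread outside the
 * real-time class gets EPERM); otherwise the owner runs at the ceiling
 * priority, via _ceil_prio_inherit(), until it releases the lock.
 */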
20590Sstevel@tonic-gate 
20600Sstevel@tonic-gate int
20610Sstevel@tonic-gate fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try)
20620Sstevel@tonic-gate {
20630Sstevel@tonic-gate 	ulwp_t *self = curthread;
20640Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
20650Sstevel@tonic-gate 
20660Sstevel@tonic-gate 	/*
20670Sstevel@tonic-gate 	 * We know that USYNC_PROCESS is set in mtype and that
20680Sstevel@tonic-gate 	 * zero, one, or both of the flags LOCK_RECURSIVE and
20690Sstevel@tonic-gate 	 * LOCK_ERRORCHECK are set, and that no other flags are set.
20700Sstevel@tonic-gate 	 */
20714574Sraf 	ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0);
20720Sstevel@tonic-gate 	enter_critical(self);
20736057Sraf 	if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) {
20740Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
20756057Sraf 		/* mp->mutex_ownerpid was set by set_lock_byte64() */
20760Sstevel@tonic-gate 		exit_critical(self);
20770Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
20780Sstevel@tonic-gate 		return (0);
20790Sstevel@tonic-gate 	}
20800Sstevel@tonic-gate 	exit_critical(self);
20810Sstevel@tonic-gate 
20824574Sraf 	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp))
20834574Sraf 		return (mutex_recursion(mp, mtype, try));
20844574Sraf 
20854613Sraf 	if (try == MUTEX_LOCK) {
20864613Sraf 		if (mutex_trylock_process(mp, 1) == 0)
20874613Sraf 			return (0);
20880Sstevel@tonic-gate 		return (mutex_lock_kernel(mp, tsp, NULL));
20894613Sraf 	}
20900Sstevel@tonic-gate 
20910Sstevel@tonic-gate 	if (__td_event_report(self, TD_LOCK_TRY, udp)) {
20920Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
20930Sstevel@tonic-gate 		tdb_event(TD_LOCK_TRY, udp);
20940Sstevel@tonic-gate 	}
20950Sstevel@tonic-gate 	return (EBUSY);
20960Sstevel@tonic-gate }
20970Sstevel@tonic-gate 
20980Sstevel@tonic-gate static int
20990Sstevel@tonic-gate mutex_lock_impl(mutex_t *mp, timespec_t *tsp)
21000Sstevel@tonic-gate {
21010Sstevel@tonic-gate 	ulwp_t *self = curthread;
2102*6247Sraf 	int mtype = mp->mutex_type;
21030Sstevel@tonic-gate 	uberflags_t *gflags;
21040Sstevel@tonic-gate 
21050Sstevel@tonic-gate 	/*
21060Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
21070Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
21080Sstevel@tonic-gate 	 * no error detection, no lock statistics,
21090Sstevel@tonic-gate 	 * and only a single thread in the process.
21100Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
21110Sstevel@tonic-gate 	 */
2112*6247Sraf 	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
2113*6247Sraf 	    self->ul_uberdata->uberflags.uf_all) == 0) {
21140Sstevel@tonic-gate 		/*
21150Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
21160Sstevel@tonic-gate 		 */
21170Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
21180Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
21190Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
21200Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
21210Sstevel@tonic-gate 			return (0);
21220Sstevel@tonic-gate 		}
21234574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
21244574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_LOCK));
21250Sstevel@tonic-gate 		/*
21260Sstevel@tonic-gate 		 * We have reached a deadlock, probably because the
21270Sstevel@tonic-gate 		 * process is executing non-async-signal-safe code in
21280Sstevel@tonic-gate 		 * a signal handler and is attempting to acquire a lock
21290Sstevel@tonic-gate 		 * that it already owns.  This is not surprising, given
21300Sstevel@tonic-gate 		 * bad programming practices over the years that have
21310Sstevel@tonic-gate 		 * resulted in applications calling printf() and such
21320Sstevel@tonic-gate 		 * in their signal handlers.  Unless the user has told
21330Sstevel@tonic-gate 		 * us that the signal handlers are safe by setting:
21340Sstevel@tonic-gate 		 *	export _THREAD_ASYNC_SAFE=1
21350Sstevel@tonic-gate 		 * we return EDEADLK rather than actually deadlocking.
21360Sstevel@tonic-gate 		 */
21370Sstevel@tonic-gate 		if (tsp == NULL &&
21380Sstevel@tonic-gate 		    MUTEX_OWNER(mp) == self && !self->ul_async_safe) {
21390Sstevel@tonic-gate 			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
21400Sstevel@tonic-gate 			return (EDEADLK);
21410Sstevel@tonic-gate 		}
21420Sstevel@tonic-gate 	}
21430Sstevel@tonic-gate 
21440Sstevel@tonic-gate 	/*
21450Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
21460Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
21470Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
21480Sstevel@tonic-gate 	 */
21490Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
21500Sstevel@tonic-gate 	    (gflags->uf_trs_ted |
21510Sstevel@tonic-gate 	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
21520Sstevel@tonic-gate 		if (mtype & USYNC_PROCESS)
21530Sstevel@tonic-gate 			return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK));
21540Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
21550Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
21560Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
21570Sstevel@tonic-gate 			return (0);
21580Sstevel@tonic-gate 		}
21594574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
21604574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_LOCK));
21614613Sraf 		if (mutex_trylock_adaptive(mp, 1) != 0)
21624574Sraf 			return (mutex_lock_queue(self, NULL, mp, tsp));
21634574Sraf 		return (0);
21640Sstevel@tonic-gate 	}
21650Sstevel@tonic-gate 
21660Sstevel@tonic-gate 	/* else do it the long way */
21670Sstevel@tonic-gate 	return (mutex_lock_internal(mp, tsp, MUTEX_LOCK));
21680Sstevel@tonic-gate }
21690Sstevel@tonic-gate 
21705891Sraf /*
21715891Sraf  * Of the following function names (all the same function, of course),
21725891Sraf  * only _private_mutex_lock() is not exported from libc.  This means
21735891Sraf  * that calling _private_mutex_lock() within libc will not invoke the
21745891Sraf  * dynamic linker.  This is critical for any code called in the child
21755891Sraf  * of vfork() (via posix_spawn()) because invoking the dynamic linker
21765891Sraf  * in such a case would corrupt the parent's address space.  There are
21775891Sraf  * other places in libc where avoiding the dynamic linker is necessary.
21785891Sraf  * Of course, _private_mutex_lock() can be called in cases not requiring
21795891Sraf  * the avoidance of the dynamic linker too, and often is.
21805891Sraf  */
21810Sstevel@tonic-gate #pragma weak _private_mutex_lock = __mutex_lock
21820Sstevel@tonic-gate #pragma weak mutex_lock = __mutex_lock
21830Sstevel@tonic-gate #pragma weak _mutex_lock = __mutex_lock
21840Sstevel@tonic-gate #pragma weak pthread_mutex_lock = __mutex_lock
21850Sstevel@tonic-gate #pragma weak _pthread_mutex_lock = __mutex_lock
21860Sstevel@tonic-gate int
21870Sstevel@tonic-gate __mutex_lock(mutex_t *mp)
21880Sstevel@tonic-gate {
21890Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
21900Sstevel@tonic-gate 	return (mutex_lock_impl(mp, NULL));
21910Sstevel@tonic-gate }
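
/*
 * A minimal sketch of the aliasing technique used above (the names
 * below are hypothetical): with the Sun compilers,
 * "#pragma weak alias = target" makes "alias" a weak symbol bound to
 * the local definition of "target".  Because a symbol that is not
 * exported from the library is resolved at link-edit time, internal
 * callers of it never go through lazy binding in the dynamic linker.
 *
 *	#pragma weak do_thing = __do_thing
 *	int
 *	__do_thing(void)
 *	{
 *		return (0);
 *	}
 */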
21920Sstevel@tonic-gate 
21930Sstevel@tonic-gate #pragma weak pthread_mutex_timedlock = _pthread_mutex_timedlock
21940Sstevel@tonic-gate int
21950Sstevel@tonic-gate _pthread_mutex_timedlock(mutex_t *mp, const timespec_t *abstime)
21960Sstevel@tonic-gate {
21970Sstevel@tonic-gate 	timespec_t tslocal;
21980Sstevel@tonic-gate 	int error;
21990Sstevel@tonic-gate 
22000Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
22010Sstevel@tonic-gate 	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
22020Sstevel@tonic-gate 	error = mutex_lock_impl(mp, &tslocal);
22030Sstevel@tonic-gate 	if (error == ETIME)
22040Sstevel@tonic-gate 		error = ETIMEDOUT;
22050Sstevel@tonic-gate 	return (error);
22060Sstevel@tonic-gate }
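
/*
 * Illustrative usage (not part of libc): pthread_mutex_timedlock()
 * takes an absolute CLOCK_REALTIME deadline, which the wrapper above
 * converts to the relative form used internally.  The 5-second value
 * and give_up() are assumptions of the example:
 *
 *	timespec_t deadline;
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;
 *	if (pthread_mutex_timedlock(&m, &deadline) == ETIMEDOUT)
 *		give_up();		(the lock was not acquired)
 */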
22070Sstevel@tonic-gate 
22080Sstevel@tonic-gate #pragma weak pthread_mutex_reltimedlock_np = _pthread_mutex_reltimedlock_np
22090Sstevel@tonic-gate int
22100Sstevel@tonic-gate _pthread_mutex_reltimedlock_np(mutex_t *mp, const timespec_t *reltime)
22110Sstevel@tonic-gate {
22120Sstevel@tonic-gate 	timespec_t tslocal;
22130Sstevel@tonic-gate 	int error;
22140Sstevel@tonic-gate 
22150Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
22160Sstevel@tonic-gate 	tslocal = *reltime;
22170Sstevel@tonic-gate 	error = mutex_lock_impl(mp, &tslocal);
22180Sstevel@tonic-gate 	if (error == ETIME)
22190Sstevel@tonic-gate 		error = ETIMEDOUT;
22200Sstevel@tonic-gate 	return (error);
22210Sstevel@tonic-gate }
22220Sstevel@tonic-gate 
22230Sstevel@tonic-gate #pragma weak _private_mutex_trylock = __mutex_trylock
22240Sstevel@tonic-gate #pragma weak mutex_trylock = __mutex_trylock
22250Sstevel@tonic-gate #pragma weak _mutex_trylock = __mutex_trylock
22260Sstevel@tonic-gate #pragma weak pthread_mutex_trylock = __mutex_trylock
22270Sstevel@tonic-gate #pragma weak _pthread_mutex_trylock = __mutex_trylock
22280Sstevel@tonic-gate int
22290Sstevel@tonic-gate __mutex_trylock(mutex_t *mp)
22300Sstevel@tonic-gate {
22310Sstevel@tonic-gate 	ulwp_t *self = curthread;
22320Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
2233*6247Sraf 	int mtype = mp->mutex_type;
22340Sstevel@tonic-gate 	uberflags_t *gflags;
22350Sstevel@tonic-gate 
22360Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
2237*6247Sraf 
22380Sstevel@tonic-gate 	/*
22390Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
22400Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
22410Sstevel@tonic-gate 	 * no error detection, no lock statistics,
22420Sstevel@tonic-gate 	 * and only a single thread in the process.
22430Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
22440Sstevel@tonic-gate 	 */
2245*6247Sraf 	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
22460Sstevel@tonic-gate 	    udp->uberflags.uf_all) == 0) {
22470Sstevel@tonic-gate 		/*
22480Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
22490Sstevel@tonic-gate 		 */
22500Sstevel@tonic-gate 		if (mp->mutex_lockw == 0) {
22510Sstevel@tonic-gate 			mp->mutex_lockw = LOCKSET;
22520Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
22530Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
22540Sstevel@tonic-gate 			return (0);
22550Sstevel@tonic-gate 		}
22564574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
22574574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_TRY));
22580Sstevel@tonic-gate 		return (EBUSY);
22590Sstevel@tonic-gate 	}
22600Sstevel@tonic-gate 
22610Sstevel@tonic-gate 	/*
22620Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
22630Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
22640Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
22650Sstevel@tonic-gate 	 */
22660Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
22670Sstevel@tonic-gate 	    (gflags->uf_trs_ted |
22680Sstevel@tonic-gate 	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
22690Sstevel@tonic-gate 		if (mtype & USYNC_PROCESS)
22700Sstevel@tonic-gate 			return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY));
22710Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
22720Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
22730Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
22740Sstevel@tonic-gate 			return (0);
22750Sstevel@tonic-gate 		}
22764574Sraf 		if (mtype && MUTEX_OWNER(mp) == self)
22774574Sraf 			return (mutex_recursion(mp, mtype, MUTEX_TRY));
22784613Sraf 		if (__td_event_report(self, TD_LOCK_TRY, udp)) {
22794613Sraf 			self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
22804613Sraf 			tdb_event(TD_LOCK_TRY, udp);
22810Sstevel@tonic-gate 		}
22824613Sraf 		return (EBUSY);
22830Sstevel@tonic-gate 	}
22840Sstevel@tonic-gate 
22850Sstevel@tonic-gate 	/* else do it the long way */
22860Sstevel@tonic-gate 	return (mutex_lock_internal(mp, NULL, MUTEX_TRY));
22870Sstevel@tonic-gate }
22880Sstevel@tonic-gate 
22890Sstevel@tonic-gate int
22904574Sraf mutex_unlock_internal(mutex_t *mp, int retain_robust_flags)
22910Sstevel@tonic-gate {
22920Sstevel@tonic-gate 	ulwp_t *self = curthread;
22930Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
22940Sstevel@tonic-gate 	int mtype = mp->mutex_type;
22950Sstevel@tonic-gate 	tdb_mutex_stats_t *msp;
22964574Sraf 	int error = 0;
22974574Sraf 	int release_all;
22980Sstevel@tonic-gate 	lwpid_t lwpid;
22990Sstevel@tonic-gate 
23000Sstevel@tonic-gate 	if ((mtype & LOCK_ERRORCHECK) && !mutex_is_held(mp))
23010Sstevel@tonic-gate 		return (EPERM);
23020Sstevel@tonic-gate 
23030Sstevel@tonic-gate 	if (self->ul_error_detection && !mutex_is_held(mp))
23040Sstevel@tonic-gate 		lock_error(mp, "mutex_unlock", NULL, NULL);
23050Sstevel@tonic-gate 
23060Sstevel@tonic-gate 	if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
23070Sstevel@tonic-gate 		mp->mutex_rcount--;
23080Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
23090Sstevel@tonic-gate 		return (0);
23100Sstevel@tonic-gate 	}
23110Sstevel@tonic-gate 
23120Sstevel@tonic-gate 	if ((msp = MUTEX_STATS(mp, udp)) != NULL)
23130Sstevel@tonic-gate 		(void) record_hold_time(msp);
23140Sstevel@tonic-gate 
23154574Sraf 	if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) &&
23164574Sraf 	    (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
23174574Sraf 		ASSERT(mp->mutex_type & LOCK_ROBUST);
23184574Sraf 		mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
23194574Sraf 		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
23204574Sraf 	}
23214574Sraf 	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
23224574Sraf 
23234574Sraf 	if (mtype & LOCK_PRIO_INHERIT) {
23240Sstevel@tonic-gate 		no_preempt(self);
23250Sstevel@tonic-gate 		mp->mutex_owner = 0;
23266057Sraf 		/* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */
23270Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
23284574Sraf 		mp->mutex_lockw = LOCKCLEAR;
2329*6247Sraf 		self->ul_pilocks--;
23304574Sraf 		error = ___lwp_mutex_unlock(mp);
23310Sstevel@tonic-gate 		preempt(self);
23320Sstevel@tonic-gate 	} else if (mtype & USYNC_PROCESS) {
23335629Sraf 		mutex_unlock_process(mp, release_all);
23340Sstevel@tonic-gate 	} else {	/* USYNC_THREAD */
23354574Sraf 		if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) {
23360Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid);
23370Sstevel@tonic-gate 			preempt(self);
23380Sstevel@tonic-gate 		}
23390Sstevel@tonic-gate 	}
23400Sstevel@tonic-gate 
23414574Sraf 	if (mtype & LOCK_ROBUST)
23424574Sraf 		forget_lock(mp);
23434574Sraf 
23444574Sraf 	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
23454574Sraf 		_ceil_prio_waive();
23464574Sraf 
23470Sstevel@tonic-gate 	return (error);
23480Sstevel@tonic-gate }
23490Sstevel@tonic-gate 
23500Sstevel@tonic-gate #pragma weak _private_mutex_unlock = __mutex_unlock
23510Sstevel@tonic-gate #pragma weak mutex_unlock = __mutex_unlock
23520Sstevel@tonic-gate #pragma weak _mutex_unlock = __mutex_unlock
23530Sstevel@tonic-gate #pragma weak pthread_mutex_unlock = __mutex_unlock
23540Sstevel@tonic-gate #pragma weak _pthread_mutex_unlock = __mutex_unlock
23550Sstevel@tonic-gate int
23560Sstevel@tonic-gate __mutex_unlock(mutex_t *mp)
23570Sstevel@tonic-gate {
23580Sstevel@tonic-gate 	ulwp_t *self = curthread;
2359*6247Sraf 	int mtype = mp->mutex_type;
23600Sstevel@tonic-gate 	uberflags_t *gflags;
23610Sstevel@tonic-gate 	lwpid_t lwpid;
23620Sstevel@tonic-gate 	short el;
23630Sstevel@tonic-gate 
23640Sstevel@tonic-gate 	/*
23650Sstevel@tonic-gate 	 * Optimize the case of USYNC_THREAD, including
23660Sstevel@tonic-gate 	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
23670Sstevel@tonic-gate 	 * no error detection, no lock statistics,
23680Sstevel@tonic-gate 	 * and only a single thread in the process.
23690Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
23700Sstevel@tonic-gate 	 */
2371*6247Sraf 	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
2372*6247Sraf 	    self->ul_uberdata->uberflags.uf_all) == 0) {
23730Sstevel@tonic-gate 		if (mtype) {
23740Sstevel@tonic-gate 			/*
23750Sstevel@tonic-gate 			 * At this point we know that one or both of the
23760Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
23770Sstevel@tonic-gate 			 */
23780Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
23790Sstevel@tonic-gate 				return (EPERM);
23800Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
23810Sstevel@tonic-gate 				mp->mutex_rcount--;
23820Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
23830Sstevel@tonic-gate 				return (0);
23840Sstevel@tonic-gate 			}
23850Sstevel@tonic-gate 		}
23860Sstevel@tonic-gate 		/*
23870Sstevel@tonic-gate 		 * Only one thread exists so we don't need an atomic operation.
23880Sstevel@tonic-gate 		 * Also, there can be no waiters.
23890Sstevel@tonic-gate 		 */
23900Sstevel@tonic-gate 		mp->mutex_owner = 0;
23910Sstevel@tonic-gate 		mp->mutex_lockword = 0;
23920Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
23930Sstevel@tonic-gate 		return (0);
23940Sstevel@tonic-gate 	}
23950Sstevel@tonic-gate 
23960Sstevel@tonic-gate 	/*
23970Sstevel@tonic-gate 	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
23980Sstevel@tonic-gate 	 * no error detection, and no lock statistics.
23990Sstevel@tonic-gate 	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
24000Sstevel@tonic-gate 	 */
24010Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL) {
24020Sstevel@tonic-gate 		if (((el = gflags->uf_trs_ted) | mtype) == 0) {
24030Sstevel@tonic-gate fast_unlock:
24045629Sraf 			if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
24050Sstevel@tonic-gate 				(void) __lwp_unpark(lwpid);
24060Sstevel@tonic-gate 				preempt(self);
24070Sstevel@tonic-gate 			}
24080Sstevel@tonic-gate 			return (0);
24090Sstevel@tonic-gate 		}
24100Sstevel@tonic-gate 		if (el)		/* error detection or lock statistics */
24110Sstevel@tonic-gate 			goto slow_unlock;
24120Sstevel@tonic-gate 		if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
24130Sstevel@tonic-gate 			/*
24140Sstevel@tonic-gate 			 * At this point we know that one or both of the
24150Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
24160Sstevel@tonic-gate 			 */
24170Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
24180Sstevel@tonic-gate 				return (EPERM);
24190Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
24200Sstevel@tonic-gate 				mp->mutex_rcount--;
24210Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
24220Sstevel@tonic-gate 				return (0);
24230Sstevel@tonic-gate 			}
24240Sstevel@tonic-gate 			goto fast_unlock;
24250Sstevel@tonic-gate 		}
24260Sstevel@tonic-gate 		if ((mtype &
24270Sstevel@tonic-gate 		    ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
24280Sstevel@tonic-gate 			/*
24290Sstevel@tonic-gate 			 * At this point we know that zero, one, or both of the
24300Sstevel@tonic-gate 			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and
24310Sstevel@tonic-gate 			 * that the USYNC_PROCESS flag is set.
24320Sstevel@tonic-gate 			 */
24330Sstevel@tonic-gate 			if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp))
24340Sstevel@tonic-gate 				return (EPERM);
24350Sstevel@tonic-gate 			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
24360Sstevel@tonic-gate 				mp->mutex_rcount--;
24370Sstevel@tonic-gate 				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
24380Sstevel@tonic-gate 				return (0);
24390Sstevel@tonic-gate 			}
24405629Sraf 			mutex_unlock_process(mp, 0);
24410Sstevel@tonic-gate 			return (0);
24420Sstevel@tonic-gate 		}
24430Sstevel@tonic-gate 	}
24440Sstevel@tonic-gate 
24450Sstevel@tonic-gate 	/* else do it the long way */
24460Sstevel@tonic-gate slow_unlock:
24474574Sraf 	return (mutex_unlock_internal(mp, 0));
24480Sstevel@tonic-gate }
24490Sstevel@tonic-gate 
24500Sstevel@tonic-gate /*
24510Sstevel@tonic-gate  * Internally to the library, almost all mutex lock/unlock actions
24520Sstevel@tonic-gate  * go through these lmutex_ functions, to protect critical regions.
24530Sstevel@tonic-gate  * We replicate a bit of code from __mutex_lock() and __mutex_unlock()
24540Sstevel@tonic-gate  * to make these functions faster since we know that the mutex type
24550Sstevel@tonic-gate  * of all internal locks is USYNC_THREAD.  We also know that internal
24560Sstevel@tonic-gate  * locking can never fail, so we panic if it does.
24570Sstevel@tonic-gate  */
24580Sstevel@tonic-gate void
24590Sstevel@tonic-gate lmutex_lock(mutex_t *mp)
24600Sstevel@tonic-gate {
24610Sstevel@tonic-gate 	ulwp_t *self = curthread;
24620Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
24630Sstevel@tonic-gate 
24640Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
24650Sstevel@tonic-gate 
24660Sstevel@tonic-gate 	enter_critical(self);
24670Sstevel@tonic-gate 	/*
24680Sstevel@tonic-gate 	 * Optimize the case of no lock statistics and only a single thread.
24690Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
24700Sstevel@tonic-gate 	 */
24710Sstevel@tonic-gate 	if (udp->uberflags.uf_all == 0) {
24720Sstevel@tonic-gate 		/*
24730Sstevel@tonic-gate 		 * Only one thread exists; the mutex must be free.
24740Sstevel@tonic-gate 		 */
24750Sstevel@tonic-gate 		ASSERT(mp->mutex_lockw == 0);
24760Sstevel@tonic-gate 		mp->mutex_lockw = LOCKSET;
24770Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
24780Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
24790Sstevel@tonic-gate 	} else {
24800Sstevel@tonic-gate 		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
24810Sstevel@tonic-gate 
24820Sstevel@tonic-gate 		if (!self->ul_schedctl_called)
24830Sstevel@tonic-gate 			(void) setup_schedctl();
24840Sstevel@tonic-gate 
24850Sstevel@tonic-gate 		if (set_lock_byte(&mp->mutex_lockw) == 0) {
24860Sstevel@tonic-gate 			mp->mutex_owner = (uintptr_t)self;
24870Sstevel@tonic-gate 			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
24884613Sraf 		} else if (mutex_trylock_adaptive(mp, 1) != 0) {
24890Sstevel@tonic-gate 			(void) mutex_lock_queue(self, msp, mp, NULL);
24900Sstevel@tonic-gate 		}
24910Sstevel@tonic-gate 
24920Sstevel@tonic-gate 		if (msp)
24930Sstevel@tonic-gate 			record_begin_hold(msp);
24940Sstevel@tonic-gate 	}
24950Sstevel@tonic-gate }
24960Sstevel@tonic-gate 
24970Sstevel@tonic-gate void
24980Sstevel@tonic-gate lmutex_unlock(mutex_t *mp)
24990Sstevel@tonic-gate {
25000Sstevel@tonic-gate 	ulwp_t *self = curthread;
25010Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
25020Sstevel@tonic-gate 
25030Sstevel@tonic-gate 	ASSERT(mp->mutex_type == USYNC_THREAD);
25040Sstevel@tonic-gate 
25050Sstevel@tonic-gate 	/*
25060Sstevel@tonic-gate 	 * Optimize the case of no lock statistics and only a single thread.
25070Sstevel@tonic-gate 	 * (Most likely a traditional single-threaded application.)
25080Sstevel@tonic-gate 	 */
25090Sstevel@tonic-gate 	if (udp->uberflags.uf_all == 0) {
25100Sstevel@tonic-gate 		/*
25110Sstevel@tonic-gate 		 * Only one thread exists so there can be no waiters.
25120Sstevel@tonic-gate 		 */
25130Sstevel@tonic-gate 		mp->mutex_owner = 0;
25140Sstevel@tonic-gate 		mp->mutex_lockword = 0;
25150Sstevel@tonic-gate 		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
25160Sstevel@tonic-gate 	} else {
25170Sstevel@tonic-gate 		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
25180Sstevel@tonic-gate 		lwpid_t lwpid;
25190Sstevel@tonic-gate 
25200Sstevel@tonic-gate 		if (msp)
25210Sstevel@tonic-gate 			(void) record_hold_time(msp);
25224574Sraf 		if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
25230Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid);
25240Sstevel@tonic-gate 			preempt(self);
25250Sstevel@tonic-gate 		}
25260Sstevel@tonic-gate 	}
25270Sstevel@tonic-gate 	exit_critical(self);
25280Sstevel@tonic-gate }
25290Sstevel@tonic-gate 
25302248Sraf /*
25312248Sraf  * For specialized code in libc, like the asynchronous i/o code,
25322248Sraf  * the following sig_*() locking primitives are used in order
25332248Sraf  * to make the code async-signal-safe.  Signals are
25342248Sraf  * deferred while locks acquired by these functions are held.
25352248Sraf  */
25362248Sraf void
25372248Sraf sig_mutex_lock(mutex_t *mp)
25382248Sraf {
25392248Sraf 	sigoff(curthread);
25402248Sraf 	(void) _private_mutex_lock(mp);
25412248Sraf }
25422248Sraf 
25432248Sraf void
25442248Sraf sig_mutex_unlock(mutex_t *mp)
25452248Sraf {
25462248Sraf 	(void) _private_mutex_unlock(mp);
25472248Sraf 	sigon(curthread);
25482248Sraf }
25492248Sraf 
25502248Sraf int
25512248Sraf sig_mutex_trylock(mutex_t *mp)
25522248Sraf {
25532248Sraf 	int error;
25542248Sraf 
25552248Sraf 	sigoff(curthread);
25562248Sraf 	if ((error = _private_mutex_trylock(mp)) != 0)
25572248Sraf 		sigon(curthread);
25582248Sraf 	return (error);
25592248Sraf }
25602248Sraf 
25612248Sraf /*
25622248Sraf  * sig_cond_wait() is a cancellation point.
25632248Sraf  */
25642248Sraf int
25652248Sraf sig_cond_wait(cond_t *cv, mutex_t *mp)
25662248Sraf {
25672248Sraf 	int error;
25682248Sraf 
25692248Sraf 	ASSERT(curthread->ul_sigdefer != 0);
25702248Sraf 	_private_testcancel();
25715891Sraf 	error = __cond_wait(cv, mp);
25722248Sraf 	if (error == EINTR && curthread->ul_cursig) {
25732248Sraf 		sig_mutex_unlock(mp);
25742248Sraf 		/* take the deferred signal here */
25752248Sraf 		sig_mutex_lock(mp);
25762248Sraf 	}
25772248Sraf 	_private_testcancel();
25782248Sraf 	return (error);
25792248Sraf }
25802248Sraf 
25812248Sraf /*
25822248Sraf  * sig_cond_reltimedwait() is a cancellation point.
25832248Sraf  */
25842248Sraf int
25852248Sraf sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
25862248Sraf {
25872248Sraf 	int error;
25882248Sraf 
25892248Sraf 	ASSERT(curthread->ul_sigdefer != 0);
25902248Sraf 	_private_testcancel();
25915891Sraf 	error = __cond_reltimedwait(cv, mp, ts);
25922248Sraf 	if (error == EINTR && curthread->ul_cursig) {
25932248Sraf 		sig_mutex_unlock(mp);
25942248Sraf 		/* take the deferred signal here */
25952248Sraf 		sig_mutex_lock(mp);
25962248Sraf 	}
25972248Sraf 	_private_testcancel();
25982248Sraf 	return (error);
25992248Sraf }
26002248Sraf 
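/*
 * Example: a hedged sketch (not actual libc code) of how the sig_*()
 * primitives above might protect state shared with asynchronous i/o
 * completion code.  The names aio_queue_lock, aio_cv and aio_pending
 * are hypothetical.
 */
#if 0	/* illustrative sketch only; not compiled into libc */
static mutex_t aio_queue_lock = DEFAULTMUTEX;
static cond_t aio_cv = DEFAULTCV;
static int aio_pending;

static void
aio_wait_for_work(void)
{
	sig_mutex_lock(&aio_queue_lock);	/* signals now deferred */
	while (aio_pending == 0)
		(void) sig_cond_wait(&aio_cv, &aio_queue_lock);
	aio_pending--;
	sig_mutex_unlock(&aio_queue_lock);	/* deferred signals taken */
}
#endif
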
26015891Sraf /*
26025891Sraf  * For specialized code in libc, like the stdio code,
26035891Sraf  * the following cancel_safe_*() locking primitives are used in
26045891Sraf  * order to make the code cancellation-safe.  Cancellation is
26055891Sraf  * deferred while locks acquired by these functions are held.
26065891Sraf  */
26075891Sraf void
26085891Sraf cancel_safe_mutex_lock(mutex_t *mp)
26095891Sraf {
26105891Sraf 	(void) _private_mutex_lock(mp);
26115891Sraf 	curthread->ul_libc_locks++;
26125891Sraf }
26135891Sraf 
26145891Sraf int
26155891Sraf cancel_safe_mutex_trylock(mutex_t *mp)
26165891Sraf {
26175891Sraf 	int error;
26185891Sraf 
26195891Sraf 	if ((error = _private_mutex_trylock(mp)) == 0)
26205891Sraf 		curthread->ul_libc_locks++;
26215891Sraf 	return (error);
26225891Sraf }
26235891Sraf 
26245891Sraf void
26255891Sraf cancel_safe_mutex_unlock(mutex_t *mp)
26265891Sraf {
26275891Sraf 	ulwp_t *self = curthread;
26285891Sraf 
26295891Sraf 	ASSERT(self->ul_libc_locks != 0);
26305891Sraf 
26315891Sraf 	(void) _private_mutex_unlock(mp);
26325891Sraf 
26335891Sraf 	/*
26345891Sraf 	 * Decrement the count of locks held by cancel_safe_mutex_lock().
26355891Sraf 	 * If the count drops to zero, we are in a position to terminate
26365891Sraf 	 * cleanly: if there is a pending cancellation, cancellation is
26375891Sraf 	 * not disabled, and we received EINTR from a recent system call,
26385891Sraf 	 * then perform the cancellation action now.
26395891Sraf 	 */
26405891Sraf 	if (--self->ul_libc_locks == 0 &&
26415891Sraf 	    !(self->ul_vfork | self->ul_nocancel |
26425891Sraf 	    self->ul_critical | self->ul_sigdefer) &&
26435891Sraf 	    cancel_active())
26445891Sraf 		_pthread_exit(PTHREAD_CANCELED);
26455891Sraf }
26465891Sraf 
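/*
 * Example: a hedged sketch (not actual libc code) of how the
 * cancel_safe_*() primitives above might guard an stdio-like
 * critical section, so that a pending cancellation is acted on
 * only after the lock has been dropped.  The lock name and the
 * body are hypothetical.
 */
#if 0	/* illustrative sketch only; not compiled into libc */
static mutex_t stdio_lock = DEFAULTMUTEX;

static void
stdio_like_operation(void)
{
	cancel_safe_mutex_lock(&stdio_lock);	/* cancellation deferred */
	/* ... manipulate the shared stdio-like state here ... */
	cancel_safe_mutex_unlock(&stdio_lock);	/* may act on cancellation */
}
#endif
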
26470Sstevel@tonic-gate static int
26480Sstevel@tonic-gate shared_mutex_held(mutex_t *mparg)
26490Sstevel@tonic-gate {
26500Sstevel@tonic-gate 	/*
26514574Sraf 	 * The 'volatile' is necessary to make sure the compiler doesn't
26524574Sraf 	 * reorder the tests of the various components of the mutex.
26534574Sraf 	 * They must be tested in this order:
26544574Sraf 	 *	mutex_lockw
26554574Sraf 	 *	mutex_owner
26564574Sraf 	 *	mutex_ownerpid
26574574Sraf 	 * This relies on the fact that everywhere mutex_lockw is cleared,
26584574Sraf 	 * mutex_owner and mutex_ownerpid are cleared before mutex_lockw
26594574Sraf 	 * is cleared, and that everywhere mutex_lockw is set, mutex_owner
26604574Sraf 	 * and mutex_ownerpid are set after mutex_lockw is set, and that
26614574Sraf 	 * mutex_lockw is set or cleared with a memory barrier.
26620Sstevel@tonic-gate 	 */
26630Sstevel@tonic-gate 	volatile mutex_t *mp = (volatile mutex_t *)mparg;
26640Sstevel@tonic-gate 	ulwp_t *self = curthread;
26650Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
26660Sstevel@tonic-gate 
26674574Sraf 	return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid);
26680Sstevel@tonic-gate }
26690Sstevel@tonic-gate 
26700Sstevel@tonic-gate /*
26710Sstevel@tonic-gate  * Some crufty old programs define their own version of _mutex_held()
26720Sstevel@tonic-gate  * to be simply return(1).  This breaks internal libc logic, so we
26730Sstevel@tonic-gate  * define a private version for exclusive use by libc, mutex_is_held(),
26740Sstevel@tonic-gate  * and also a new public function, __mutex_held(), to be used in new
26750Sstevel@tonic-gate  * code to circumvent these crufty old programs.
26760Sstevel@tonic-gate  */
26770Sstevel@tonic-gate #pragma weak mutex_held = mutex_is_held
26780Sstevel@tonic-gate #pragma weak _mutex_held = mutex_is_held
26790Sstevel@tonic-gate #pragma weak __mutex_held = mutex_is_held
26800Sstevel@tonic-gate int
26814574Sraf mutex_is_held(mutex_t *mparg)
26820Sstevel@tonic-gate {
26834574Sraf 	volatile mutex_t *mp = (volatile mutex_t *)mparg;
26844574Sraf 
26854574Sraf 	if (mparg->mutex_type & USYNC_PROCESS)
26864574Sraf 		return (shared_mutex_held(mparg));
26870Sstevel@tonic-gate 	return (MUTEX_OWNED(mp, curthread));
26880Sstevel@tonic-gate }
26890Sstevel@tonic-gate 
26900Sstevel@tonic-gate #pragma weak _private_mutex_destroy = __mutex_destroy
26910Sstevel@tonic-gate #pragma weak mutex_destroy = __mutex_destroy
26920Sstevel@tonic-gate #pragma weak _mutex_destroy = __mutex_destroy
26930Sstevel@tonic-gate #pragma weak pthread_mutex_destroy = __mutex_destroy
26940Sstevel@tonic-gate #pragma weak _pthread_mutex_destroy = __mutex_destroy
26950Sstevel@tonic-gate int
26960Sstevel@tonic-gate __mutex_destroy(mutex_t *mp)
26970Sstevel@tonic-gate {
26984574Sraf 	if (mp->mutex_type & USYNC_PROCESS)
26994574Sraf 		forget_lock(mp);
27004574Sraf 	(void) _memset(mp, 0, sizeof (*mp));
27010Sstevel@tonic-gate 	tdb_sync_obj_deregister(mp);
27020Sstevel@tonic-gate 	return (0);
27030Sstevel@tonic-gate }
27040Sstevel@tonic-gate 
27054574Sraf #pragma weak mutex_consistent = __mutex_consistent
27064574Sraf #pragma weak _mutex_consistent = __mutex_consistent
27074574Sraf #pragma weak pthread_mutex_consistent_np = __mutex_consistent
27084574Sraf #pragma weak _pthread_mutex_consistent_np = __mutex_consistent
27094574Sraf int
27104574Sraf __mutex_consistent(mutex_t *mp)
27114574Sraf {
27124574Sraf 	/*
27134574Sraf 	 * Do this only for an inconsistent, initialized robust lock
27144574Sraf 	 * that we hold.  For all other cases, return EINVAL.
27154574Sraf 	 */
27164574Sraf 	if (mutex_is_held(mp) &&
27174574Sraf 	    (mp->mutex_type & LOCK_ROBUST) &&
27184574Sraf 	    (mp->mutex_flag & LOCK_INITED) &&
27194574Sraf 	    (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
27204574Sraf 		mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
27214574Sraf 		mp->mutex_rcount = 0;
27224574Sraf 		return (0);
27234574Sraf 	}
27244574Sraf 	return (EINVAL);
27254574Sraf }
27264574Sraf 
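/*
 * Example: a hedged sketch of how an application recovers a robust
 * mutex whose previous owner died.  The lock returns EOWNERDEAD to
 * the new owner, who repairs the protected state, marks the mutex
 * consistent, and proceeds.  repair_shared_state() is hypothetical
 * application code.
 */
#if 0	/* illustrative sketch only; not compiled into libc */
#include <pthread.h>
#include <errno.h>

extern void repair_shared_state(void);

static void
lock_and_recover(pthread_mutex_t *mp)
{
	switch (pthread_mutex_lock(mp)) {
	case 0:				/* normal acquisition */
		break;
	case EOWNERDEAD:		/* we hold the lock; state suspect */
		repair_shared_state();
		(void) pthread_mutex_consistent_np(mp);
		break;
	default:			/* e.g. ENOTRECOVERABLE: not held */
		return;
	}
	/* ... use the protected state ... */
	(void) pthread_mutex_unlock(mp);
}
#endif
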
27270Sstevel@tonic-gate /*
27280Sstevel@tonic-gate  * Spin locks are separate from ordinary mutexes,
27290Sstevel@tonic-gate  * but we use the same data structure for them.
27300Sstevel@tonic-gate  */
27310Sstevel@tonic-gate 
27320Sstevel@tonic-gate #pragma weak pthread_spin_init = _pthread_spin_init
27330Sstevel@tonic-gate int
27340Sstevel@tonic-gate _pthread_spin_init(pthread_spinlock_t *lock, int pshared)
27350Sstevel@tonic-gate {
27360Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
27370Sstevel@tonic-gate 
27380Sstevel@tonic-gate 	(void) _memset(mp, 0, sizeof (*mp));
27390Sstevel@tonic-gate 	if (pshared == PTHREAD_PROCESS_SHARED)
27400Sstevel@tonic-gate 		mp->mutex_type = USYNC_PROCESS;
27410Sstevel@tonic-gate 	else
27420Sstevel@tonic-gate 		mp->mutex_type = USYNC_THREAD;
27430Sstevel@tonic-gate 	mp->mutex_flag = LOCK_INITED;
27440Sstevel@tonic-gate 	mp->mutex_magic = MUTEX_MAGIC;
27450Sstevel@tonic-gate 	return (0);
27460Sstevel@tonic-gate }
27470Sstevel@tonic-gate 
27480Sstevel@tonic-gate #pragma weak pthread_spin_destroy = _pthread_spin_destroy
27490Sstevel@tonic-gate int
27500Sstevel@tonic-gate _pthread_spin_destroy(pthread_spinlock_t *lock)
27510Sstevel@tonic-gate {
27520Sstevel@tonic-gate 	(void) _memset(lock, 0, sizeof (*lock));
27530Sstevel@tonic-gate 	return (0);
27540Sstevel@tonic-gate }
27550Sstevel@tonic-gate 
27560Sstevel@tonic-gate #pragma weak pthread_spin_trylock = _pthread_spin_trylock
27570Sstevel@tonic-gate int
27580Sstevel@tonic-gate _pthread_spin_trylock(pthread_spinlock_t *lock)
27590Sstevel@tonic-gate {
27600Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
27610Sstevel@tonic-gate 	ulwp_t *self = curthread;
27620Sstevel@tonic-gate 	int error = 0;
27630Sstevel@tonic-gate 
27640Sstevel@tonic-gate 	no_preempt(self);
27650Sstevel@tonic-gate 	if (set_lock_byte(&mp->mutex_lockw) != 0)
27660Sstevel@tonic-gate 		error = EBUSY;
27670Sstevel@tonic-gate 	else {
27680Sstevel@tonic-gate 		mp->mutex_owner = (uintptr_t)self;
27690Sstevel@tonic-gate 		if (mp->mutex_type == USYNC_PROCESS)
27700Sstevel@tonic-gate 			mp->mutex_ownerpid = self->ul_uberdata->pid;
27710Sstevel@tonic-gate 		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
27720Sstevel@tonic-gate 	}
27730Sstevel@tonic-gate 	preempt(self);
27740Sstevel@tonic-gate 	return (error);
27750Sstevel@tonic-gate }
27760Sstevel@tonic-gate 
27770Sstevel@tonic-gate #pragma weak pthread_spin_lock = _pthread_spin_lock
27780Sstevel@tonic-gate int
27790Sstevel@tonic-gate _pthread_spin_lock(pthread_spinlock_t *lock)
27800Sstevel@tonic-gate {
27814574Sraf 	mutex_t *mp = (mutex_t *)lock;
27824574Sraf 	ulwp_t *self = curthread;
27834574Sraf 	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
27844574Sraf 	int count = 0;
27854574Sraf 
27864574Sraf 	ASSERT(!self->ul_critical || self->ul_bindflags);
27874574Sraf 
27884574Sraf 	DTRACE_PROBE1(plockstat, mutex__spin, mp);
27894574Sraf 
27900Sstevel@tonic-gate 	/*
27910Sstevel@tonic-gate 	 * We don't care whether the owner is running on a processor.
27920Sstevel@tonic-gate 	 * We just spin because that's what this interface requires.
27930Sstevel@tonic-gate 	 */
27940Sstevel@tonic-gate 	for (;;) {
27950Sstevel@tonic-gate 		if (*lockp == 0) {	/* lock byte appears to be clear */
27964574Sraf 			no_preempt(self);
27974574Sraf 			if (set_lock_byte(lockp) == 0)
27984574Sraf 				break;
27994574Sraf 			preempt(self);
28000Sstevel@tonic-gate 		}
28015629Sraf 		if (count < INT_MAX)
28025629Sraf 			count++;
28030Sstevel@tonic-gate 		SMT_PAUSE();
28040Sstevel@tonic-gate 	}
28054574Sraf 	mp->mutex_owner = (uintptr_t)self;
28064574Sraf 	if (mp->mutex_type == USYNC_PROCESS)
28074574Sraf 		mp->mutex_ownerpid = self->ul_uberdata->pid;
28084574Sraf 	preempt(self);
28095629Sraf 	if (count) {
28105629Sraf 		DTRACE_PROBE2(plockstat, mutex__spun, 1, count);
28115629Sraf 	}
28124574Sraf 	DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
28134574Sraf 	return (0);
28140Sstevel@tonic-gate }
28150Sstevel@tonic-gate 
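/*
 * The loop above is a classic test-and-test-and-set spin: it spins
 * on ordinary loads until the lock byte looks clear and only then
 * attempts the atomic set_lock_byte(), which keeps the cache line
 * shared while waiting.  A hedged sketch of the same shape in C11
 * atomics notation (this file itself predates C11):
 */
#if 0	/* illustrative sketch only; not compiled into libc */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool lock_byte;

static void
ttas_lock(void)
{
	for (;;) {
		/* test: spin on plain loads while the lock looks held */
		while (atomic_load_explicit(&lock_byte, memory_order_relaxed))
			;	/* an SMT_PAUSE() equivalent belongs here */
		/* test-and-set: a single atomic exchange attempt */
		if (!atomic_exchange_explicit(&lock_byte, true,
		    memory_order_acquire))
			return;	/* acquired */
	}
}

static void
ttas_unlock(void)
{
	atomic_store_explicit(&lock_byte, false, memory_order_release);
}
#endif
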
28160Sstevel@tonic-gate #pragma weak pthread_spin_unlock = _pthread_spin_unlock
28170Sstevel@tonic-gate int
28180Sstevel@tonic-gate _pthread_spin_unlock(pthread_spinlock_t *lock)
28190Sstevel@tonic-gate {
28200Sstevel@tonic-gate 	mutex_t *mp = (mutex_t *)lock;
28210Sstevel@tonic-gate 	ulwp_t *self = curthread;
28220Sstevel@tonic-gate 
28230Sstevel@tonic-gate 	no_preempt(self);
28240Sstevel@tonic-gate 	mp->mutex_owner = 0;
28250Sstevel@tonic-gate 	mp->mutex_ownerpid = 0;
28260Sstevel@tonic-gate 	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
28274570Sraf 	(void) atomic_swap_32(&mp->mutex_lockword, 0);
28280Sstevel@tonic-gate 	preempt(self);
28290Sstevel@tonic-gate 	return (0);
28300Sstevel@tonic-gate }
28310Sstevel@tonic-gate 
28325629Sraf #define	INITIAL_LOCKS	8	/* initial size of ul_heldlocks.array */
28334574Sraf 
28344574Sraf /*
28354574Sraf  * Find/allocate an entry for 'lock' in our array of held locks.
28364574Sraf  */
28374574Sraf static mutex_t **
28384574Sraf find_lock_entry(mutex_t *lock)
28394574Sraf {
28404574Sraf 	ulwp_t *self = curthread;
28414574Sraf 	mutex_t **remembered = NULL;
28424574Sraf 	mutex_t **lockptr;
28434574Sraf 	uint_t nlocks;
28444574Sraf 
28454574Sraf 	if ((nlocks = self->ul_heldlockcnt) != 0)
28464574Sraf 		lockptr = self->ul_heldlocks.array;
28474574Sraf 	else {
28484574Sraf 		nlocks = 1;
28494574Sraf 		lockptr = &self->ul_heldlocks.single;
28504574Sraf 	}
28514574Sraf 
28524574Sraf 	for (; nlocks; nlocks--, lockptr++) {
28534574Sraf 		if (*lockptr == lock)
28544574Sraf 			return (lockptr);
28554574Sraf 		if (*lockptr == NULL && remembered == NULL)
28564574Sraf 			remembered = lockptr;
28574574Sraf 	}
28584574Sraf 	if (remembered != NULL) {
28594574Sraf 		*remembered = lock;
28604574Sraf 		return (remembered);
28614574Sraf 	}
28624574Sraf 
28634574Sraf 	/*
28644574Sraf 	 * No entry available.  Allocate more space, converting
28654574Sraf 	 * the single entry into an array of entries if necessary.
28664574Sraf 	 */
28674574Sraf 	if ((nlocks = self->ul_heldlockcnt) == 0) {
28684574Sraf 		/*
28694574Sraf 		 * Initial allocation of the array.
28704574Sraf 		 * Convert the single entry into an array.
28714574Sraf 		 */
28724574Sraf 		self->ul_heldlockcnt = nlocks = INITIAL_LOCKS;
28734574Sraf 		lockptr = lmalloc(nlocks * sizeof (mutex_t *));
28744574Sraf 		/*
28754574Sraf 		 * The single entry becomes the first entry in the array.
28764574Sraf 		 */
28774574Sraf 		*lockptr = self->ul_heldlocks.single;
28784574Sraf 		self->ul_heldlocks.array = lockptr;
28794574Sraf 		/*
28804574Sraf 		 * Return the next available entry in the array.
28814574Sraf 		 */
28824574Sraf 		*++lockptr = lock;
28834574Sraf 		return (lockptr);
28844574Sraf 	}
28854574Sraf 	/*
28864574Sraf 	 * Reallocate the array, double the size each time.
28874574Sraf 	 */
28884574Sraf 	lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *));
28894574Sraf 	(void) _memcpy(lockptr, self->ul_heldlocks.array,
28904574Sraf 	    nlocks * sizeof (mutex_t *));
28914574Sraf 	lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
28924574Sraf 	self->ul_heldlocks.array = lockptr;
28934574Sraf 	self->ul_heldlockcnt *= 2;
28944574Sraf 	/*
28954574Sraf 	 * Return the next available entry in the newly allocated array.
28964574Sraf 	 */
28974574Sraf 	*(lockptr += nlocks) = lock;
28984574Sraf 	return (lockptr);
28994574Sraf }
29004574Sraf 
29014574Sraf /*
29024574Sraf  * Insert 'lock' into our list of held locks.
29034574Sraf  * Currently only used for LOCK_ROBUST mutexes.
29044574Sraf  */
29054574Sraf void
29064574Sraf remember_lock(mutex_t *lock)
29074574Sraf {
29084574Sraf 	(void) find_lock_entry(lock);
29094574Sraf }
29104574Sraf 
29114574Sraf /*
29124574Sraf  * Remove 'lock' from our list of held locks.
29134574Sraf  * Currently only used for LOCK_ROBUST mutexes.
29144574Sraf  */
29154574Sraf void
29164574Sraf forget_lock(mutex_t *lock)
29174574Sraf {
29184574Sraf 	*find_lock_entry(lock) = NULL;
29194574Sraf }
29204574Sraf 
29214574Sraf /*
29224574Sraf  * Free the array of held locks.
29234574Sraf  */
29244574Sraf void
29254574Sraf heldlock_free(ulwp_t *ulwp)
29264574Sraf {
29274574Sraf 	uint_t nlocks;
29284574Sraf 
29294574Sraf 	if ((nlocks = ulwp->ul_heldlockcnt) != 0)
29304574Sraf 		lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
29314574Sraf 	ulwp->ul_heldlockcnt = 0;
29324574Sraf 	ulwp->ul_heldlocks.array = NULL;
29334574Sraf }
29344574Sraf 
29354574Sraf /*
29364574Sraf  * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD.
29374574Sraf  * Called from _thrp_exit() to deal with abandoned locks.
29384574Sraf  */
29394574Sraf void
29404574Sraf heldlock_exit(void)
29414574Sraf {
29424574Sraf 	ulwp_t *self = curthread;
29434574Sraf 	mutex_t **lockptr;
29444574Sraf 	uint_t nlocks;
29454574Sraf 	mutex_t *mp;
29464574Sraf 
29474574Sraf 	if ((nlocks = self->ul_heldlockcnt) != 0)
29484574Sraf 		lockptr = self->ul_heldlocks.array;
29494574Sraf 	else {
29504574Sraf 		nlocks = 1;
29514574Sraf 		lockptr = &self->ul_heldlocks.single;
29524574Sraf 	}
29534574Sraf 
29544574Sraf 	for (; nlocks; nlocks--, lockptr++) {
29554574Sraf 		/*
29564574Sraf 		 * The kernel takes care of transitioning held
29574574Sraf 		 * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD.
29584574Sraf 		 * We avoid that case here.
29594574Sraf 		 */
29604574Sraf 		if ((mp = *lockptr) != NULL &&
29614574Sraf 		    mutex_is_held(mp) &&
29624574Sraf 		    (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
29634574Sraf 		    LOCK_ROBUST) {
29644574Sraf 			mp->mutex_rcount = 0;
29654574Sraf 			if (!(mp->mutex_flag & LOCK_UNMAPPED))
29664574Sraf 				mp->mutex_flag |= LOCK_OWNERDEAD;
29674574Sraf 			(void) mutex_unlock_internal(mp, 1);
29684574Sraf 		}
29694574Sraf 	}
29704574Sraf 
29714574Sraf 	heldlock_free(self);
29724574Sraf }
29734574Sraf 
29740Sstevel@tonic-gate #pragma weak cond_init = _cond_init
29750Sstevel@tonic-gate /* ARGSUSED2 */
29760Sstevel@tonic-gate int
29770Sstevel@tonic-gate _cond_init(cond_t *cvp, int type, void *arg)
29780Sstevel@tonic-gate {
29790Sstevel@tonic-gate 	if (type != USYNC_THREAD && type != USYNC_PROCESS)
29800Sstevel@tonic-gate 		return (EINVAL);
29810Sstevel@tonic-gate 	(void) _memset(cvp, 0, sizeof (*cvp));
29820Sstevel@tonic-gate 	cvp->cond_type = (uint16_t)type;
29830Sstevel@tonic-gate 	cvp->cond_magic = COND_MAGIC;
29840Sstevel@tonic-gate 	return (0);
29850Sstevel@tonic-gate }
29860Sstevel@tonic-gate 
29870Sstevel@tonic-gate /*
29880Sstevel@tonic-gate  * cond_sleep_queue(): utility function for cond_wait_queue().
29890Sstevel@tonic-gate  *
29900Sstevel@tonic-gate  * Go to sleep on a condvar sleep queue, expect to be waked up
29910Sstevel@tonic-gate  * Go to sleep on a condvar sleep queue, expecting to be woken up
29920Sstevel@tonic-gate  * by someone calling cond_signal() or cond_broadcast(), or due
29930Sstevel@tonic-gate  * to receiving a UNIX signal or being cancelled, or simply
29940Sstevel@tonic-gate  * due to a spurious wakeup (like someone calling forkall()).
29950Sstevel@tonic-gate  * The associated mutex is *not* reacquired before returning.
29960Sstevel@tonic-gate  * That must be done by the caller of cond_sleep_queue().
29970Sstevel@tonic-gate  */
29984574Sraf static int
29990Sstevel@tonic-gate cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
30000Sstevel@tonic-gate {
30010Sstevel@tonic-gate 	ulwp_t *self = curthread;
30020Sstevel@tonic-gate 	queue_head_t *qp;
30030Sstevel@tonic-gate 	queue_head_t *mqp;
30040Sstevel@tonic-gate 	lwpid_t lwpid;
30050Sstevel@tonic-gate 	int signalled;
30060Sstevel@tonic-gate 	int error;
3007*6247Sraf 	int cv_wake;
30084574Sraf 	int release_all;
30090Sstevel@tonic-gate 
30100Sstevel@tonic-gate 	/*
30110Sstevel@tonic-gate 	 * Put ourself on the CV sleep queue, unlock the mutex, then
30120Sstevel@tonic-gate 	 * park ourself and unpark a candidate lwp to grab the mutex.
30130Sstevel@tonic-gate 	 * We must go onto the CV sleep queue before dropping the
30140Sstevel@tonic-gate 	 * mutex in order to guarantee atomicity of the operation.
30150Sstevel@tonic-gate 	 */
30160Sstevel@tonic-gate 	self->ul_sp = stkptr();
30170Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
3018*6247Sraf 	enqueue(qp, self, 0);
30190Sstevel@tonic-gate 	cvp->cond_waiters_user = 1;
30200Sstevel@tonic-gate 	self->ul_cvmutex = mp;
3021*6247Sraf 	self->ul_cv_wake = cv_wake = (tsp != NULL);
30220Sstevel@tonic-gate 	self->ul_signalled = 0;
30234574Sraf 	if (mp->mutex_flag & LOCK_OWNERDEAD) {
30244574Sraf 		mp->mutex_flag &= ~LOCK_OWNERDEAD;
30254574Sraf 		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
30264574Sraf 	}
30274574Sraf 	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
30284574Sraf 	lwpid = mutex_unlock_queue(mp, release_all);
30290Sstevel@tonic-gate 	for (;;) {
30300Sstevel@tonic-gate 		set_parking_flag(self, 1);
30310Sstevel@tonic-gate 		queue_unlock(qp);
30320Sstevel@tonic-gate 		if (lwpid != 0) {
30330Sstevel@tonic-gate 			lwpid = preempt_unpark(self, lwpid);
30340Sstevel@tonic-gate 			preempt(self);
30350Sstevel@tonic-gate 		}
30360Sstevel@tonic-gate 		/*
30370Sstevel@tonic-gate 		 * We may have a deferred signal present,
30380Sstevel@tonic-gate 		 * in which case we should return EINTR.
30390Sstevel@tonic-gate 		 * Also, we may have received a SIGCANCEL; if so
30400Sstevel@tonic-gate 		 * and we are cancelable we should return EINTR.
30410Sstevel@tonic-gate 		 * We force an immediate EINTR return from
30420Sstevel@tonic-gate 		 * __lwp_park() by turning our parking flag off.
30430Sstevel@tonic-gate 		 */
30440Sstevel@tonic-gate 		if (self->ul_cursig != 0 ||
30450Sstevel@tonic-gate 		    (self->ul_cancelable && self->ul_cancel_pending))
30460Sstevel@tonic-gate 			set_parking_flag(self, 0);
30470Sstevel@tonic-gate 		/*
30480Sstevel@tonic-gate 		 * __lwp_park() will return the residual time in tsp
30490Sstevel@tonic-gate 		 * if we are unparked before the timeout expires.
30500Sstevel@tonic-gate 		 */
30510Sstevel@tonic-gate 		error = __lwp_park(tsp, lwpid);
30520Sstevel@tonic-gate 		set_parking_flag(self, 0);
30530Sstevel@tonic-gate 		lwpid = 0;	/* unpark the other lwp only once */
30540Sstevel@tonic-gate 		/*
30550Sstevel@tonic-gate 		 * We were woken up by cond_signal(), cond_broadcast(),
30560Sstevel@tonic-gate 		 * by an interrupt or timeout (EINTR or ETIME),
30570Sstevel@tonic-gate 		 * or we may just have gotten a spurious wakeup.
30580Sstevel@tonic-gate 		 */
30590Sstevel@tonic-gate 		qp = queue_lock(cvp, CV);
3060*6247Sraf 		if (!cv_wake)
3061*6247Sraf 			mqp = queue_lock(mp, MX);
30620Sstevel@tonic-gate 		if (self->ul_sleepq == NULL)
30630Sstevel@tonic-gate 			break;
30640Sstevel@tonic-gate 		/*
30650Sstevel@tonic-gate 		 * We are on either the condvar sleep queue or the
30661893Sraf 		 * mutex sleep queue.  Break out of the sleep if we
30671893Sraf 		 * were interrupted or we timed out (EINTR or ETIME).
30680Sstevel@tonic-gate 		 * Else this is a spurious wakeup; continue the loop.
30690Sstevel@tonic-gate 		 */
3070*6247Sraf 		if (!cv_wake && self->ul_sleepq == mqp) { /* mutex queue */
30711893Sraf 			if (error) {
3072*6247Sraf 				mp->mutex_waiters = dequeue_self(mqp);
30731893Sraf 				break;
30741893Sraf 			}
30751893Sraf 			tsp = NULL;	/* no more timeout */
30761893Sraf 		} else if (self->ul_sleepq == qp) {	/* condvar queue */
30770Sstevel@tonic-gate 			if (error) {
3078*6247Sraf 				cvp->cond_waiters_user = dequeue_self(qp);
30790Sstevel@tonic-gate 				break;
30800Sstevel@tonic-gate 			}
30810Sstevel@tonic-gate 			/*
30820Sstevel@tonic-gate 			 * Else a spurious wakeup on the condvar queue.
30830Sstevel@tonic-gate 			 * __lwp_park() has already adjusted the timeout.
30840Sstevel@tonic-gate 			 */
30850Sstevel@tonic-gate 		} else {
30860Sstevel@tonic-gate 			thr_panic("cond_sleep_queue(): thread not on queue");
30870Sstevel@tonic-gate 		}
3088*6247Sraf 		if (!cv_wake)
3089*6247Sraf 			queue_unlock(mqp);
30900Sstevel@tonic-gate 	}
30910Sstevel@tonic-gate 
30920Sstevel@tonic-gate 	self->ul_sp = 0;
3093*6247Sraf 	self->ul_cv_wake = 0;
3094*6247Sraf 	ASSERT(self->ul_cvmutex == NULL);
30950Sstevel@tonic-gate 	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
30960Sstevel@tonic-gate 	    self->ul_wchan == NULL);
30970Sstevel@tonic-gate 
30980Sstevel@tonic-gate 	signalled = self->ul_signalled;
30990Sstevel@tonic-gate 	self->ul_signalled = 0;
31000Sstevel@tonic-gate 	queue_unlock(qp);
3101*6247Sraf 	if (!cv_wake)
3102*6247Sraf 		queue_unlock(mqp);
31030Sstevel@tonic-gate 
31040Sstevel@tonic-gate 	/*
31050Sstevel@tonic-gate 	 * If we were concurrently cond_signal()d and any of:
31060Sstevel@tonic-gate 	 * received a UNIX signal, were cancelled, or got a timeout,
31070Sstevel@tonic-gate 	 * then perform another cond_signal() to avoid consuming it.
31080Sstevel@tonic-gate 	 */
31090Sstevel@tonic-gate 	if (error && signalled)
31100Sstevel@tonic-gate 		(void) cond_signal_internal(cvp);
31110Sstevel@tonic-gate 
31120Sstevel@tonic-gate 	return (error);
31130Sstevel@tonic-gate }
31140Sstevel@tonic-gate 
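/*
 * Because a sleeper can be woken spuriously (see the comment above
 * cond_sleep_queue()), callers of cond_wait() must re-evaluate their
 * predicate in a loop.  A hedged sketch of the canonical calling
 * pattern; the names m, cv and ready are hypothetical:
 */
#if 0	/* illustrative sketch only; not compiled into libc */
static mutex_t m = DEFAULTMUTEX;
static cond_t cv = DEFAULTCV;
static int ready;

static void
wait_until_ready(void)
{
	(void) mutex_lock(&m);
	while (!ready)		/* guards against spurious wakeups */
		(void) cond_wait(&cv, &m);
	(void) mutex_unlock(&m);
}
#endif
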
31150Sstevel@tonic-gate int
31165629Sraf cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
31170Sstevel@tonic-gate {
31180Sstevel@tonic-gate 	ulwp_t *self = curthread;
31190Sstevel@tonic-gate 	int error;
31204574Sraf 	int merror;
31210Sstevel@tonic-gate 
31220Sstevel@tonic-gate 	/*
31230Sstevel@tonic-gate 	 * The old thread library was programmed to defer signals
31240Sstevel@tonic-gate 	 * while in cond_wait() so that the associated mutex would
31250Sstevel@tonic-gate 	 * be guaranteed to be held when the application signal
31260Sstevel@tonic-gate 	 * handler was invoked.
31270Sstevel@tonic-gate 	 *
31280Sstevel@tonic-gate 	 * We do not behave this way by default; the state of the
31290Sstevel@tonic-gate 	 * associated mutex in the signal handler is undefined.
31300Sstevel@tonic-gate 	 *
31310Sstevel@tonic-gate 	 * To accommodate applications that depend on the old
31320Sstevel@tonic-gate 	 * behavior, the _THREAD_COND_WAIT_DEFER environment
31330Sstevel@tonic-gate 	 * variable can be set to 1 and we will behave in the
31340Sstevel@tonic-gate 	 * old way with respect to cond_wait().
31350Sstevel@tonic-gate 	 */
31360Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
31370Sstevel@tonic-gate 		sigoff(self);
31380Sstevel@tonic-gate 
31390Sstevel@tonic-gate 	error = cond_sleep_queue(cvp, mp, tsp);
31400Sstevel@tonic-gate 
31410Sstevel@tonic-gate 	/*
31420Sstevel@tonic-gate 	 * Reacquire the mutex.
31430Sstevel@tonic-gate 	 */
31445629Sraf 	if ((merror = mutex_lock_impl(mp, NULL)) != 0)
31454574Sraf 		error = merror;
31460Sstevel@tonic-gate 
31470Sstevel@tonic-gate 	/*
31480Sstevel@tonic-gate 	 * Take any deferred signal now, after we have reacquired the mutex.
31490Sstevel@tonic-gate 	 */
31500Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
31510Sstevel@tonic-gate 		sigon(self);
31520Sstevel@tonic-gate 
31530Sstevel@tonic-gate 	return (error);
31540Sstevel@tonic-gate }
31550Sstevel@tonic-gate 
31560Sstevel@tonic-gate /*
31570Sstevel@tonic-gate  * cond_sleep_kernel(): utility function for cond_wait_kernel().
31580Sstevel@tonic-gate  * See the comment ahead of cond_sleep_queue(), above.
31590Sstevel@tonic-gate  */
31604574Sraf static int
31610Sstevel@tonic-gate cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
31620Sstevel@tonic-gate {
31630Sstevel@tonic-gate 	int mtype = mp->mutex_type;
31640Sstevel@tonic-gate 	ulwp_t *self = curthread;
31650Sstevel@tonic-gate 	int error;
31660Sstevel@tonic-gate 
31674574Sraf 	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
31684574Sraf 		_ceil_prio_waive();
31690Sstevel@tonic-gate 
31700Sstevel@tonic-gate 	self->ul_sp = stkptr();
31710Sstevel@tonic-gate 	self->ul_wchan = cvp;
31720Sstevel@tonic-gate 	mp->mutex_owner = 0;
31736057Sraf 	/* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */
3174*6247Sraf 	if (mtype & LOCK_PRIO_INHERIT) {
31750Sstevel@tonic-gate 		mp->mutex_lockw = LOCKCLEAR;
3176*6247Sraf 		self->ul_pilocks--;
3177*6247Sraf 	}
31780Sstevel@tonic-gate 	/*
31790Sstevel@tonic-gate 	 * ___lwp_cond_wait() returns immediately with EINTR if
31800Sstevel@tonic-gate 	 * set_parking_flag(self,0) is called on this lwp before it
31810Sstevel@tonic-gate 	 * goes to sleep in the kernel.  sigacthandler() calls this
31820Sstevel@tonic-gate 	 * when a deferred signal is noted.  This assures that we don't
31830Sstevel@tonic-gate 	 * get stuck in ___lwp_cond_wait() with all signals blocked
31840Sstevel@tonic-gate 	 * due to taking a deferred signal before going to sleep.
31850Sstevel@tonic-gate 	 */
31860Sstevel@tonic-gate 	set_parking_flag(self, 1);
31870Sstevel@tonic-gate 	if (self->ul_cursig != 0 ||
31880Sstevel@tonic-gate 	    (self->ul_cancelable && self->ul_cancel_pending))
31890Sstevel@tonic-gate 		set_parking_flag(self, 0);
31900Sstevel@tonic-gate 	error = ___lwp_cond_wait(cvp, mp, tsp, 1);
31910Sstevel@tonic-gate 	set_parking_flag(self, 0);
31920Sstevel@tonic-gate 	self->ul_sp = 0;
31930Sstevel@tonic-gate 	self->ul_wchan = NULL;
31940Sstevel@tonic-gate 	return (error);
31950Sstevel@tonic-gate }
31960Sstevel@tonic-gate 
31970Sstevel@tonic-gate int
31980Sstevel@tonic-gate cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
31990Sstevel@tonic-gate {
32000Sstevel@tonic-gate 	ulwp_t *self = curthread;
32010Sstevel@tonic-gate 	int error;
32020Sstevel@tonic-gate 	int merror;
32030Sstevel@tonic-gate 
32040Sstevel@tonic-gate 	/*
32050Sstevel@tonic-gate 	 * See the large comment in cond_wait_queue(), above.
32060Sstevel@tonic-gate 	 */
32070Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
32080Sstevel@tonic-gate 		sigoff(self);
32090Sstevel@tonic-gate 
32100Sstevel@tonic-gate 	error = cond_sleep_kernel(cvp, mp, tsp);
32110Sstevel@tonic-gate 
32120Sstevel@tonic-gate 	/*
32130Sstevel@tonic-gate 	 * Override the return code from ___lwp_cond_wait()
32140Sstevel@tonic-gate 	 * with any non-zero return code from mutex_lock().
32150Sstevel@tonic-gate 	 * This addresses robust lock failures in particular;
32160Sstevel@tonic-gate 	 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
32170Sstevel@tonic-gate 	 * errors in order to take corrective action.
32180Sstevel@tonic-gate 	 */
32195629Sraf 	if ((merror = mutex_lock_impl(mp, NULL)) != 0)
32200Sstevel@tonic-gate 		error = merror;
32210Sstevel@tonic-gate 
32220Sstevel@tonic-gate 	/*
32230Sstevel@tonic-gate 	 * Take any deferred signal now, after we have reacquired the mutex.
32240Sstevel@tonic-gate 	 */
32250Sstevel@tonic-gate 	if (self->ul_cond_wait_defer)
32260Sstevel@tonic-gate 		sigon(self);
32270Sstevel@tonic-gate 
32280Sstevel@tonic-gate 	return (error);
32290Sstevel@tonic-gate }
32300Sstevel@tonic-gate 
32310Sstevel@tonic-gate /*
32320Sstevel@tonic-gate  * Common code for _cond_wait() and _cond_timedwait()
32330Sstevel@tonic-gate  */
32340Sstevel@tonic-gate int
32350Sstevel@tonic-gate cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
32360Sstevel@tonic-gate {
32370Sstevel@tonic-gate 	int mtype = mp->mutex_type;
32380Sstevel@tonic-gate 	hrtime_t begin_sleep = 0;
32390Sstevel@tonic-gate 	ulwp_t *self = curthread;
32400Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
32410Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
32420Sstevel@tonic-gate 	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
32430Sstevel@tonic-gate 	uint8_t rcount;
32440Sstevel@tonic-gate 	int error = 0;
32450Sstevel@tonic-gate 
32460Sstevel@tonic-gate 	/*
32470Sstevel@tonic-gate 	 * The SUSv3 POSIX spec for pthread_cond_timedwait() states:
32480Sstevel@tonic-gate 	 *	Except in the case of [ETIMEDOUT], all these error checks
32490Sstevel@tonic-gate 	 *	shall act as if they were performed immediately at the
32500Sstevel@tonic-gate 	 *	beginning of processing for the function and shall cause
32510Sstevel@tonic-gate 	 *	an error return, in effect, prior to modifying the state
32520Sstevel@tonic-gate 	 *	of the mutex specified by mutex or the condition variable
32530Sstevel@tonic-gate 	 *	specified by cond.
32540Sstevel@tonic-gate 	 * Therefore, we must return EINVAL now if the timeout is invalid.
32550Sstevel@tonic-gate 	 */
32560Sstevel@tonic-gate 	if (tsp != NULL &&
32570Sstevel@tonic-gate 	    (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
32580Sstevel@tonic-gate 		return (EINVAL);
32590Sstevel@tonic-gate 
32600Sstevel@tonic-gate 	if (__td_event_report(self, TD_SLEEP, udp)) {
32610Sstevel@tonic-gate 		self->ul_sp = stkptr();
32620Sstevel@tonic-gate 		self->ul_wchan = cvp;
32630Sstevel@tonic-gate 		self->ul_td_evbuf.eventnum = TD_SLEEP;
32640Sstevel@tonic-gate 		self->ul_td_evbuf.eventdata = cvp;
32650Sstevel@tonic-gate 		tdb_event(TD_SLEEP, udp);
32660Sstevel@tonic-gate 		self->ul_sp = 0;
32670Sstevel@tonic-gate 	}
32680Sstevel@tonic-gate 	if (csp) {
32690Sstevel@tonic-gate 		if (tsp)
32700Sstevel@tonic-gate 			tdb_incr(csp->cond_timedwait);
32710Sstevel@tonic-gate 		else
32720Sstevel@tonic-gate 			tdb_incr(csp->cond_wait);
32730Sstevel@tonic-gate 	}
32740Sstevel@tonic-gate 	if (msp)
32750Sstevel@tonic-gate 		begin_sleep = record_hold_time(msp);
32760Sstevel@tonic-gate 	else if (csp)
32770Sstevel@tonic-gate 		begin_sleep = gethrtime();
32780Sstevel@tonic-gate 
32790Sstevel@tonic-gate 	if (self->ul_error_detection) {
32800Sstevel@tonic-gate 		if (!mutex_is_held(mp))
32810Sstevel@tonic-gate 			lock_error(mp, "cond_wait", cvp, NULL);
32820Sstevel@tonic-gate 		if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
32830Sstevel@tonic-gate 			lock_error(mp, "recursive mutex in cond_wait",
32845629Sraf 			    cvp, NULL);
32850Sstevel@tonic-gate 		if (cvp->cond_type & USYNC_PROCESS) {
32864574Sraf 			if (!(mtype & USYNC_PROCESS))
32870Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
32885629Sraf 				    "condvar process-shared, "
32895629Sraf 				    "mutex process-private");
32900Sstevel@tonic-gate 		} else {
32914574Sraf 			if (mtype & USYNC_PROCESS)
32920Sstevel@tonic-gate 				lock_error(mp, "cond_wait", cvp,
32935629Sraf 				    "condvar process-private, "
32945629Sraf 				    "mutex process-shared");
32950Sstevel@tonic-gate 		}
32960Sstevel@tonic-gate 	}
32970Sstevel@tonic-gate 
32980Sstevel@tonic-gate 	/*
32990Sstevel@tonic-gate 	 * We deal with recursive mutexes by completely
33000Sstevel@tonic-gate 	 * dropping the lock and restoring the recursion
33010Sstevel@tonic-gate 	 * count after waking up.  This is arguably wrong,
33020Sstevel@tonic-gate 	 * but it obeys the principle of least astonishment.
33030Sstevel@tonic-gate 	 */
33040Sstevel@tonic-gate 	rcount = mp->mutex_rcount;
33050Sstevel@tonic-gate 	mp->mutex_rcount = 0;
33064574Sraf 	if ((mtype &
33074574Sraf 	    (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) |
33080Sstevel@tonic-gate 	    (cvp->cond_type & USYNC_PROCESS))
33090Sstevel@tonic-gate 		error = cond_wait_kernel(cvp, mp, tsp);
33100Sstevel@tonic-gate 	else
33115629Sraf 		error = cond_wait_queue(cvp, mp, tsp);
33120Sstevel@tonic-gate 	mp->mutex_rcount = rcount;
33130Sstevel@tonic-gate 
33140Sstevel@tonic-gate 	if (csp) {
33150Sstevel@tonic-gate 		hrtime_t lapse = gethrtime() - begin_sleep;
33160Sstevel@tonic-gate 		if (tsp == NULL)
33170Sstevel@tonic-gate 			csp->cond_wait_sleep_time += lapse;
33180Sstevel@tonic-gate 		else {
33190Sstevel@tonic-gate 			csp->cond_timedwait_sleep_time += lapse;
33200Sstevel@tonic-gate 			if (error == ETIME)
33210Sstevel@tonic-gate 				tdb_incr(csp->cond_timedwait_timeout);
33220Sstevel@tonic-gate 		}
33230Sstevel@tonic-gate 	}
33240Sstevel@tonic-gate 	return (error);
33250Sstevel@tonic-gate }
33260Sstevel@tonic-gate 
33270Sstevel@tonic-gate /*
33285891Sraf  * cond_wait() and _cond_wait() are cancellation points but __cond_wait()
33295891Sraf  * is not.  Internally, libc calls the non-cancellation version.
33305891Sraf  * Other libraries need to use pthread_setcancelstate(), as appropriate,
33315891Sraf  * since __cond_wait() is not exported from libc.
33320Sstevel@tonic-gate  */
33330Sstevel@tonic-gate int
33345891Sraf __cond_wait(cond_t *cvp, mutex_t *mp)
33350Sstevel@tonic-gate {
33360Sstevel@tonic-gate 	ulwp_t *self = curthread;
33370Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
33380Sstevel@tonic-gate 	uberflags_t *gflags;
33390Sstevel@tonic-gate 
33400Sstevel@tonic-gate 	/*
33410Sstevel@tonic-gate 	 * Optimize the common case of USYNC_THREAD plus
33420Sstevel@tonic-gate 	 * no error detection, no lock statistics, and no event tracing.
33430Sstevel@tonic-gate 	 */
33440Sstevel@tonic-gate 	if ((gflags = self->ul_schedctl_called) != NULL &&
33450Sstevel@tonic-gate 	    (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
33460Sstevel@tonic-gate 	    self->ul_td_events_enable |
33470Sstevel@tonic-gate 	    udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
33485629Sraf 		return (cond_wait_queue(cvp, mp, NULL));
33490Sstevel@tonic-gate 
33500Sstevel@tonic-gate 	/*
33510Sstevel@tonic-gate 	 * Else do it the long way.
33520Sstevel@tonic-gate 	 */
33530Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, NULL));
33540Sstevel@tonic-gate }
33550Sstevel@tonic-gate 
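/*
 * Example: code outside libc cannot call __cond_wait(), so to wait
 * without acting on a pending cancellation it brackets the wait with
 * pthread_setcancelstate(), as suggested above.  A hedged sketch;
 * the predicate pointer is hypothetical and the associated mutex is
 * assumed to be held on entry:
 */
#if 0	/* illustrative sketch only; not compiled into libc */
#include <pthread.h>

static void
uncancelable_wait(pthread_cond_t *cvp, pthread_mutex_t *mp, int *readyp)
{
	int oldstate;

	(void) pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	while (!*readyp)
		(void) pthread_cond_wait(cvp, mp);  /* cancellation held off */
	(void) pthread_setcancelstate(oldstate, NULL);
}
#endif
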
33565891Sraf #pragma weak cond_wait = _cond_wait
33570Sstevel@tonic-gate int
33585891Sraf _cond_wait(cond_t *cvp, mutex_t *mp)
33590Sstevel@tonic-gate {
33600Sstevel@tonic-gate 	int error;
33610Sstevel@tonic-gate 
33620Sstevel@tonic-gate 	_cancelon();
33635891Sraf 	error = __cond_wait(cvp, mp);
33640Sstevel@tonic-gate 	if (error == EINTR)
33650Sstevel@tonic-gate 		_canceloff();
33660Sstevel@tonic-gate 	else
33670Sstevel@tonic-gate 		_canceloff_nocancel();
33680Sstevel@tonic-gate 	return (error);
33690Sstevel@tonic-gate }
33700Sstevel@tonic-gate 
33715891Sraf /*
33725891Sraf  * pthread_cond_wait() is a cancellation point.
33735891Sraf  */
33740Sstevel@tonic-gate #pragma weak pthread_cond_wait = _pthread_cond_wait
33750Sstevel@tonic-gate int
33760Sstevel@tonic-gate _pthread_cond_wait(cond_t *cvp, mutex_t *mp)
33770Sstevel@tonic-gate {
33780Sstevel@tonic-gate 	int error;
33790Sstevel@tonic-gate 
33805891Sraf 	error = _cond_wait(cvp, mp);
33810Sstevel@tonic-gate 	return ((error == EINTR)? 0 : error);
33820Sstevel@tonic-gate }
33830Sstevel@tonic-gate 
33840Sstevel@tonic-gate /*
33855891Sraf  * cond_timedwait() and _cond_timedwait() are cancellation points
33865891Sraf  * but __cond_timedwait() is not.
33870Sstevel@tonic-gate  */
33880Sstevel@tonic-gate int
33895891Sraf __cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
33900Sstevel@tonic-gate {
33910Sstevel@tonic-gate 	clockid_t clock_id = cvp->cond_clockid;
33920Sstevel@tonic-gate 	timespec_t reltime;
33930Sstevel@tonic-gate 	int error;
33940Sstevel@tonic-gate 
33950Sstevel@tonic-gate 	if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
33960Sstevel@tonic-gate 		clock_id = CLOCK_REALTIME;
33970Sstevel@tonic-gate 	abstime_to_reltime(clock_id, abstime, &reltime);
33980Sstevel@tonic-gate 	error = cond_wait_common(cvp, mp, &reltime);
33990Sstevel@tonic-gate 	if (error == ETIME && clock_id == CLOCK_HIGHRES) {
34000Sstevel@tonic-gate 		/*
34010Sstevel@tonic-gate 		 * Don't return ETIME if we didn't really get a timeout.
34020Sstevel@tonic-gate 		 * This can happen if we return because someone resets
34030Sstevel@tonic-gate 		 * the system clock.  Just return zero in this case,
34040Sstevel@tonic-gate 		 * giving a spurious wakeup but not a timeout.
34050Sstevel@tonic-gate 		 */
34060Sstevel@tonic-gate 		if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
34070Sstevel@tonic-gate 		    abstime->tv_nsec > gethrtime())
34080Sstevel@tonic-gate 			error = 0;
34090Sstevel@tonic-gate 	}
34100Sstevel@tonic-gate 	return (error);
34110Sstevel@tonic-gate }
34120Sstevel@tonic-gate 
34135891Sraf #pragma weak cond_timedwait = _cond_timedwait
34140Sstevel@tonic-gate int
34155891Sraf _cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
34160Sstevel@tonic-gate {
34170Sstevel@tonic-gate 	int error;
34180Sstevel@tonic-gate 
34190Sstevel@tonic-gate 	_cancelon();
34205891Sraf 	error = __cond_timedwait(cvp, mp, abstime);
34210Sstevel@tonic-gate 	if (error == EINTR)
34220Sstevel@tonic-gate 		_canceloff();
34230Sstevel@tonic-gate 	else
34240Sstevel@tonic-gate 		_canceloff_nocancel();
34250Sstevel@tonic-gate 	return (error);
34260Sstevel@tonic-gate }
34270Sstevel@tonic-gate 
34285891Sraf /*
34295891Sraf  * pthread_cond_timedwait() is a cancellation point.
34305891Sraf  */
34310Sstevel@tonic-gate #pragma weak pthread_cond_timedwait = _pthread_cond_timedwait
34320Sstevel@tonic-gate int
34330Sstevel@tonic-gate _pthread_cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
34340Sstevel@tonic-gate {
34350Sstevel@tonic-gate 	int error;
34360Sstevel@tonic-gate 
34375891Sraf 	error = _cond_timedwait(cvp, mp, abstime);
34380Sstevel@tonic-gate 	if (error == ETIME)
34390Sstevel@tonic-gate 		error = ETIMEDOUT;
34400Sstevel@tonic-gate 	else if (error == EINTR)
34410Sstevel@tonic-gate 		error = 0;
34420Sstevel@tonic-gate 	return (error);
34430Sstevel@tonic-gate }
34440Sstevel@tonic-gate 
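/*
 * Example: pthread_cond_timedwait() takes an absolute timeout and
 * returns ETIMEDOUT on expiry.  A hedged sketch of a caller waiting
 * up to five seconds; the predicate pointer is hypothetical and the
 * mutex is assumed to be held on entry:
 */
#if 0	/* illustrative sketch only; not compiled into libc */
#include <pthread.h>
#include <time.h>
#include <errno.h>

static int
wait_up_to_five_seconds(pthread_cond_t *cvp, pthread_mutex_t *mp,
	int *readyp)
{
	timespec_t abstime;
	int error = 0;

	(void) clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 5;
	while (!*readyp && error != ETIMEDOUT)
		error = pthread_cond_timedwait(cvp, mp, &abstime);
	return (error);
}
#endif
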
34450Sstevel@tonic-gate /*
34465891Sraf  * cond_reltimedwait() and _cond_reltimedwait() are cancellation points
34475891Sraf  * but __cond_reltimedwait() is not.
34480Sstevel@tonic-gate  */
34490Sstevel@tonic-gate int
34505891Sraf __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
34510Sstevel@tonic-gate {
34520Sstevel@tonic-gate 	timespec_t tslocal = *reltime;
34530Sstevel@tonic-gate 
34540Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, &tslocal));
34550Sstevel@tonic-gate }
34560Sstevel@tonic-gate 
34575891Sraf #pragma weak cond_reltimedwait = _cond_reltimedwait
34580Sstevel@tonic-gate int
34595891Sraf _cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
34600Sstevel@tonic-gate {
34610Sstevel@tonic-gate 	int error;
34620Sstevel@tonic-gate 
34630Sstevel@tonic-gate 	_cancelon();
34645891Sraf 	error = __cond_reltimedwait(cvp, mp, reltime);
34650Sstevel@tonic-gate 	if (error == EINTR)
34660Sstevel@tonic-gate 		_canceloff();
34670Sstevel@tonic-gate 	else
34680Sstevel@tonic-gate 		_canceloff_nocancel();
34690Sstevel@tonic-gate 	return (error);
34700Sstevel@tonic-gate }
34710Sstevel@tonic-gate 
34720Sstevel@tonic-gate #pragma weak pthread_cond_reltimedwait_np = _pthread_cond_reltimedwait_np
34730Sstevel@tonic-gate int
34740Sstevel@tonic-gate _pthread_cond_reltimedwait_np(cond_t *cvp, mutex_t *mp,
34750Sstevel@tonic-gate 	const timespec_t *reltime)
34760Sstevel@tonic-gate {
34770Sstevel@tonic-gate 	int error;
34780Sstevel@tonic-gate 
34795891Sraf 	error = _cond_reltimedwait(cvp, mp, reltime);
34800Sstevel@tonic-gate 	if (error == ETIME)
34810Sstevel@tonic-gate 		error = ETIMEDOUT;
34820Sstevel@tonic-gate 	else if (error == EINTR)
34830Sstevel@tonic-gate 		error = 0;
34840Sstevel@tonic-gate 	return (error);
34850Sstevel@tonic-gate }
34860Sstevel@tonic-gate 
34870Sstevel@tonic-gate #pragma weak pthread_cond_signal = cond_signal_internal
34880Sstevel@tonic-gate #pragma weak _pthread_cond_signal = cond_signal_internal
34890Sstevel@tonic-gate #pragma weak cond_signal = cond_signal_internal
34900Sstevel@tonic-gate #pragma weak _cond_signal = cond_signal_internal
34910Sstevel@tonic-gate int
34920Sstevel@tonic-gate cond_signal_internal(cond_t *cvp)
34930Sstevel@tonic-gate {
34940Sstevel@tonic-gate 	ulwp_t *self = curthread;
34950Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
34960Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
34970Sstevel@tonic-gate 	int error = 0;
3498*6247Sraf 	int more;
3499*6247Sraf 	lwpid_t lwpid;
35000Sstevel@tonic-gate 	queue_head_t *qp;
35010Sstevel@tonic-gate 	mutex_t *mp;
35020Sstevel@tonic-gate 	queue_head_t *mqp;
35030Sstevel@tonic-gate 	ulwp_t **ulwpp;
35040Sstevel@tonic-gate 	ulwp_t *ulwp;
3505*6247Sraf 	ulwp_t *prev;
35060Sstevel@tonic-gate 
35070Sstevel@tonic-gate 	if (csp)
35080Sstevel@tonic-gate 		tdb_incr(csp->cond_signal);
35090Sstevel@tonic-gate 
35100Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
35110Sstevel@tonic-gate 		error = __lwp_cond_signal(cvp);
35120Sstevel@tonic-gate 
35130Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
35140Sstevel@tonic-gate 		return (error);
35150Sstevel@tonic-gate 
35160Sstevel@tonic-gate 	/*
35170Sstevel@tonic-gate 	 * Move someone from the condvar sleep queue to the mutex sleep
35180Sstevel@tonic-gate 	 * queue for the mutex that he will acquire on being woken up.
35190Sstevel@tonic-gate 	 * We can do this only if we own the mutex he will acquire.
35200Sstevel@tonic-gate 	 * If we do not own the mutex, or if his ul_cv_wake flag
35210Sstevel@tonic-gate 	 * is set, just dequeue and unpark him.
35220Sstevel@tonic-gate 	 */
35230Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
3524*6247Sraf 	ulwpp = queue_slot(qp, &prev, &more);
3525*6247Sraf 	cvp->cond_waiters_user = more;
3526*6247Sraf 	if (ulwpp == NULL) {	/* no one on the sleep queue */
35270Sstevel@tonic-gate 		queue_unlock(qp);
35280Sstevel@tonic-gate 		return (error);
35290Sstevel@tonic-gate 	}
3530*6247Sraf 	ulwp = *ulwpp;
35310Sstevel@tonic-gate 
35320Sstevel@tonic-gate 	/*
35330Sstevel@tonic-gate 	 * Inform the thread that he was the recipient of a cond_signal().
35340Sstevel@tonic-gate 	 * This lets him deal with cond_signal() and, concurrently,
35350Sstevel@tonic-gate 	 * one or more of a cancellation, a UNIX signal, or a timeout.
35360Sstevel@tonic-gate 	 * These latter conditions must not consume a cond_signal().
35370Sstevel@tonic-gate 	 */
35380Sstevel@tonic-gate 	ulwp->ul_signalled = 1;
35390Sstevel@tonic-gate 
35400Sstevel@tonic-gate 	/*
35410Sstevel@tonic-gate 	 * Dequeue the waiter but leave his ul_sleepq non-NULL
35420Sstevel@tonic-gate 	 * while we move him to the mutex queue so that he can
35430Sstevel@tonic-gate 	 * deal properly with spurious wakeups.
35440Sstevel@tonic-gate 	 */
3545*6247Sraf 	queue_unlink(qp, ulwpp, prev);
35460Sstevel@tonic-gate 
35470Sstevel@tonic-gate 	mp = ulwp->ul_cvmutex;		/* the mutex he will acquire */
35480Sstevel@tonic-gate 	ulwp->ul_cvmutex = NULL;
35490Sstevel@tonic-gate 	ASSERT(mp != NULL);
35500Sstevel@tonic-gate 
35510Sstevel@tonic-gate 	if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
3552*6247Sraf 		/* just wake him up */
3553*6247Sraf 		lwpid = ulwp->ul_lwpid;
35540Sstevel@tonic-gate 		no_preempt(self);
35550Sstevel@tonic-gate 		ulwp->ul_sleepq = NULL;
35560Sstevel@tonic-gate 		ulwp->ul_wchan = NULL;
35570Sstevel@tonic-gate 		queue_unlock(qp);
35580Sstevel@tonic-gate 		(void) __lwp_unpark(lwpid);
35590Sstevel@tonic-gate 		preempt(self);
35600Sstevel@tonic-gate 	} else {
3561*6247Sraf 		/* move him to the mutex queue */
35620Sstevel@tonic-gate 		mqp = queue_lock(mp, MX);
3563*6247Sraf 		enqueue(mqp, ulwp, 0);
35640Sstevel@tonic-gate 		mp->mutex_waiters = 1;
35650Sstevel@tonic-gate 		queue_unlock(mqp);
35660Sstevel@tonic-gate 		queue_unlock(qp);
35670Sstevel@tonic-gate 	}
35680Sstevel@tonic-gate 
35690Sstevel@tonic-gate 	return (error);
35700Sstevel@tonic-gate }
35710Sstevel@tonic-gate 
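/*
 * As the code above shows, cond_signal() can move the waiter directly
 * to the mutex sleep queue only when the signalling thread owns the
 * mutex; signalling with the mutex held therefore defers the wakeup
 * until the mutex is released.  A hedged sketch of such a caller;
 * the names m, cv and ready are hypothetical:
 */
#if 0	/* illustrative sketch only; not compiled into libc */
static mutex_t m = DEFAULTMUTEX;
static cond_t cv = DEFAULTCV;
static int ready;

static void
make_ready(void)
{
	(void) mutex_lock(&m);
	ready = 1;
	(void) cond_signal(&cv);	/* waiter moved to the mutex queue */
	(void) mutex_unlock(&m);	/* waiter actually released here */
}
#endif
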
35724570Sraf /*
35734574Sraf  * Utility function called by mutex_wakeup_all(), cond_broadcast(),
35744574Sraf  * and rw_queue_release() to (re)allocate a big buffer to hold the
35754574Sraf  * lwpids of all the threads to be set running after they are removed
35764574Sraf  * from their sleep queues.  Since we are holding a queue lock, we
35774574Sraf  * cannot call any function that might acquire a lock.  mmap(), munmap(),
35784574Sraf  * lwp_unpark_all() are simple system calls and are safe in this regard.
35794570Sraf  */
35804570Sraf lwpid_t *
35814570Sraf alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr)
35824570Sraf {
35834570Sraf 	/*
35844570Sraf 	 * Allocate NEWLWPS ids on the first overflow.
35854570Sraf 	 * Double the allocation each time after that.
35864570Sraf 	 */
35874570Sraf 	int nlwpid = *nlwpid_ptr;
35884570Sraf 	int maxlwps = *maxlwps_ptr;
35894570Sraf 	int first_allocation;
35904570Sraf 	int newlwps;
35914570Sraf 	void *vaddr;
35924570Sraf 
35934570Sraf 	ASSERT(nlwpid == maxlwps);
35944570Sraf 
35954570Sraf 	first_allocation = (maxlwps == MAXLWPS);
35964570Sraf 	newlwps = first_allocation? NEWLWPS : 2 * maxlwps;
35974570Sraf 	vaddr = _private_mmap(NULL, newlwps * sizeof (lwpid_t),
35984570Sraf 	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
35994570Sraf 
36004570Sraf 	if (vaddr == MAP_FAILED) {
36014570Sraf 		/*
36024570Sraf 		 * Let's hope this never happens.
36034570Sraf 		 * If it does, then we have a terrible
36044570Sraf 		 * thundering herd on our hands.
36054570Sraf 		 */
36064570Sraf 		(void) __lwp_unpark_all(lwpid, nlwpid);
36074570Sraf 		*nlwpid_ptr = 0;
36084570Sraf 	} else {
36094570Sraf 		(void) _memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t));
36104570Sraf 		if (!first_allocation)
36114570Sraf 			(void) _private_munmap(lwpid,
36124570Sraf 			    maxlwps * sizeof (lwpid_t));
36134570Sraf 		lwpid = vaddr;
36144570Sraf 		*maxlwps_ptr = newlwps;
36154570Sraf 	}
36164570Sraf 
36174570Sraf 	return (lwpid);
36184570Sraf }
36190Sstevel@tonic-gate 
36200Sstevel@tonic-gate #pragma weak pthread_cond_broadcast = cond_broadcast_internal
36210Sstevel@tonic-gate #pragma weak _pthread_cond_broadcast = cond_broadcast_internal
36220Sstevel@tonic-gate #pragma weak cond_broadcast = cond_broadcast_internal
36230Sstevel@tonic-gate #pragma weak _cond_broadcast = cond_broadcast_internal
36240Sstevel@tonic-gate int
36250Sstevel@tonic-gate cond_broadcast_internal(cond_t *cvp)
36260Sstevel@tonic-gate {
36270Sstevel@tonic-gate 	ulwp_t *self = curthread;
36280Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
36290Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
36300Sstevel@tonic-gate 	int error = 0;
36310Sstevel@tonic-gate 	queue_head_t *qp;
3632*6247Sraf 	queue_root_t *qrp;
36330Sstevel@tonic-gate 	mutex_t *mp;
36340Sstevel@tonic-gate 	mutex_t *mp_cache = NULL;
36354570Sraf 	queue_head_t *mqp = NULL;
36360Sstevel@tonic-gate 	ulwp_t *ulwp;
36374570Sraf 	int nlwpid = 0;
36384570Sraf 	int maxlwps = MAXLWPS;
36390Sstevel@tonic-gate 	lwpid_t buffer[MAXLWPS];
36400Sstevel@tonic-gate 	lwpid_t *lwpid = buffer;
36410Sstevel@tonic-gate 
36420Sstevel@tonic-gate 	if (csp)
36430Sstevel@tonic-gate 		tdb_incr(csp->cond_broadcast);
36440Sstevel@tonic-gate 
36450Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
36460Sstevel@tonic-gate 		error = __lwp_cond_broadcast(cvp);
36470Sstevel@tonic-gate 
36480Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
36490Sstevel@tonic-gate 		return (error);
36500Sstevel@tonic-gate 
36510Sstevel@tonic-gate 	/*
36520Sstevel@tonic-gate 	 * Move everyone from the condvar sleep queue to the mutex sleep
36530Sstevel@tonic-gate 	 * queue for the mutex that they will acquire on being woken up.
36540Sstevel@tonic-gate 	 * We can do this only if we own the mutex they will acquire.
36550Sstevel@tonic-gate 	 * If we do not own the mutex, or if their ul_cv_wake flag
36560Sstevel@tonic-gate 	 * is set, just dequeue and unpark them.
36570Sstevel@tonic-gate 	 *
36580Sstevel@tonic-gate 	 * We keep track of lwpids that are to be unparked in lwpid[].
36590Sstevel@tonic-gate 	 * __lwp_unpark_all() is called to unpark all of them after
36600Sstevel@tonic-gate 	 * they have been removed from the sleep queue and the sleep
36610Sstevel@tonic-gate 	 * queue lock has been dropped.  If we run out of space in our
36620Sstevel@tonic-gate 	 * on-stack buffer, we need to allocate more but we can't call
36630Sstevel@tonic-gate 	 * lmalloc() because we are holding a queue lock when the overflow
36640Sstevel@tonic-gate 	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
36654570Sraf 	 * either because the application may have allocated a small
36664570Sraf 	 * stack and we don't want to overrun the stack.  So we call
36674570Sraf 	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
36680Sstevel@tonic-gate 	 * system call directly since that path acquires no locks.
36690Sstevel@tonic-gate 	 */
36700Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
36710Sstevel@tonic-gate 	cvp->cond_waiters_user = 0;
3672*6247Sraf 	for (;;) {
3673*6247Sraf 		if ((qrp = qp->qh_root) == NULL ||
3674*6247Sraf 		    (ulwp = qrp->qr_head) == NULL)
3675*6247Sraf 			break;
3676*6247Sraf 		ASSERT(ulwp->ul_wchan == cvp);
3677*6247Sraf 		queue_unlink(qp, &qrp->qr_head, NULL);
36780Sstevel@tonic-gate 		mp = ulwp->ul_cvmutex;		/* his mutex */
36790Sstevel@tonic-gate 		ulwp->ul_cvmutex = NULL;
36800Sstevel@tonic-gate 		ASSERT(mp != NULL);
36810Sstevel@tonic-gate 		if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
3682*6247Sraf 			/* just wake him up */
36830Sstevel@tonic-gate 			ulwp->ul_sleepq = NULL;
36840Sstevel@tonic-gate 			ulwp->ul_wchan = NULL;
36854570Sraf 			if (nlwpid == maxlwps)
36864570Sraf 				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
36870Sstevel@tonic-gate 			lwpid[nlwpid++] = ulwp->ul_lwpid;
36880Sstevel@tonic-gate 		} else {
3689*6247Sraf 			/* move him to the mutex queue */
36900Sstevel@tonic-gate 			if (mp != mp_cache) {
36910Sstevel@tonic-gate 				mp_cache = mp;
36924570Sraf 				if (mqp != NULL)
36934570Sraf 					queue_unlock(mqp);
36944570Sraf 				mqp = queue_lock(mp, MX);
36950Sstevel@tonic-gate 			}
3696*6247Sraf 			enqueue(mqp, ulwp, 0);
36970Sstevel@tonic-gate 			mp->mutex_waiters = 1;
36980Sstevel@tonic-gate 		}
36990Sstevel@tonic-gate 	}
37004570Sraf 	if (mqp != NULL)
37014570Sraf 		queue_unlock(mqp);
37024570Sraf 	if (nlwpid == 0) {
37034570Sraf 		queue_unlock(qp);
37044570Sraf 	} else {
37054570Sraf 		no_preempt(self);
37064570Sraf 		queue_unlock(qp);
37070Sstevel@tonic-gate 		if (nlwpid == 1)
37080Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid[0]);
37090Sstevel@tonic-gate 		else
37100Sstevel@tonic-gate 			(void) __lwp_unpark_all(lwpid, nlwpid);
37114570Sraf 		preempt(self);
37120Sstevel@tonic-gate 	}
37130Sstevel@tonic-gate 	if (lwpid != buffer)
37140Sstevel@tonic-gate 		(void) _private_munmap(lwpid, maxlwps * sizeof (lwpid_t));
37150Sstevel@tonic-gate 	return (error);
37160Sstevel@tonic-gate }
37170Sstevel@tonic-gate 
37180Sstevel@tonic-gate #pragma weak pthread_cond_destroy = _cond_destroy
37190Sstevel@tonic-gate #pragma weak _pthread_cond_destroy = _cond_destroy
37200Sstevel@tonic-gate #pragma weak cond_destroy = _cond_destroy
37210Sstevel@tonic-gate int
37220Sstevel@tonic-gate _cond_destroy(cond_t *cvp)
37230Sstevel@tonic-gate {
37240Sstevel@tonic-gate 	cvp->cond_magic = 0;
37250Sstevel@tonic-gate 	tdb_sync_obj_deregister(cvp);
37260Sstevel@tonic-gate 	return (0);
37270Sstevel@tonic-gate }
37280Sstevel@tonic-gate 
37290Sstevel@tonic-gate #if defined(THREAD_DEBUG)
37300Sstevel@tonic-gate void
37310Sstevel@tonic-gate assert_no_libc_locks_held(void)
37320Sstevel@tonic-gate {
37330Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
37340Sstevel@tonic-gate }
37350Sstevel@tonic-gate 
37360Sstevel@tonic-gate /* protected by link_lock */
37370Sstevel@tonic-gate uint64_t spin_lock_spin;
37380Sstevel@tonic-gate uint64_t spin_lock_spin2;
37390Sstevel@tonic-gate uint64_t spin_lock_sleep;
37400Sstevel@tonic-gate uint64_t spin_lock_wakeup;
37410Sstevel@tonic-gate 
37420Sstevel@tonic-gate /*
37430Sstevel@tonic-gate  * Record spin lock statistics.
37440Sstevel@tonic-gate  * Called by a thread exiting itself in thrp_exit().
37450Sstevel@tonic-gate  * Also called via atexit() from the thread calling
37460Sstevel@tonic-gate  * exit() to do all the other threads as well.
37470Sstevel@tonic-gate  */
37480Sstevel@tonic-gate void
37490Sstevel@tonic-gate record_spin_locks(ulwp_t *ulwp)
37500Sstevel@tonic-gate {
37510Sstevel@tonic-gate 	spin_lock_spin += ulwp->ul_spin_lock_spin;
37520Sstevel@tonic-gate 	spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
37530Sstevel@tonic-gate 	spin_lock_sleep += ulwp->ul_spin_lock_sleep;
37540Sstevel@tonic-gate 	spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
37550Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin = 0;
37560Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin2 = 0;
37570Sstevel@tonic-gate 	ulwp->ul_spin_lock_sleep = 0;
37580Sstevel@tonic-gate 	ulwp->ul_spin_lock_wakeup = 0;
37590Sstevel@tonic-gate }
37600Sstevel@tonic-gate 
37610Sstevel@tonic-gate /*
37620Sstevel@tonic-gate  * atexit function:  dump the queue statistics to stderr.
37630Sstevel@tonic-gate  */
37641219Sraf #if !defined(__lint)
37651219Sraf #define	fprintf	_fprintf
37661219Sraf #endif
37670Sstevel@tonic-gate #include <stdio.h>
37680Sstevel@tonic-gate void
37690Sstevel@tonic-gate dump_queue_statistics(void)
37700Sstevel@tonic-gate {
37710Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
37720Sstevel@tonic-gate 	queue_head_t *qp;
37730Sstevel@tonic-gate 	int qn;
37740Sstevel@tonic-gate 	uint64_t spin_lock_total = 0;
37750Sstevel@tonic-gate 
37760Sstevel@tonic-gate 	if (udp->queue_head == NULL || thread_queue_dump == 0)
37770Sstevel@tonic-gate 		return;
37780Sstevel@tonic-gate 
37790Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
3780*6247Sraf 	    fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
37810Sstevel@tonic-gate 		return;
37820Sstevel@tonic-gate 	for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
37830Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
37840Sstevel@tonic-gate 			continue;
37850Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
3786*6247Sraf 		if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
3787*6247Sraf 		    (u_longlong_t)qp->qh_lockcount,
3788*6247Sraf 		    qp->qh_qmax, qp->qh_hmax) < 0)
37895629Sraf 			return;
37900Sstevel@tonic-gate 	}
37910Sstevel@tonic-gate 
37920Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
3793*6247Sraf 	    fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
37940Sstevel@tonic-gate 		return;
37950Sstevel@tonic-gate 	for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
37960Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
37970Sstevel@tonic-gate 			continue;
37980Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
3799*6247Sraf 		if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
3800*6247Sraf 		    (u_longlong_t)qp->qh_lockcount,
3801*6247Sraf 		    qp->qh_qmax, qp->qh_hmax) < 0)
38025629Sraf 			return;
38030Sstevel@tonic-gate 	}
38040Sstevel@tonic-gate 
38050Sstevel@tonic-gate 	(void) fprintf(stderr, "\n  spin_lock_total  = %10llu\n",
38065629Sraf 	    (u_longlong_t)spin_lock_total);
38070Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin   = %10llu\n",
38085629Sraf 	    (u_longlong_t)spin_lock_spin);
38090Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin2  = %10llu\n",
38105629Sraf 	    (u_longlong_t)spin_lock_spin2);
38110Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_sleep  = %10llu\n",
38125629Sraf 	    (u_longlong_t)spin_lock_sleep);
38130Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_wakeup = %10llu\n",
38145629Sraf 	    (u_longlong_t)spin_lock_wakeup);
38150Sstevel@tonic-gate }
3816*6247Sraf #endif
3817