xref: /onnv-gate/usr/src/uts/common/disp/disp_lock.c (revision 6103:2017e7795668)
10Sstevel@tonic-gate /*
20Sstevel@tonic-gate  * CDDL HEADER START
30Sstevel@tonic-gate  *
40Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*6103Sck142721  * Common Development and Distribution License (the "License").
6*6103Sck142721  * You may not use this file except in compliance with the License.
70Sstevel@tonic-gate  *
80Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
90Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
100Sstevel@tonic-gate  * See the License for the specific language governing permissions
110Sstevel@tonic-gate  * and limitations under the License.
120Sstevel@tonic-gate  *
130Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
140Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
150Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
160Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
170Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
180Sstevel@tonic-gate  *
190Sstevel@tonic-gate  * CDDL HEADER END
200Sstevel@tonic-gate  */
210Sstevel@tonic-gate /*
22*6103Sck142721  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
230Sstevel@tonic-gate  * Use is subject to license terms.
240Sstevel@tonic-gate  */
250Sstevel@tonic-gate 
260Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
270Sstevel@tonic-gate 
280Sstevel@tonic-gate #include <sys/types.h>
290Sstevel@tonic-gate #include <sys/param.h>
300Sstevel@tonic-gate #include <sys/sysmacros.h>
310Sstevel@tonic-gate #include <sys/systm.h>
320Sstevel@tonic-gate #include <sys/cmn_err.h>
330Sstevel@tonic-gate #include <sys/debug.h>
340Sstevel@tonic-gate #include <sys/inline.h>
350Sstevel@tonic-gate #include <sys/disp.h>
360Sstevel@tonic-gate #include <sys/kmem.h>
370Sstevel@tonic-gate #include <sys/cpuvar.h>
380Sstevel@tonic-gate #include <sys/vtrace.h>
390Sstevel@tonic-gate #include <sys/lockstat.h>
400Sstevel@tonic-gate #include <sys/spl.h>
410Sstevel@tonic-gate #include <sys/atomic.h>
420Sstevel@tonic-gate #include <sys/cpu.h>
430Sstevel@tonic-gate 
440Sstevel@tonic-gate /*
450Sstevel@tonic-gate  * We check CPU_ON_INTR(CPU) when exiting a disp lock, rather than when
460Sstevel@tonic-gate  * entering it, for a purely pragmatic reason: when exiting a disp lock
470Sstevel@tonic-gate  * we know that we must be at PIL 10, and thus not preemptible; therefore
480Sstevel@tonic-gate  * we can safely load the CPU pointer without worrying about it changing.
490Sstevel@tonic-gate  */
/*
 * Panic handler invoked when any dispatcher lock routine detects that it
 * is running in high-level interrupt context (CPU_ON_INTR() != 0), where
 * dispatcher operations are illegal.
 */
static void
disp_onintr_panic(void)
{
	panic("dispatcher invoked from high-level interrupt handler");
}
550Sstevel@tonic-gate 
/*
 * Initialize a dispatcher lock.  The name argument is unused; it is
 * retained for interface compatibility with callers that supply one.
 */
/* ARGSUSED */
void
disp_lock_init(disp_lock_t *lp, char *name)
{
	DISP_LOCK_INIT(lp);
}
620Sstevel@tonic-gate 
/*
 * Destroy a dispatcher lock previously set up with disp_lock_init().
 */
/* ARGSUSED */
void
disp_lock_destroy(disp_lock_t *lp)
{
	DISP_LOCK_DESTROY(lp);
}
690Sstevel@tonic-gate 
/*
 * Acquire a dispatcher lock without changing spl.  The caller is
 * presumed to already be at DISP_LEVEL or above (e.g. it already holds
 * another dispatcher lock); we simply spin for the lock.
 */
void
disp_lock_enter_high(disp_lock_t *lp)
{
	lock_set(lp);
}
750Sstevel@tonic-gate 
/*
 * Release a lock acquired with disp_lock_enter_high(), leaving spl
 * unchanged.  The high-level-interrupt check is done on exit rather
 * than entry for the reason given in the comment at the top of this
 * file: at this point we are at PIL 10 and not preemptible, so the
 * CPU pointer can be loaded safely.
 */
void
disp_lock_exit_high(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear(lp);
}
840Sstevel@tonic-gate 
/*
 * Acquire a dispatcher lock, raising spl to DISP_LEVEL.  The caller's
 * previous spl is saved in curthread->t_oldspl for disp_lock_exit()
 * (or one of its variants) to restore.
 */
void
disp_lock_enter(disp_lock_t *lp)
{
	lock_set_spl(lp, ipltospl(DISP_LEVEL), &curthread->t_oldspl);
}
900Sstevel@tonic-gate 
/*
 * Release a dispatcher lock acquired with disp_lock_enter(), restoring
 * the spl saved in curthread->t_oldspl.  If a kernel preemption has
 * been posted (cpu_kprunrun), honor it synchronously once the lock is
 * dropped.  Note that cpu_kprunrun is sampled before the lock is
 * cleared, while we are still at DISP_LEVEL and therefore pinned to
 * this CPU (see the comment at the top of this file); the lock must be
 * released before calling kpreempt().
 */
void
disp_lock_exit(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	if (CPU->cpu_kprunrun) {
		lock_clear_splx(lp, curthread->t_oldspl);
		kpreempt(KPREEMPT_SYNC);
	} else {
		lock_clear_splx(lp, curthread->t_oldspl);
	}
}
1040Sstevel@tonic-gate 
/*
 * Like disp_lock_exit(), but never yields to a posted kernel
 * preemption.  Used on paths where the caller must not be preempted
 * even if cpu_kprunrun is set.
 */
void
disp_lock_exit_nopreempt(disp_lock_t *lp)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();
	ASSERT(DISP_LOCK_HELD(lp));
	lock_clear_splx(lp, curthread->t_oldspl);
}
1130Sstevel@tonic-gate 
/*
 * Thread_lock() - get the correct dispatcher lock for the thread.
 *
 * A thread's t_lockp can change while we are acquiring it (the lock
 * that protects a thread follows the thread's state; presumably it
 * moves as the thread changes dispatch queue/state).  So after winning
 * the lock we re-check that t_lockp still points at the lock we took;
 * if it moved, drop the lock and retry.  On success, spl remains at
 * splhigh() and the caller's previous spl is saved in
 * curthread->t_oldspl for thread_unlock() to restore.
 */
void
thread_lock(kthread_id_t t)
{
	int s = splhigh();

	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
			if (lp == *tlpp) {
				/* t_lockp did not move; lock is ours. */
				curthread->t_oldspl = (ushort_t)s;
				return;
			}
			/* Lock moved out from under us; release, retry. */
			lock_clear(lp);
		} else {
			hrtime_t spin_time =
			    LOCKSTAT_START_TIME(LS_THREAD_LOCK_SPIN);
			/*
			 * Lower spl and spin on lock with non-atomic load
			 * to avoid cache activity.  Spin until the lock
			 * becomes available or spontaneously changes.
			 */
			splx(s);
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr) {
					/*
					 * System is panicking: pretend we
					 * got the lock so panic processing
					 * can proceed.
					 */
					curthread->t_oldspl = splhigh();
					return;
				}
				SMT_PAUSE();
			}

			LOCKSTAT_RECORD_TIME(LS_THREAD_LOCK_SPIN,
			    lp, spin_time);
			s = splhigh();
		}
	}
}
1570Sstevel@tonic-gate 
/*
 * Thread_lock_high() - get the correct dispatcher lock for the thread.
 *	This version is called when already at high spl, so unlike
 *	thread_lock() it neither raises/lowers spl nor saves the old spl
 *	in t_oldspl.  The retry logic is otherwise identical: t_lockp
 *	may change while we spin, so re-check it after lock_try().
 */
void
thread_lock_high(kthread_id_t t)
{
	if (CPU_ON_INTR(CPU) != 0)
		disp_onintr_panic();

	for (;;) {
		lock_t *volatile *tlpp = &t->t_lockp;
		lock_t *lp = *tlpp;
		if (lock_try(lp)) {
			if (lp == *tlpp)
				return;		/* t_lockp stable; done */
			/* Lock moved out from under us; release, retry. */
			lock_clear(lp);
		} else {
			hrtime_t spin_time =
			    LOCKSTAT_START_TIME(LS_THREAD_LOCK_HIGH_SPIN);
			/* Spin until the lock frees or t_lockp changes. */
			while (lp == *tlpp && LOCK_HELD(lp)) {
				if (panicstr)
					return;	/* give up during panic */
				SMT_PAUSE();
			}
			LOCKSTAT_RECORD_TIME(LS_THREAD_LOCK_HIGH_SPIN,
			    lp, spin_time);
		}
	}
}
1880Sstevel@tonic-gate 
/*
 * Called by THREAD_TRANSITION macro to change the thread state to
 * the intermediate state-in-transition state.
 *
 * The thread's lock pointer is switched to transition_lock (which is
 * never actually acquired) before the old lock is dropped, so there is
 * no window in which the thread appears unlocked under its old lock.
 * Caller must hold the thread's current dispatcher lock.
 */
void
thread_transition(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &transition_lock);

	lp = t->t_lockp;
	t->t_lockp = &transition_lock;
	disp_lock_exit_high(lp);
}
2050Sstevel@tonic-gate 
/*
 * Put thread in stop state, and set the lock pointer to the stop_lock.
 * This effectively drops the lock on the thread, since the stop_lock
 * isn't held.
 * Eventually, stop_lock could be hashed if there is too much contention.
 *
 * Caller must hold the thread's current dispatcher lock; on return the
 * thread is TS_STOPPED and protected by stop_lock instead.
 */
void
thread_stop(kthread_id_t t)
{
	disp_lock_t	*lp;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_lockp != &stop_lock);	/* not already stopped */

	lp = t->t_lockp;
	t->t_state = TS_STOPPED;
	/*
	 * Ensure that t_state reaches global visibility before t_lockp
	 */
	membar_producer();
	t->t_lockp = &stop_lock;
	disp_lock_exit(lp);	/* releases the old lock at its spl */
}
229