xref: /onnv-gate/usr/src/uts/intel/ia32/ml/lock_prim.s (revision 5834:66e26b3fbcc7)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(lint) || defined(__lint)
#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <vm/page.h>
#else	/* __lint */
#include "assym.h"
#endif	/* __lint */

#include <sys/mutex_impl.h>
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/rwlock_impl.h>
#include <sys/lockstat.h>

/*
 * lock_try(lp), ulock_try(lp)
 *	- returns non-zero on success.
 *	- doesn't block interrupts so don't use this to spin on a lock.
 *
 * ulock_try() is for a lock in the user address space.
 */
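/*
 * As a hedged C sketch, the fast path below amounts to the following,
 * with xchg_byte() standing in for the atomic xchgb instruction:
 *
 *	int
 *	lock_try(lock_t *lp)
 *	{
 *		return (xchg_byte(lp, 0xff) == 0);
 *	}
 *
 * ulock_try() exchanges in 1 rather than 0xff, but the idea is the same:
 * the lock is won iff the old byte was zero.
 */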

#if defined(lint) || defined(__lint)

/* ARGSUSED */
int
lock_try(lock_t *lp)
{ return (0); }

/* ARGSUSED */
int
lock_spin_try(lock_t *lp)
{ return (0); }

/* ARGSUSED */
int
ulock_try(lock_t *lp)
{ return (0); }

#else	/* __lint */
	.globl	kernelbase

#if defined(__amd64)

	ENTRY(lock_try)
	movb	$-1, %dl
	movzbq	%dl, %rax
	xchgb	%dl, (%rdi)
	xorb	%dl, %al
.lock_try_lockstat_patch_point:
	ret
	testb	%al, %al
	jnz	0f
	ret
0:
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movq	%rdi, %rsi		/* rsi = lock addr */
	movl	$LS_LOCK_TRY_ACQUIRE, %edi /* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_try)

	ENTRY(lock_spin_try)
	movb	$-1, %dl
	movzbq	%dl, %rax
	xchgb	%dl, (%rdi)
	xorb	%dl, %al
	ret
	SET_SIZE(lock_spin_try)

	ENTRY(ulock_try)
#ifdef DEBUG
	movq	kernelbase(%rip), %rax
	cmpq	%rax, %rdi		/* test uaddr < kernelbase */
	jb	ulock_pass		/* uaddr < kernelbase, proceed */

	movq	%rdi, %r12		/* preserve lock ptr for debugging */
	leaq	.ulock_panic_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic

#endif /* DEBUG */

ulock_pass:
	movl	$1, %eax
	xchgb	%al, (%rdi)
	xorb	$1, %al
	ret
	SET_SIZE(ulock_try)

#else

	ENTRY(lock_try)
	movl	$1,%edx
	movl	4(%esp),%ecx		/* ecx = lock addr */
	xorl	%eax,%eax
	xchgb	%dl, (%ecx)		/* using dl will avoid partial */
	testb	%dl,%dl			/* stalls on P6 ? */
	setz	%al
.lock_try_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	testl	%eax, %eax
	jz	0f
	movl	$LS_LOCK_TRY_ACQUIRE, %eax
	jmp	lockstat_wrapper
0:
	ret
	SET_SIZE(lock_try)

	ENTRY(lock_spin_try)
	movl	$-1,%edx
	movl	4(%esp),%ecx		/* ecx = lock addr */
	xorl	%eax,%eax
	xchgb	%dl, (%ecx)		/* using dl will avoid partial */
	testb	%dl,%dl			/* stalls on P6 ? */
	setz	%al
	ret
	SET_SIZE(lock_spin_try)

	ENTRY(ulock_try)
#ifdef DEBUG
	movl	kernelbase, %eax
	cmpl	%eax, 4(%esp)		/* test uaddr < kernelbase */
	jb	ulock_pass		/* uaddr < kernelbase, proceed */

	pushl	$.ulock_panic_msg
	call	panic

#endif /* DEBUG */

ulock_pass:
	movl	$1,%eax
	movl	4(%esp),%ecx
	xchgb	%al, (%ecx)
	xorb	$1, %al
	ret
	SET_SIZE(ulock_try)

#endif	/* !__amd64 */

#ifdef DEBUG
	.data
.ulock_panic_msg:
	.string "ulock_try: Argument is above kernelbase"
	.text
#endif	/* DEBUG */

#endif	/* __lint */

/*
 * lock_clear(lp)
 *	- unlock lock without changing interrupt priority level.
 */
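/*
 * Note that the plain byte store below is all that is needed: on x86 an
 * ordinary store already has release semantics (older loads and stores
 * are not reordered past it), so no fence or locked instruction is
 * required to drop the lock.
 */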

#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
lock_clear(lock_t *lp)
{}

/* ARGSUSED */
void
ulock_clear(lock_t *lp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_clear)
	movb	$0, (%rdi)
.lock_clear_lockstat_patch_point:
	ret
	movq	%rdi, %rsi			/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_RELEASE, %edi	/* edi = event */
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)

	ENTRY(ulock_clear)
#ifdef DEBUG
	movq	kernelbase(%rip), %rcx
	cmpq	%rcx, %rdi		/* test uaddr < kernelbase */
	jb	ulock_clr		/* uaddr < kernelbase, proceed */

	leaq	.ulock_clear_msg(%rip), %rdi
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	xorl	%eax, %eax		/* clear for varargs */
	call	panic
#endif

ulock_clr:
	movb	$0, (%rdi)
	ret
	SET_SIZE(ulock_clear)

#else

	ENTRY(lock_clear)
	movl	4(%esp), %eax
	movb	$0, (%eax)
.lock_clear_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx		/* edx = thread addr */
	movl	%eax, %ecx			/* ecx = lock pointer */
	movl	$LS_LOCK_CLEAR_RELEASE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear)

	ENTRY(ulock_clear)
#ifdef DEBUG
	movl	kernelbase, %ecx
	cmpl	%ecx, 4(%esp)		/* test uaddr < kernelbase */
	jb	ulock_clr		/* uaddr < kernelbase, proceed */

	pushl	$.ulock_clear_msg
	call	panic
#endif

ulock_clr:
	movl	4(%esp),%eax
	xorl	%ecx,%ecx
	movb	%cl, (%eax)
	ret
	SET_SIZE(ulock_clear)

#endif	/* !__amd64 */

#ifdef DEBUG
	.data
.ulock_clear_msg:
	.string "ulock_clear: Argument is above kernelbase"
	.text
#endif	/* DEBUG */


#endif	/* __lint */

/*
 * lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 * Raises the pil to new_pil, acquires lp, stores the old pil in *old_pil.
 */
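/*
 * A sketch of the fast path: raise the PIL first with splr(), then try
 * the lock with an atomic byte exchange (xchg_byte() again standing in
 * for xchgb); on contention fall back to lock_set_spl_spin() in C,
 * passing along the original PIL returned by splr():
 *
 *	void
 *	lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
 *	{
 *		int s = splr(new_pil);
 *
 *		if (xchg_byte(lp, 0xff) == 0)
 *			*old_pil = (u_short)s;
 *		else
 *			lock_set_spl_spin(lp, new_pil, old_pil, s);
 *	}
 */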

#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
lock_set_spl(lock_t *lp, int new_pil, u_short *old_pil)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_set_spl)
	pushq	%rbp
	movq	%rsp, %rbp
	subq	$32, %rsp
	movl	%esi, 8(%rsp)		/* save priority level */
	movq	%rdx, 16(%rsp)		/* save old pil ptr */
	movq	%rdi, 24(%rsp)		/* save lock pointer */
	movl	%esi, %edi		/* pass priority level */
	call	splr			/* raise priority level */
	movq	24(%rsp), %rdi		/* rdi = lock addr */
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movq	16(%rsp), %rdx		/* rdx = old pil addr */
	movw	%ax, (%rdx)		/* store old pil */
	leave
.lock_set_spl_lockstat_patch_point:
	ret
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %edi
	jmp	lockstat_wrapper
.lss_miss:
	movl	8(%rsp), %esi		/* new_pil */
	movq	16(%rsp), %rdx		/* old_pil_addr */
	movl	%eax, %ecx		/* original pil */
	leave				/* unwind stack */
	jmp	lock_set_spl_spin
	SET_SIZE(lock_set_spl)

#else

	ENTRY(lock_set_spl)
	movl	8(%esp), %eax		/* get priority level */
	pushl	%eax
	call	splr			/* raise priority level */
	movl	8(%esp), %ecx		/* ecx = lock addr */
	movl	$-1, %edx
	addl	$4, %esp
	xchgb	%dl, (%ecx)		/* try to set lock */
	testb	%dl, %dl		/* did we get the lock? ... */
	movl	12(%esp), %edx		/* edx = old pil addr (ZF unaffected) */
	jnz	.lss_miss		/* ... no, go to C for the hard case */
	movw	%ax, (%edx)		/* store old pil */
.lock_set_spl_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	movl	$LS_LOCK_SET_SPL_ACQUIRE, %eax
	jmp	lockstat_wrapper
.lss_miss:
	pushl	%eax			/* original pil */
	pushl	%edx			/* old_pil addr */
	pushl	16(%esp)		/* new_pil */
	pushl	%ecx			/* lock addr */
	call	lock_set_spl_spin
	addl	$16, %esp
	ret
	SET_SIZE(lock_set_spl)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * void
 * lock_init(lp)
 */

#if defined(__lint)

/* ARGSUSED */
void
lock_init(lock_t *lp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_init)
	movb	$0, (%rdi)
	ret
	SET_SIZE(lock_init)

#else

	ENTRY(lock_init)
	movl	4(%esp), %eax
	movb	$0, (%eax)
	ret
	SET_SIZE(lock_init)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * void
 * lock_set(lp)
 */
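/*
 * Fast path sketch: a single atomic byte exchange; if the lock was
 * already held, punt to lock_set_spin() in C:
 *
 *	void
 *	lock_set(lock_t *lp)
 *	{
 *		if (xchg_byte(lp, 0xff) != 0)
 *			lock_set_spin(lp);
 *	}
 */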

#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
lock_set(lock_t *lp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_set)
	movb	$-1, %dl
	xchgb	%dl, (%rdi)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
.lock_set_lockstat_patch_point:
	ret
	movq	%rdi, %rsi		/* rsi = lock addr */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)

#else

	ENTRY(lock_set)
	movl	4(%esp), %ecx		/* ecx = lock addr */
	movl	$-1, %edx
	xchgb	%dl, (%ecx)		/* try to set lock */
	testb	%dl, %dl		/* did we get it? */
	jnz	lock_set_spin		/* no, go to C for the hard case */
.lock_set_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	movl	$LS_LOCK_SET_ACQUIRE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_set)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * lock_clear_splx(lp, s)
 */
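/*
 * Equivalent, as a sketch, to clearing the lock and then restoring the
 * saved PIL; the common (no-lockstat) path below tail-calls splx() so
 * it costs only a store and a jump:
 *
 *	void
 *	lock_clear_splx(lock_t *lp, int s)
 *	{
 *		*lp = 0;
 *		splx(s);
 *	}
 */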

#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
lock_clear_splx(lock_t *lp, int s)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(lock_clear_splx)
	movb	$0, (%rdi)		/* clear lock */
.lock_clear_splx_lockstat_patch_point:
	jmp	0f
0:
	movl	%esi, %edi		/* arg for splx */
	jmp	splx			/* let splx do its thing */
.lock_clear_splx_lockstat:
	pushq	%rbp			/* align stack properly */
	movq	%rsp, %rbp
	subq	$16, %rsp		/* space to save args across splx */
	movq	%rdi, 8(%rsp)		/* save lock ptr across splx call */
	movl	%esi, %edi		/* arg for splx */
	call	splx			/* lower the priority */
	movq	8(%rsp), %rsi		/* rsi = lock ptr */
	leave				/* unwind stack */
	movq	%gs:CPU_THREAD, %rdx	/* rdx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)

#else

	ENTRY(lock_clear_splx)
	movl	4(%esp), %eax		/* eax = lock addr */
	movb	$0, (%eax)		/* clear lock */
.lock_clear_splx_lockstat_patch_point:
	jmp	0f
0:
	movl	8(%esp), %edx		/* edx = desired pil */
	movl	%edx, 4(%esp)		/* set spl arg up for splx */
	jmp	splx			/* let splx do its thing */
.lock_clear_splx_lockstat:
	movl	8(%esp), %edx		/* edx = desired pil */
	pushl	%ebp			/* set up stack frame */
	movl	%esp, %ebp
	pushl	%edx
	call	splx
	leave				/* unwind stack */
	movl	4(%esp), %ecx		/* ecx = lock pointer */
	movl	%gs:CPU_THREAD, %edx	/* edx = thread addr */
	movl	$LS_LOCK_CLEAR_SPLX_RELEASE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(lock_clear_splx)

#endif	/* !__amd64 */

#if defined(__GNUC_AS__)
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	(.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2)

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	(.lock_clear_splx_lockstat_patch_point + 1)
#else
#define	LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL	\
	[.lock_clear_splx_lockstat - .lock_clear_splx_lockstat_patch_point - 2]

#define LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT	\
	[.lock_clear_splx_lockstat_patch_point + 1]
#endif
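/*
 * These constants describe the two-byte "jmp 0f" at
 * .lock_clear_splx_lockstat_patch_point above: the patch point is the
 * jmp's rel8 displacement byte (label + 1), and the patch value is the
 * displacement from the end of the jmp (label + 2) to
 * .lock_clear_splx_lockstat, so that hot-patching the displacement
 * redirects the jmp onto the lockstat slow path.
 */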

#endif	/* __lint */

/*
 * mutex_enter() and mutex_exit().
 *
 * These routines handle the simple cases of mutex_enter() (adaptive
 * lock, not held) and mutex_exit() (adaptive lock, held, no waiters).
 * If anything complicated is going on we punt to mutex_vector_enter().
 *
 * mutex_tryenter() is similar to mutex_enter() but returns zero if
 * the lock cannot be acquired, nonzero on success.
 *
 * If mutex_exit() gets preempted in the window between checking waiters
 * and clearing the lock, we can miss wakeups.  Disabling preemption
 * in the mutex code is prohibitively expensive, so instead we detect
 * mutex preemption by examining the trapped PC in the interrupt path.
 * If we interrupt a thread in mutex_exit() that has not yet cleared
 * the lock, cmnint() resets its PC back to the beginning of
 * mutex_exit() so it will check again for waiters when it resumes.
 *
 * The lockstat code below is activated when the lockstat driver
 * calls lockstat_hot_patch() to hot-patch the kernel mutex code.
 * Note that we don't need to test lockstat_event_mask here -- we won't
 * patch this code in unless we're gathering ADAPTIVE_HOLD lockstats.
 */
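/*
 * As a hedged C sketch of the fast paths (atomic_cas_ptr() standing in
 * for the lock cmpxchg below, with the owner word doubling as the lock):
 *
 *	void
 *	mutex_enter(kmutex_t *lp)
 *	{
 *		if (atomic_cas_ptr(lp, NULL, curthread) != NULL)
 *			mutex_vector_enter(lp);
 *	}
 *
 *	void
 *	mutex_exit(kmutex_t *lp)
 *	{
 *		if (*(kthread_t **)lp != curthread)
 *			mutex_vector_exit(lp);
 *		else
 *			*(void **)lp = NULL;
 *	}
 */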
#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
mutex_enter(kmutex_t *lp)
{}

/* ARGSUSED */
int
mutex_tryenter(kmutex_t *lp)
{ return (0); }

/* ARGSUSED */
int
mutex_adaptive_tryenter(mutex_impl_t *lp)
{ return (0); }

/* ARGSUSED */
void
mutex_exit(kmutex_t *lp)
{}

#else

#if defined(__amd64)

	ENTRY_NP(mutex_enter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	mutex_vector_enter
.mutex_enter_lockstat_patch_point:
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_enter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
/*
 * expects %rdx=thread, %rsi=lock, %edi=lockstat event
 */
	ALTENTRY(lockstat_wrapper)
	incb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi
	call	*lockstat_probe
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper)
	SET_SIZE(mutex_enter)

/*
 * expects %rcx=thread, %rdx=arg, %rsi=lock, %edi=lockstat event
 */
	ENTRY(lockstat_wrapper_arg)
	incb	T_LOCKSTAT(%rcx)		/* curthread->t_lockstat++ */
	leaq	lockstat_probemap(%rip), %rax
	movl	(%rax, %rdi, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushq	%rbp				/* align stack properly */
	movq	%rsp, %rbp
	movl	%eax, %edi
	call	*lockstat_probe
	leave					/* unwind stack */
1:
	movq	%gs:CPU_THREAD, %rdx		/* reload thread ptr */
	decb	T_LOCKSTAT(%rdx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	ret
	SET_SIZE(lockstat_wrapper_arg)


	ENTRY(mutex_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	mutex_vector_tryenter
	not	%eax				/* return success (nonzero) */
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_tryenter_lockstat_patch_point:
.mutex_tryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_tryenter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.mutex_tryenter_lockstat_patch_point:
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
	movq	%rdi, %rsi
	movl	$LS_MUTEX_ENTER_ACQUIRE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	xorl	%eax, %eax			/* rax = 0 (unheld adaptive) */
	lock
	cmpxchgq %rdx, (%rdi)
	jnz	0f
	not	%eax				/* return success (nonzero) */
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_atryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
0:
	xorl	%eax, %eax			/* return failure */
	ret
	SET_SIZE(mutex_adaptive_tryenter)

	.globl	mutex_owner_running_critical_start

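/*
 * mutex_owner_running(lp) returns the CPU on which the owner of an
 * adaptive mutex is currently running, or NULL if the mutex is unowned
 * or its owner is not on a CPU.  The critical_start/critical_size
 * symbols are exported so that, as with mutex_exit() above, the
 * interrupt path can restart this region if it interrupts us inside
 * it, keeping the owner/t_cpu/cpu_thread chain of reads coherent.
 */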
	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:
	movq	(%rdi), %r11		/* get owner field */
	andq	$MUTEX_THREAD, %r11	/* remove waiters bit */
	cmpq	$0, %r11		/* if free, skip */
	je	1f			/* go return 0 */
	movq	T_CPU(%r11), %r8	/* get owner->t_cpu */
	movq	CPU_THREAD(%r8), %r9	/* get t_cpu->cpu_thread */
.mutex_owner_running_critical_end:
	cmpq	%r11, %r9	/* owner == running thread? */
	je	2f		/* yes, go return cpu */
1:
	xorq	%rax, %rax	/* return 0 */
	ret
2:
	movq	%r8, %rax		/* return cpu */
	ret
	SET_SIZE(mutex_owner_running)

	.globl	mutex_owner_running_critical_size
	.type	mutex_owner_running_critical_size, @object
	.align	CPTRSIZE
mutex_owner_running_critical_size:
	.quad	.mutex_owner_running_critical_end - mutex_owner_running_critical_start
	SET_SIZE(mutex_owner_running_critical_size)

	.globl	mutex_exit_critical_start

	ENTRY(mutex_exit)
mutex_exit_critical_start:		/* If interrupted, restart here */
	movq	%gs:CPU_THREAD, %rdx
	cmpq	%rdx, (%rdi)
	jne	mutex_vector_exit		/* wrong type or wrong owner */
	movq	$0, (%rdi)			/* clear owner AND lock */
.mutex_exit_critical_end:
.mutex_exit_lockstat_patch_point:
	ret
	movq	%rdi, %rsi
	movl	$LS_MUTEX_EXIT_RELEASE, %edi
	jmp	lockstat_wrapper
	SET_SIZE(mutex_exit)

	.globl	mutex_exit_critical_size
	.type	mutex_exit_critical_size, @object
	.align	CPTRSIZE
mutex_exit_critical_size:
	.quad	.mutex_exit_critical_end - mutex_exit_critical_start
	SET_SIZE(mutex_exit_critical_size)

#else

	ENTRY_NP(mutex_enter)
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
	lock
	cmpxchgl %edx, (%ecx)
	jnz	mutex_vector_enter
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_enter_lockstat_patch_point:
.mutex_enter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_enter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.mutex_enter_lockstat_patch_point:
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
	movl	$LS_MUTEX_ENTER_ACQUIRE, %eax
	ALTENTRY(lockstat_wrapper)	/* expects edx=thread, ecx=lock, */
					/*   eax=lockstat event */
	pushl	%ebp				/* buy a frame */
	movl	%esp, %ebp
	incb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat++ */
	pushl	%edx				/* save thread pointer */
	movl	$lockstat_probemap, %edx
	movl	(%edx, %eax, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushl	%ecx				/* push lock */
	pushl	%eax				/* push probe ID */
	call	*lockstat_probe
	addl	$8, %esp
1:
	popl	%edx				/* restore thread pointer */
	decb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	popl	%ebp				/* pop off frame */
	ret
	SET_SIZE(lockstat_wrapper)
	SET_SIZE(mutex_enter)

	ENTRY(lockstat_wrapper_arg)	/* expects edx=thread, ecx=lock, */
					/* eax=lockstat event, pushed arg */
	incb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat++ */
	pushl	%edx				/* save thread pointer */
	movl	$lockstat_probemap, %edx
	movl	(%edx, %eax, DTRACE_IDSIZE), %eax
	testl	%eax, %eax			/* check for non-zero probe */
	jz	1f
	pushl	%ebp				/* save %ebp */
	pushl	8(%esp)				/* push arg1 */
	movl	%ebp, 12(%esp)			/* fake up the stack frame */
	movl	%esp, %ebp			/* fake up base pointer */
	addl	$12, %ebp			/* adjust faked base pointer */
	pushl	%ecx				/* push lock */
	pushl	%eax				/* push probe ID */
	call	*lockstat_probe
	addl	$12, %esp			/* adjust for arguments */
	popl	%ebp				/* pop frame */
1:
	popl	%edx				/* restore thread pointer */
	decb	T_LOCKSTAT(%edx)		/* curthread->t_lockstat-- */
	movl	$1, %eax			/* return success if tryenter */
	addl	$4, %esp			/* pop argument */
	ret
	SET_SIZE(lockstat_wrapper_arg)


	ENTRY(mutex_tryenter)
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
	lock
	cmpxchgl %edx, (%ecx)
	jnz	mutex_vector_tryenter
	movl	%ecx, %eax
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_tryenter_lockstat_patch_point:
.mutex_tryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
.mutex_tryenter_lockstat_6323525_patch_point:	/* new patch point if lfence */
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.mutex_tryenter_lockstat_patch_point:
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
	movl	$LS_MUTEX_ENTER_ACQUIRE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(mutex_tryenter)

	ENTRY(mutex_adaptive_tryenter)
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	xorl	%eax, %eax			/* eax = 0 (unheld adaptive) */
	lock
	cmpxchgl %edx, (%ecx)
	jnz	0f
	movl	%ecx, %eax
#if defined(OPTERON_WORKAROUND_6323525)
.mutex_atryenter_6323525_patch_point:
	ret					/* nop space for lfence */
	nop
	nop
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */
0:
	xorl	%eax, %eax
	ret
	SET_SIZE(mutex_adaptive_tryenter)

	.globl	mutex_owner_running_critical_start

	ENTRY(mutex_owner_running)
mutex_owner_running_critical_start:
	movl	4(%esp), %eax		/* get owner field */
	movl	(%eax), %eax
	andl	$MUTEX_THREAD, %eax	/* remove waiters bit */
	cmpl	$0, %eax		/* if free, skip */
	je	1f			/* go return 0 */
	movl	T_CPU(%eax), %ecx	/* get owner->t_cpu */
	movl	CPU_THREAD(%ecx), %edx	/* get t_cpu->cpu_thread */
.mutex_owner_running_critical_end:
	cmpl	%eax, %edx	/* owner == running thread? */
	je	2f		/* yes, go return cpu */
1:
	xorl	%eax, %eax	/* return 0 */
	ret
2:
	movl	%ecx, %eax	/* return cpu */
	ret

	SET_SIZE(mutex_owner_running)

	.globl	mutex_owner_running_critical_size
	.type	mutex_owner_running_critical_size, @object
	.align	CPTRSIZE
mutex_owner_running_critical_size:
	.long	.mutex_owner_running_critical_end - mutex_owner_running_critical_start
	SET_SIZE(mutex_owner_running_critical_size)

	.globl	mutex_exit_critical_start

	ENTRY(mutex_exit)
mutex_exit_critical_start:		/* If interrupted, restart here */
	movl	%gs:CPU_THREAD, %edx
	movl	4(%esp), %ecx
	cmpl	%edx, (%ecx)
	jne	mutex_vector_exit		/* wrong type or wrong owner */
	movl	$0, (%ecx)			/* clear owner AND lock */
.mutex_exit_critical_end:
.mutex_exit_lockstat_patch_point:
	ret
	movl	$LS_MUTEX_EXIT_RELEASE, %eax
	jmp	lockstat_wrapper
	SET_SIZE(mutex_exit)

	.globl	mutex_exit_critical_size
	.type	mutex_exit_critical_size, @object
	.align	CPTRSIZE
mutex_exit_critical_size:
	.long	.mutex_exit_critical_end - mutex_exit_critical_start
	SET_SIZE(mutex_exit_critical_size)

#endif	/* !__amd64 */

#endif	/* __lint */

/*
 * rw_enter() and rw_exit().
 *
 * These routines handle the simple cases of rw_enter (write-locking an unheld
 * lock or read-locking a lock that's neither write-locked nor write-wanted)
 * and rw_exit (no waiters or not the last reader).  If anything complicated
 * is going on we punt to rw_enter_sleep() and rw_exit_wakeup(), respectively.
 */
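/*
 * Reader fast path, as a hedged sketch (atomic_cas_ptr() standing in
 * for the lock cmpxchg below; rw_wwwh is the combined waiters/
 * write-wanted/hold word):
 *
 *	old = lp->rw_wwwh;
 *	if ((old & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0 ||
 *	    atomic_cas_ptr(&lp->rw_wwwh, old, old + RW_READ_LOCK) != old)
 *		rw_enter_sleep(lp, rw);
 *
 * The writer fast path is a single CAS of 0 (unheld) to
 * (curthread | RW_WRITE_LOCKED); rw_exit() undoes each with a CAS and
 * punts to rw_exit_wakeup() when waiters may need waking.
 */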
#if defined(lint) || defined(__lint)

/* ARGSUSED */
void
rw_enter(krwlock_t *lp, krw_t rw)
{}

/* ARGSUSED */
void
rw_exit(krwlock_t *lp)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY(rw_enter)
	movq	%gs:CPU_THREAD, %rdx		/* rdx = thread ptr */
	cmpl	$RW_WRITER, %esi
	je	.rw_write_enter
	incl	T_KPRI_REQ(%rdx)		/* THREAD_KPRI_REQUEST() */
	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
	jnz	rw_enter_sleep
	leaq	RW_READ_LOCK(%rax), %rdx	/* rdx = new rw_wwwh value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to grab read lock */
	jnz	rw_enter_sleep
.rw_read_enter_lockstat_patch_point:
	ret
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %edi
	movl	$RW_READER, %edx
	jmp	lockstat_wrapper_arg
.rw_write_enter:
	orq	$RW_WRITE_LOCKED, %rdx		/* rdx = write-locked value */
	xorl	%eax, %eax			/* rax = unheld value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to grab write lock */
	jnz	rw_enter_sleep

#if defined(OPTERON_WORKAROUND_6323525)
.rw_write_enter_lockstat_patch_point:
.rw_write_enter_6323525_patch_point:
	ret
	nop
	nop
.rw_write_enter_lockstat_6323525_patch_point:
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.rw_write_enter_lockstat_patch_point:
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */

	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %edi
	movl	$RW_WRITER, %edx
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_enter)

	ENTRY(rw_exit)
	movq	(%rdi), %rax			/* rax = old rw_wwwh value */
	cmpl	$RW_READ_LOCK, %eax		/* single-reader, no waiters? */
	jne	.rw_not_single_reader
	xorl	%edx, %edx			/* rdx = new value (unheld) */
.rw_read_exit:
	lock
	cmpxchgq %rdx, (%rdi)			/* try to drop read lock */
	jnz	rw_exit_wakeup
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	decl	T_KPRI_REQ(%rcx)		/* THREAD_KPRI_RELEASE() */
.rw_read_exit_lockstat_patch_point:
	ret
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_EXIT_RELEASE, %edi
	movl	$RW_READER, %edx
	jmp	lockstat_wrapper_arg
.rw_not_single_reader:
	testl	$RW_WRITE_LOCKED, %eax	/* write-locked? */
	jnz	.rw_write_exit
	leaq	-RW_READ_LOCK(%rax), %rdx	/* rdx = new value */
	cmpl	$RW_READ_LOCK, %edx
	jge	.rw_read_exit		/* not last reader, safe to drop */
	jmp	rw_exit_wakeup			/* last reader with waiters */
.rw_write_exit:
	movq	%gs:CPU_THREAD, %rax		/* rax = thread ptr */
	xorl	%edx, %edx			/* rdx = new value (unheld) */
	orq	$RW_WRITE_LOCKED, %rax		/* rax = write-locked value */
	lock
	cmpxchgq %rdx, (%rdi)			/* try to drop write lock */
	jnz	rw_exit_wakeup
.rw_write_exit_lockstat_patch_point:
	ret
	movq	%gs:CPU_THREAD, %rcx		/* rcx = thread ptr */
	movq	%rdi, %rsi			/* rsi = lock ptr */
	movl	$LS_RW_EXIT_RELEASE, %edi
	movl	$RW_WRITER, %edx
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_exit)

#else

	ENTRY(rw_enter)
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	cmpl	$RW_WRITER, 8(%esp)
	je	.rw_write_enter
	incl	T_KPRI_REQ(%edx)		/* THREAD_KPRI_REQUEST() */
	movl	(%ecx), %eax			/* eax = old rw_wwwh value */
	testl	$RW_WRITE_LOCKED|RW_WRITE_WANTED, %eax
	jnz	rw_enter_sleep
	leal	RW_READ_LOCK(%eax), %edx	/* edx = new rw_wwwh value */
	lock
	cmpxchgl %edx, (%ecx)			/* try to grab read lock */
	jnz	rw_enter_sleep
.rw_read_enter_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %eax
	pushl	$RW_READER
	jmp	lockstat_wrapper_arg
.rw_write_enter:
	orl	$RW_WRITE_LOCKED, %edx		/* edx = write-locked value */
	xorl	%eax, %eax			/* eax = unheld value */
	lock
	cmpxchgl %edx, (%ecx)			/* try to grab write lock */
	jnz	rw_enter_sleep

#if defined(OPTERON_WORKAROUND_6323525)
.rw_write_enter_lockstat_patch_point:
.rw_write_enter_6323525_patch_point:
	ret
	nop
	nop
.rw_write_enter_lockstat_6323525_patch_point:
	nop
#else	/* OPTERON_WORKAROUND_6323525 */
.rw_write_enter_lockstat_patch_point:
	ret
#endif	/* OPTERON_WORKAROUND_6323525 */

	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	$LS_RW_ENTER_ACQUIRE, %eax
	pushl	$RW_WRITER
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_enter)

	ENTRY(rw_exit)
	movl	4(%esp), %ecx			/* ecx = lock ptr */
	movl	(%ecx), %eax			/* eax = old rw_wwwh value */
	cmpl	$RW_READ_LOCK, %eax		/* single-reader, no waiters? */
	jne	.rw_not_single_reader
	xorl	%edx, %edx			/* edx = new value (unheld) */
.rw_read_exit:
	lock
	cmpxchgl %edx, (%ecx)			/* try to drop read lock */
	jnz	rw_exit_wakeup
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	decl	T_KPRI_REQ(%edx)		/* THREAD_KPRI_RELEASE() */
.rw_read_exit_lockstat_patch_point:
	ret
	movl	$LS_RW_EXIT_RELEASE, %eax
	pushl	$RW_READER
	jmp	lockstat_wrapper_arg
.rw_not_single_reader:
	testl	$RW_WRITE_LOCKED, %eax	/* write-locked? */
	jnz	.rw_write_exit
	leal	-RW_READ_LOCK(%eax), %edx	/* edx = new value */
	cmpl	$RW_READ_LOCK, %edx
	jge	.rw_read_exit		/* not last reader, safe to drop */
	jmp	rw_exit_wakeup			/* last reader with waiters */
.rw_write_exit:
	movl	%gs:CPU_THREAD, %eax		/* eax = thread ptr */
	xorl	%edx, %edx			/* edx = new value (unheld) */
	orl	$RW_WRITE_LOCKED, %eax		/* eax = write-locked value */
	lock
	cmpxchgl %edx, (%ecx)			/* try to drop write lock */
	jnz	rw_exit_wakeup
.rw_write_exit_lockstat_patch_point:
	ret
	movl	%gs:CPU_THREAD, %edx		/* edx = thread ptr */
	movl	$LS_RW_EXIT_RELEASE, %eax
	pushl	$RW_WRITER
	jmp	lockstat_wrapper_arg
	SET_SIZE(rw_exit)

#endif	/* !__amd64 */

#endif	/* __lint */

#if defined(OPTERON_WORKAROUND_6323525)
#if defined(lint) || defined(__lint)

int	workaround_6323525_patched;

void
patch_workaround_6323525(void)
{}

#else	/* lint */

/*
 * If it is necessary to patch the lock enter routines with the lfence
 * workaround, workaround_6323525_patched is set to a non-zero value so
 * that the lockstat_hot_patch routine can patch to the new location of
 * the 'ret' instruction.
 */
	DGDEF3(workaround_6323525_patched, 4, 4)
	.long	0

#if defined(__amd64)

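/*
 * HOT_MUTEX_PATCH(srcaddr, dstaddr, size) copies 'size' bytes from
 * srcaddr over the code at dstaddr, one byte at a time via
 * hot_patch_kernel_text(), working from the last byte back to the
 * first, so the pre-existing leading 'ret' is only overwritten once
 * the tail of the new sequence is already in place.
 */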
#define HOT_MUTEX_PATCH(srcaddr, dstaddr, size)	\
	movq	$size, %rbx;			\
	movq	$dstaddr, %r13;			\
	addq	%rbx, %r13;			\
	movq	$srcaddr, %r12;			\
	addq	%rbx, %r12;			\
0:						\
	decq	%r13;				\
	decq	%r12;				\
	movzbl	(%r12), %esi;			\
	movq	$1, %rdx;			\
	movq	%r13, %rdi;			\
	call	hot_patch_kernel_text;		\
	decq	%rbx;				\
	testq	%rbx, %rbx;			\
	jg	0b;

/*
 * patch_workaround_6323525: provide workaround for 6323525
 *
 * The workaround is to place a fencing instruction (lfence) between the
 * mutex operation and the subsequent read-modify-write instruction.
 *
 * This routine hot patches the lfence instruction on top of the space
 * reserved by nops in the lock enter routines.
 */
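/*
 * The reserved space in each routine is ret/nop/nop/nop (4 bytes); the
 * replacement, supplied by _lfence_insn below, is the 3-byte lfence
 * (0F AE E8) followed by a new ret.
 */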
	ENTRY_NP(patch_workaround_6323525)
	pushq	%rbp
	movq	%rsp, %rbp
	pushq	%r12
	pushq	%r13
	pushq	%rbx

	/*
	 * Tell lockstat_hot_patch() to use the alternate workaround-6323525
	 * patch points (which point past the lfence instruction to the new
	 * ret) whenever workaround_6323525_patched is set.
	 */
	movl	$1, workaround_6323525_patched

	/*
	 * Patch ret/nop/nop/nop to lfence/ret at the end of the lock enter
	 * routines. The 4 bytes are patched in reverse order so that the
	 * existing ret is overwritten last. This preserves lock enter
	 * sanity during the intermediate patching stages.
	 */
	HOT_MUTEX_PATCH(_lfence_insn, .mutex_enter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .mutex_tryenter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .mutex_atryenter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .rw_write_enter_6323525_patch_point, 4)

	popq	%rbx
	popq	%r13
	popq	%r12
	movq	%rbp, %rsp
	popq	%rbp
	ret
_lfence_insn:
	lfence
	ret
	SET_SIZE(patch_workaround_6323525)


#else	/* __amd64 */

#define HOT_MUTEX_PATCH(srcaddr, dstaddr, size)	\
	movl	$size, %ebx;			\
	movl	$srcaddr, %esi;			\
	addl	%ebx, %esi;			\
	movl	$dstaddr, %edi;			\
	addl	%ebx, %edi;			\
0:						\
	decl	%esi;				\
	decl	%edi;				\
	pushl	$1;				\
	movzbl	(%esi), %eax;			\
	pushl	%eax;				\
	pushl	%edi;				\
	call	hot_patch_kernel_text;		\
	addl	$12, %esp;			\
	decl	%ebx;				\
	testl	%ebx, %ebx;			\
	jg	0b;


	/* see comments above */
	ENTRY_NP(patch_workaround_6323525)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi

	movl	$1, workaround_6323525_patched

	HOT_MUTEX_PATCH(_lfence_insn, .mutex_enter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .mutex_tryenter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .mutex_atryenter_6323525_patch_point, 4)
	HOT_MUTEX_PATCH(_lfence_insn, .rw_write_enter_6323525_patch_point, 4)

	popl	%edi
	popl	%esi
	popl	%ebx
	movl	%ebp, %esp
	popl	%ebp
	ret
_lfence_insn:
	.byte	0xf, 0xae, 0xe8		/ [lfence instruction]
	ret
	SET_SIZE(patch_workaround_6323525)

#endif	/* !__amd64 */
#endif	/* !lint */
#endif	/* OPTERON_WORKAROUND_6323525 */


#if defined(lint) || defined(__lint)

void
lockstat_hot_patch(void)
{}

#else

#if defined(__amd64)

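/*
 * HOT_PATCH(addr, event, active_instr, normal_instr, len) patches 'len'
 * bytes at addr: if the lockstat probe for 'event' is enabled in
 * lockstat_probemap[], it writes the "active" instruction (a nop, so
 * execution falls through into the lockstat code); otherwise it writes
 * the "normal" instruction (a ret).
 */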
12420Sstevel@tonic-gate#define	HOT_PATCH(addr, event, active_instr, normal_instr, len)	\
12430Sstevel@tonic-gate	movq	$normal_instr, %rsi;		\
12440Sstevel@tonic-gate	movq	$active_instr, %rdi;		\
12450Sstevel@tonic-gate	leaq	lockstat_probemap(%rip), %rax;	\
12460Sstevel@tonic-gate	movl 	_MUL(event, DTRACE_IDSIZE)(%rax), %eax;	\
12470Sstevel@tonic-gate	testl	%eax, %eax;			\
12480Sstevel@tonic-gate	jz	9f;				\
12490Sstevel@tonic-gate	movq	%rdi, %rsi;			\
12500Sstevel@tonic-gate9:						\
12510Sstevel@tonic-gate	movq	$len, %rdx;			\
12520Sstevel@tonic-gate	movq	$addr, %rdi;			\
12530Sstevel@tonic-gate	call	hot_patch_kernel_text
12540Sstevel@tonic-gate
12550Sstevel@tonic-gate#else
12560Sstevel@tonic-gate
12570Sstevel@tonic-gate#define	HOT_PATCH(addr, event, active_instr, normal_instr, len)	\
12580Sstevel@tonic-gate	movl	$normal_instr, %ecx;		\
12590Sstevel@tonic-gate	movl	$active_instr, %edx;		\
12600Sstevel@tonic-gate	movl	$lockstat_probemap, %eax;	\
12610Sstevel@tonic-gate	movl	_MUL(event, DTRACE_IDSIZE)(%eax), %eax;	\
12620Sstevel@tonic-gate	testl	%eax, %eax;			\
12630Sstevel@tonic-gate	jz	. + 4;				\
12640Sstevel@tonic-gate	movl	%edx, %ecx;			\
12650Sstevel@tonic-gate	pushl	$len;				\
12660Sstevel@tonic-gate	pushl	%ecx;				\
12670Sstevel@tonic-gate	pushl	$addr;				\
12680Sstevel@tonic-gate	call	hot_patch_kernel_text;		\
12690Sstevel@tonic-gate	addl	$12, %esp;
12700Sstevel@tonic-gate
12710Sstevel@tonic-gate#endif	/* !__amd64 */
12720Sstevel@tonic-gate
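/*
 * lockstat_hot_patch() visits every lockstat patch point and installs
 * either a ret (probe disabled: the hot path returns immediately) or a
 * nop (probe enabled: execution falls through into the probe code that
 * follows the patch point).  When the Opteron workaround has been
 * applied, the *_6323525_patch_point variants are patched instead of
 * the originals.
 */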
12730Sstevel@tonic-gate	ENTRY(lockstat_hot_patch)
12740Sstevel@tonic-gate#if defined(__amd64)
12750Sstevel@tonic-gate	pushq	%rbp			/* align stack properly */
12760Sstevel@tonic-gate	movq	%rsp, %rbp
12770Sstevel@tonic-gate#endif	/* __amd64 */
12781582Skchow
12791582Skchow#if defined(OPTERON_WORKAROUND_6323525)
12801582Skchow	cmpl	$0, workaround_6323525_patched
12811582Skchow	je	1f
12821582Skchow	HOT_PATCH(.mutex_enter_lockstat_6323525_patch_point,
12831582Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
12841582Skchow	HOT_PATCH(.mutex_tryenter_lockstat_6323525_patch_point,
12851582Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
12861582Skchow	HOT_PATCH(.rw_write_enter_lockstat_6323525_patch_point,
12871582Skchow		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
12881582Skchow	jmp	2f
12891582Skchow1:
12900Sstevel@tonic-gate	HOT_PATCH(.mutex_enter_lockstat_patch_point,
12910Sstevel@tonic-gate		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
12920Sstevel@tonic-gate	HOT_PATCH(.mutex_tryenter_lockstat_patch_point,
12930Sstevel@tonic-gate		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
12941582Skchow	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
12951582Skchow		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
12961582Skchow2:
12971582Skchow#else	/* OPTERON_WORKAROUND_6323525 */
12981582Skchow	HOT_PATCH(.mutex_enter_lockstat_patch_point,
12991582Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13001582Skchow	HOT_PATCH(.mutex_tryenter_lockstat_patch_point,
13011582Skchow		LS_MUTEX_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13021582Skchow	HOT_PATCH(.rw_write_enter_lockstat_patch_point,
13031582Skchow		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13041582Skchow#endif	/* !OPTERON_WORKAROUND_6323525 */
13050Sstevel@tonic-gate	HOT_PATCH(.mutex_exit_lockstat_patch_point,
13060Sstevel@tonic-gate		LS_MUTEX_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
13070Sstevel@tonic-gate	HOT_PATCH(.rw_read_enter_lockstat_patch_point,
13080Sstevel@tonic-gate		LS_RW_ENTER_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13090Sstevel@tonic-gate	HOT_PATCH(.rw_write_exit_lockstat_patch_point,
13100Sstevel@tonic-gate		LS_RW_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
13110Sstevel@tonic-gate	HOT_PATCH(.rw_read_exit_lockstat_patch_point,
13120Sstevel@tonic-gate		LS_RW_EXIT_RELEASE, NOP_INSTR, RET_INSTR, 1)
13130Sstevel@tonic-gate	HOT_PATCH(.lock_set_lockstat_patch_point,
13140Sstevel@tonic-gate		LS_LOCK_SET_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13150Sstevel@tonic-gate	HOT_PATCH(.lock_try_lockstat_patch_point,
13160Sstevel@tonic-gate		LS_LOCK_TRY_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13170Sstevel@tonic-gate	HOT_PATCH(.lock_clear_lockstat_patch_point,
13180Sstevel@tonic-gate		LS_LOCK_CLEAR_RELEASE, NOP_INSTR, RET_INSTR, 1)
13190Sstevel@tonic-gate	HOT_PATCH(.lock_set_spl_lockstat_patch_point,
13200Sstevel@tonic-gate		LS_LOCK_SET_SPL_ACQUIRE, NOP_INSTR, RET_INSTR, 1)
13210Sstevel@tonic-gate
13220Sstevel@tonic-gate	HOT_PATCH(LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_POINT,
13230Sstevel@tonic-gate		LS_LOCK_CLEAR_SPLX_RELEASE,
13240Sstevel@tonic-gate		LOCK_CLEAR_SPLX_LOCKSTAT_PATCH_VAL, 0, 1)
13250Sstevel@tonic-gate#if defined(__amd64)
13260Sstevel@tonic-gate	leave			/* unwind stack */
13270Sstevel@tonic-gate#endif	/* __amd64 */
13280Sstevel@tonic-gate	ret
13290Sstevel@tonic-gate	SET_SIZE(lockstat_hot_patch)
13300Sstevel@tonic-gate
13310Sstevel@tonic-gate#endif	/* __lint */
13320Sstevel@tonic-gate
13330Sstevel@tonic-gate#if defined(lint) || defined(__lint)
13340Sstevel@tonic-gate
13350Sstevel@tonic-gate/* XX64 membar_*() should be inlines */
13360Sstevel@tonic-gate
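/*
 * Typical producer/consumer pairing of these barriers (a sketch only;
 * "data" and "ready" are illustrative names, not kernel symbols):
 *
 *	producer:			consumer:
 *		data = value;			while (!ready)
 *		membar_producer();			continue;
 *		ready = 1;			membar_consumer();
 *						use(data);
 */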
13370Sstevel@tonic-gatevoid
13385788Smv143129membar_sync(void)
13395788Smv143129{}
13405788Smv143129
13415788Smv143129void
13420Sstevel@tonic-gatemembar_enter(void)
13430Sstevel@tonic-gate{}
13440Sstevel@tonic-gate
13450Sstevel@tonic-gatevoid
13460Sstevel@tonic-gatemembar_exit(void)
13470Sstevel@tonic-gate{}
13480Sstevel@tonic-gate
13490Sstevel@tonic-gatevoid
13500Sstevel@tonic-gatemembar_producer(void)
13510Sstevel@tonic-gate{}
13520Sstevel@tonic-gate
13530Sstevel@tonic-gatevoid
13540Sstevel@tonic-gatemembar_consumer(void)
13550Sstevel@tonic-gate{}
13560Sstevel@tonic-gate
13570Sstevel@tonic-gate#else	/* __lint */
13580Sstevel@tonic-gate
13590Sstevel@tonic-gate#if defined(__amd64)
13600Sstevel@tonic-gate
13610Sstevel@tonic-gate	ENTRY(membar_enter)
13620Sstevel@tonic-gate	ALTENTRY(membar_exit)
13635788Smv143129	ALTENTRY(membar_sync)
13640Sstevel@tonic-gate	mfence			/* lighter weight than lock; xorq $0,(%rsp) */
13650Sstevel@tonic-gate	ret
13665788Smv143129	SET_SIZE(membar_sync)
13670Sstevel@tonic-gate	SET_SIZE(membar_exit)
13680Sstevel@tonic-gate	SET_SIZE(membar_enter)
13690Sstevel@tonic-gate
13700Sstevel@tonic-gate	ENTRY(membar_producer)
13710Sstevel@tonic-gate	sfence
13720Sstevel@tonic-gate	ret
13730Sstevel@tonic-gate	SET_SIZE(membar_producer)
13740Sstevel@tonic-gate
13750Sstevel@tonic-gate	ENTRY(membar_consumer)
13760Sstevel@tonic-gate	lfence
13770Sstevel@tonic-gate	ret
13780Sstevel@tonic-gate	SET_SIZE(membar_consumer)
13790Sstevel@tonic-gate
13800Sstevel@tonic-gate#else
13810Sstevel@tonic-gate
13820Sstevel@tonic-gate	ENTRY(membar_enter)
13830Sstevel@tonic-gate	ALTENTRY(membar_exit)
13845788Smv143129	ALTENTRY(membar_sync)
13850Sstevel@tonic-gate	lock
13860Sstevel@tonic-gate	xorl	$0, (%esp)
13870Sstevel@tonic-gate	ret
13885788Smv143129	SET_SIZE(membar_sync)
13890Sstevel@tonic-gate	SET_SIZE(membar_exit)
13900Sstevel@tonic-gate	SET_SIZE(membar_enter)
13910Sstevel@tonic-gate
13920Sstevel@tonic-gate/*
13930Sstevel@tonic-gate * On machines that support sfence and lfence, these memory barriers
13940Sstevel@tonic-gate * can be implemented more precisely than with a serializing locked op;
13950Sstevel@tonic-gate * the global _patch_*_ret labels let startup code hot-patch them in.
13960Sstevel@tonic-gate */
13970Sstevel@tonic-gate	ENTRY(membar_producer)
13980Sstevel@tonic-gate	.globl	_patch_sfence_ret
13990Sstevel@tonic-gate_patch_sfence_ret:			/* c.f. membar #StoreStore */
14000Sstevel@tonic-gate	lock
14010Sstevel@tonic-gate	xorl	$0, (%esp)
14020Sstevel@tonic-gate	ret
14030Sstevel@tonic-gate	SET_SIZE(membar_producer)
14040Sstevel@tonic-gate
14050Sstevel@tonic-gate	ENTRY(membar_consumer)
14060Sstevel@tonic-gate	.globl	_patch_lfence_ret
14070Sstevel@tonic-gate_patch_lfence_ret:			/* c.f. membar #LoadLoad */
14080Sstevel@tonic-gate	lock
14090Sstevel@tonic-gate	xorl	$0, (%esp)
14100Sstevel@tonic-gate	ret
14110Sstevel@tonic-gate	SET_SIZE(membar_consumer)
14120Sstevel@tonic-gate
14130Sstevel@tonic-gate#endif	/* !__amd64 */
14140Sstevel@tonic-gate
14150Sstevel@tonic-gate#endif	/* __lint */
14160Sstevel@tonic-gate
14170Sstevel@tonic-gate/*
14180Sstevel@tonic-gate * thread_onproc()
14190Sstevel@tonic-gate * Set thread in onproc state for the specified CPU.
14200Sstevel@tonic-gate * Also set the thread lock pointer to the CPU's onproc lock.
14210Sstevel@tonic-gate * Since the new lock isn't held, the store ordering is important.
14220Sstevel@tonic-gate * If not done in assembler, the compiler could reorder the stores.
14230Sstevel@tonic-gate */
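/*
 * (x86 preserves store-store ordering in hardware for normal cacheable
 * memory, so issuing the two stores below in program order is
 * sufficient; no explicit fence is required.)
 */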
14240Sstevel@tonic-gate#if defined(lint) || defined(__lint)
14250Sstevel@tonic-gate
14260Sstevel@tonic-gatevoid
14270Sstevel@tonic-gatethread_onproc(kthread_id_t t, cpu_t *cp)
14280Sstevel@tonic-gate{
14290Sstevel@tonic-gate	t->t_state = TS_ONPROC;
14300Sstevel@tonic-gate	t->t_lockp = &cp->cpu_thread_lock;
14310Sstevel@tonic-gate}
14320Sstevel@tonic-gate
14330Sstevel@tonic-gate#else	/* __lint */
14340Sstevel@tonic-gate
14350Sstevel@tonic-gate#if defined(__amd64)
14360Sstevel@tonic-gate
14370Sstevel@tonic-gate	ENTRY(thread_onproc)
14380Sstevel@tonic-gate	addq	$CPU_THREAD_LOCK, %rsi	/* pointer to disp_lock while running */
14390Sstevel@tonic-gate	movl	$ONPROC_THREAD, T_STATE(%rdi)	/* set state to TS_ONPROC */
14400Sstevel@tonic-gate	movq	%rsi, T_LOCKP(%rdi)	/* store new lock pointer */
14410Sstevel@tonic-gate	ret
14420Sstevel@tonic-gate	SET_SIZE(thread_onproc)
14430Sstevel@tonic-gate
14440Sstevel@tonic-gate#else
14450Sstevel@tonic-gate
14460Sstevel@tonic-gate	ENTRY(thread_onproc)
14470Sstevel@tonic-gate	movl	4(%esp), %eax
14480Sstevel@tonic-gate	movl	8(%esp), %ecx
14490Sstevel@tonic-gate	addl	$CPU_THREAD_LOCK, %ecx	/* pointer to disp_lock while running */
14500Sstevel@tonic-gate	movl	$ONPROC_THREAD, T_STATE(%eax)	/* set state to TS_ONPROC */
14510Sstevel@tonic-gate	movl	%ecx, T_LOCKP(%eax)	/* store new lock pointer */
14520Sstevel@tonic-gate	ret
14530Sstevel@tonic-gate	SET_SIZE(thread_onproc)
14540Sstevel@tonic-gate
14550Sstevel@tonic-gate#endif	/* !__amd64 */
14560Sstevel@tonic-gate
14570Sstevel@tonic-gate#endif	/* __lint */
1458*5834Spt157919
1459*5834Spt157919/*
1460*5834Spt157919 * mutex_delay_default(void)
1461*5834Spt157919 * Spins for approximately a few hundred processor cycles, then returns to the caller.
1462*5834Spt157919 */
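/*
 * The delay is a simple countdown loop: 92 decq iterations on amd64,
 * 93 decl iterations on 32-bit x86.  The cycle count is approximate
 * and varies with processor model; presumably this routine serves as
 * the default body for the mutex backoff delay, as the name suggests.
 */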
1463*5834Spt157919
1464*5834Spt157919#if defined(lint) || defined(__lint)
1465*5834Spt157919
1466*5834Spt157919void
1467*5834Spt157919mutex_delay_default(void)
1468*5834Spt157919{}
1469*5834Spt157919
1470*5834Spt157919#else	/* __lint */
1471*5834Spt157919
1472*5834Spt157919#if defined(__amd64)
1473*5834Spt157919
1474*5834Spt157919	ENTRY(mutex_delay_default)
1475*5834Spt157919	movq	$92,%r11
1476*5834Spt1579190:	decq	%r11
1477*5834Spt157919	jg	0b
1478*5834Spt157919	ret
1479*5834Spt157919	SET_SIZE(mutex_delay_default)
1480*5834Spt157919
1481*5834Spt157919#else
1482*5834Spt157919
1483*5834Spt157919	ENTRY(mutex_delay_default)
1484*5834Spt157919	push	%ebp
1485*5834Spt157919	movl	%esp,%ebp
1486*5834Spt157919	andl	$-16,%esp
1487*5834Spt157919	push	%ebx
1488*5834Spt157919	movl	$93,%ebx
1489*5834Spt1579190:	decl	%ebx
1490*5834Spt157919	jg	0b
1491*5834Spt157919	pop	%ebx
1492*5834Spt157919	leave
1493*5834Spt157919	ret
1494*5834Spt157919	SET_SIZE(mutex_delay_default)
1495*5834Spt157919
1496*5834Spt157919#endif	/* !__amd64 */
1497*5834Spt157919#endif	/* __lint */
1498