/*	$NetBSD: lock_stubs.S,v 1.38 2022/09/13 05:36:29 knakahara Exp $	*/

/*
 * Copyright (c) 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#include <machine/asm.h>
#include <machine/frameasm.h>

#include "assym.h"

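/*
 * LOCK and RET are hotpatch points (HOTPATCH comes from frameasm.h):
 * the LOCK prefix can be patched away to a NOP where locked bus cycles
 * are not required, and the 3-byte "ret; nop; nop" slot leaves room for
 * a load fence to be patched in ahead of the final ret on CPUs that
 * need one on these lock-acquire paths.
 */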
#define LOCK	\
	HOTPATCH(HP_NAME_NOLOCK, 1)	; \
	lock
#define RET	\
	HOTPATCH(HP_NAME_RETFENCE, 3)	; \
	ret; nop; nop			; \
	ret

#ifndef LOCKDEBUG

	.align	64

/*
 * void mutex_enter(kmutex_t *mtx);
 *
 * Acquire a mutex and post a load fence.
 */
ENTRY(mutex_enter)
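	/*
	 * Fast path: if the lock word is zero (unowned), CAS curlwp in
	 * as the owner; any other value means contention or recorded
	 * waiters, so fall back to the C slow path.
	 */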
	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	LOCK
	cmpxchgq %rcx, (%rdi)
	jnz	1f
	RET
1:
	jmp	_C_LABEL(mutex_vector_enter)
END(mutex_enter)

/*
 * void mutex_exit(kmutex_t *mtx);
 *
 * Release a mutex and post a load fence.
 *
 * See comments in mutex_vector_enter() about doing this operation unlocked
 * on multiprocessor systems, and comments in arch/x86/include/lock.h about
 * memory ordering on Intel x86 systems.
 */
ENTRY(mutex_exit)
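	/*
	 * Fast path: CAS the lock word from curlwp back to zero.  The
	 * cmpxchg is deliberately not LOCK prefixed; see the comments
	 * referenced above for why this is safe.
	 */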
	movq	CPUVAR(CURLWP), %rax
	xorq	%rdx, %rdx
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	ret
1:
	jmp	_C_LABEL(mutex_vector_exit)
END(mutex_exit)

/*
 * void mutex_spin_enter(kmutex_t *mtx);
 *
 * Acquire a spin mutex and post a load fence.
 */
ENTRY(mutex_spin_enter)
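	/*
	 * Raise the SPL to the mutex's IPL, recording the previous SPL
	 * in ci_mtx_oldspl only on the outermost acquisition (detected
	 * via the borrow from decrementing ci_mtx_count).  Then take the
	 * lock byte with XCHG; a non-zero previous value means it was
	 * already held, so spin in the C slow path.
	 */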
	movl	$1, %eax
	movzbl	CPUVAR(ILEVEL), %esi
	movzbl	MTX_IPL(%rdi), %ecx		/* new SPL */
	cmpl	%ecx, %esi			/* higher? */
	cmovgl	%esi, %ecx
	movb	%cl, CPUVAR(ILEVEL)		/* splraiseipl() */
	subl	%eax, CPUVAR(MTX_COUNT)		/* decl doesn't set CF */
	cmovncl	CPUVAR(MTX_OLDSPL), %esi
	movl	%esi, CPUVAR(MTX_OLDSPL)
	xchgb	%al, MTX_LOCK(%rdi)		/* lock */
#ifdef MULTIPROCESSOR	/* XXX for xen */
	testb	%al, %al
	jnz	1f
#endif
	RET
1:
	jmp	_C_LABEL(mutex_spin_retry)	/* failed; hard case */
END(mutex_spin_enter)

/*
 * void mutex_spin_exit(kmutex_t *mtx);
 *
 * Release a spin mutex and post a load fence.
 */
ENTRY(mutex_spin_exit)
#ifdef DIAGNOSTIC

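	/*
	 * With DIAGNOSTIC, verify the lock byte really was held: %al is
	 * the expected value (1) and %ah the new value (0) for CMPXCHGB.
	 * Then drop back to the saved SPL on the outermost release,
	 * handing off to Xspllower if unmasked interrupts are pending.
	 */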
	movl	$0x0001, %eax			/* new + expected value */
	movq	CPUVAR(SELF), %r8
	cmpxchgb %ah, MTX_LOCK(%rdi)		/* unlock */
	jnz	_C_LABEL(mutex_vector_exit)	/* hard case if problems */
	movl	CPU_INFO_MTX_OLDSPL(%r8), %edi
	incl	CPU_INFO_MTX_COUNT(%r8)
	jnz	1f
	cmpb	CPU_INFO_ILEVEL(%r8), %dil
	jae	1f
	movq	CPU_INFO_IUNMASK(%r8,%rdi,8), %rsi
	CLI(ax)
	testq	CPU_INFO_IPENDING(%r8), %rsi
	jnz	_C_LABEL(Xspllower)
	movb	%dil, CPU_INFO_ILEVEL(%r8)
	STI(ax)
1:	rep					/* double byte ret as branch */
	ret					/* target: see AMD docs */

#else	/* DIAGNOSTIC */

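	/*
	 * Without DIAGNOSTIC, release the lock byte with a plain store
	 * and lower the SPL without disabling interrupts: the level
	 * lives in the top byte of the same quadword as the pending
	 * bits (ci_istate), so one cmpxchg installs the new level only
	 * if nothing became pending meanwhile; otherwise hand off to
	 * Xspllower, or retry if the quadword changed under us.
	 */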
	movq	CPUVAR(SELF), %rsi
	movb	$0x00, MTX_LOCK(%rdi)
	movl	CPU_INFO_MTX_OLDSPL(%rsi), %ecx
	incl	CPU_INFO_MTX_COUNT(%rsi)
	movzbl	CPU_INFO_ILEVEL(%rsi),%edx
	cmovnzl	%edx,%ecx
	cmpl	%edx,%ecx			/* new level is lower? */
	jae	2f
	xorq	%rdi,%rdi			/* rdi: ci_ipending mask */
	notq	%rdi
	shrq	$8,%rdi
	movq	%rcx,%r9			/* r9: shifted new level */
	shlq	$56,%r9
1:
	movq	CPU_INFO_IPENDING(%rsi),%rax
	testq	%rax,CPU_INFO_IUNMASK(%rsi,%rcx,8)/* deferred interrupts? */
	jnz	3f
	movq	%rax,%r8
	andq	%rdi,%r8
	orq	%r9,%r8
	cmpxchgq %r8,CPU_INFO_ISTATE(%rsi)	/* swap in new ilevel */
	jnz	4f
2:
	ret
3:
	movl	%ecx, %edi
	jmp	_C_LABEL(Xspllower)
4:
	jmp	1b

#endif	/* DIAGNOSTIC */

END(mutex_spin_exit)

/*
 * void	rw_enter(krwlock_t *rwl, krw_t op);
 *
 * Acquire one hold on a RW lock.
 */
ENTRY(rw_enter)
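	/*
	 * The lock word holds either a reader count in multiples of
	 * RW_READ_INCR (plus waiter bits) or the owning LWP with
	 * RW_WRITE_LOCKED set.
	 */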
	cmpl	$RW_READER, %esi
	jne	2f

	/*
	 * Reader: this is the most common case.
	 */
	movq	(%rdi), %rax
0:
	testb	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %al
	jnz	3f
	leaq	RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	RET
1:
	jmp	0b

	/*
	 * Writer: if the compare-and-set fails, don't bother retrying.
	 */
2:	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	orq	$RW_WRITE_LOCKED, %rcx
	LOCK
	cmpxchgq %rcx, (%rdi)
	jnz	3f
	RET
3:
	jmp	_C_LABEL(rw_vector_enter)
END(rw_enter)

/*
 * void	rw_exit(krwlock_t *rwl);
 *
 * Release one hold on a RW lock.
 */
ENTRY(rw_exit)
	movq	(%rdi), %rax
	testb	$RW_WRITE_LOCKED, %al
	jnz	2f

	/*
	 * Reader
	 */
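	/*
	 * Take the slow path if waiters need to be woken, or if the
	 * count would underflow (the lock is not read-held).
	 */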
0:	testb	$RW_HAS_WAITERS, %al
	jnz	3f
	cmpq	$RW_READ_INCR, %rax
	jb	3f
	leaq	-RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	ret
1:
	jmp	0b

	/*
	 * Writer
	 */
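	/*
	 * %rdx becomes zero only if we are the owner and no waiter bits
	 * are set; zero is also the released value, so the CAS checks
	 * ownership and releases the lock in one step.
	 */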
2:	leaq	-RW_WRITE_LOCKED(%rax), %rdx
	subq	CPUVAR(CURLWP), %rdx
	jnz	3f
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	3f
	ret

3:	jmp	_C_LABEL(rw_vector_exit)
END(rw_exit)

/*
 * int	rw_tryenter(krwlock_t *rwl, krw_t op);
 *
 * Try to acquire one hold on a RW lock.
 */
ENTRY(rw_tryenter)
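	/*
	 * Same fast paths as rw_enter(), but on failure return 0 to the
	 * caller instead of entering the slow path; success returns
	 * non-zero.
	 */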
	cmpl	$RW_READER, %esi
	jne	2f

	/*
	 * Reader: this is the most common case.
	 */
	movq	(%rdi), %rax
0:
	testb	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %al
	jnz	4f
	leaq	RW_READ_INCR(%rax), %rdx
	LOCK
	cmpxchgq %rdx, (%rdi)
	jnz	1f
	movl	%edx, %eax			/* nonzero */
	RET
1:
	jmp	0b

	/*
	 * Writer: if the compare-and-set fails, don't bother retrying.
	 */
2:	movq	CPUVAR(CURLWP), %rcx
	xorq	%rax, %rax
	orq	$RW_WRITE_LOCKED, %rcx
	LOCK
	cmpxchgq %rcx, (%rdi)
	movl	$0, %eax
	setz	%al
3:
	RET
	ret
4:
	xorl	%eax, %eax
	jmp	3b
END(rw_tryenter)

#endif	/* LOCKDEBUG */

/*
 * Spinlocks.
 */
ENTRY(__cpu_simple_lock_init)
	movb	$0, (%rdi)
	ret
END(__cpu_simple_lock_init)

ENTRY(__cpu_simple_lock)
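	/*
	 * %eax = 0x0100: %al is the expected value (0, unlocked) and %ah
	 * the new value (1, locked) for CMPXCHGB.  On contention, spin
	 * with plain reads and PAUSE until the byte clears, then retry
	 * the locked operation.
	 */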
	movl	$0x0100, %eax
1:
	LOCK
	cmpxchgb %ah, (%rdi)
	jnz	2f
	RET
2:
	movl	$0x0100, %eax
	pause
	nop
	nop
	cmpb	$0, (%rdi)
	je	1b
	jmp	2b
END(__cpu_simple_lock)

ENTRY(__cpu_simple_unlock)
	movb	$0, (%rdi)
	ret
END(__cpu_simple_unlock)

ENTRY(__cpu_simple_lock_try)
	movl	$0x0100, %eax
	LOCK
	cmpxchgb %ah, (%rdi)
	movl	$0, %eax
	setz	%al
	KMSAN_INIT_RET(4)
	RET
END(__cpu_simple_lock_try)