/*	$NetBSD: lock_stubs_llsc.S,v 1.18 2023/12/05 17:38:40 andvar Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_cputype.h"
#include "opt_lockdebug.h"

#include <sys/errno.h>

#include <machine/asm.h>

RCSID("$NetBSD: lock_stubs_llsc.S,v 1.18 2023/12/05 17:38:40 andvar Exp $")

#include "assym.h"

/*
 * Set ISA level for the assembler.
 * XXX Clean up with a macro?  Same code fragment is in mipsX_subr.S too.
 * XXX Key off build abi instead of processor type?
 */
#if defined(MIPS3)
	.set	mips3
#endif

#if defined(MIPS32)
	.set	mips32
#endif

#if defined(MIPS64)
	.set	mips64
#endif

	.set	noreorder
	.set	noat

/*
 * unsigned long atomic_cas_ulong_llsc(volatile unsigned long *val,
 *     unsigned long old, unsigned long new);
 *
 *	For hysterical raisins in sys/arch/mips/include/lock.h, success
 *	implies load-acquire.  The SYNC_ACQ here could be moved there
 *	instead.
 */
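/*
 * Illustrative only, not part of the build: a rough C-level sketch of the
 * semantics the stub below implements, assuming the usual atomic_cas
 * contract of returning the value found in memory.  The LL/SC retry loop
 * is elided.
 *
 *	unsigned long
 *	atomic_cas_ulong(volatile unsigned long *val, unsigned long old,
 *	    unsigned long new)
 *	{
 *		unsigned long cur = *val;	// LONG_LL
 *		if (cur == old) {
 *			*val = new;		// LONG_SC, retried on failure
 *			// SYNC_ACQ: load-acquire on success
 *		}
 *		return cur;
 *	}
 *
 * The 32-bit llsc_atomic_cas_uint stub below is identical apart from the
 * operand size.
 */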
STATIC_LEAF(llsc_atomic_cas_ulong)
	LLSCSYNC
1:
	LONG_LL	t0, (a0)
	bne	t0, a1, 2f
	 move	t1, a2
	LONG_SC	t1, (a0)
	beqz	t1, 1b
	 nop
	SYNC_ACQ
	j	ra
	 move	v0, a1
2:
	j	ra
	 move	v0, t0
END(llsc_atomic_cas_ulong)

/*
 * unsigned int _atomic_cas_uint_llsc(volatile unsigned int *val,
 *    unsigned int old, unsigned int new);
 *
 *	For hysterical raisins in sys/arch/mips/include/lock.h, success
 *	implies load-acquire.  The SYNC_ACQ here could be moved there
 *	instead.
 */
STATIC_LEAF(llsc_atomic_cas_uint)
	LLSCSYNC
1:
	INT_LL	t0, (a0)
	bne	t0, a1, 2f
	 move	t1, a2
	INT_SC	t1, (a0)
	beqz	t1, 1b
	 nop
	SYNC_ACQ
	j	ra
	 move	v0, a1
2:
	j	ra
	 move	v0, t0
END(llsc_atomic_cas_uint)

/*
 * int llsc_ucas_32(volatile uint32_t *ptr, uint32_t old,
 *	uint32_t new, uint32_t *ret)
 *
 *	Implies release/acquire barriers until someone tells me
 *	otherwise about _ucas_32/64.
 */
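/*
 * Illustrative only, not part of the build: a rough C-level sketch of the
 * contract implemented below.  The onfault hook that turns a faulting user
 * access into EFAULT is elided.
 *
 *	int
 *	ucas_32(volatile uint32_t *ptr, uint32_t old, uint32_t new,
 *	    uint32_t *ret)
 *	{
 *		if ((intptr_t)ptr < 0)		// kernel address: bltz a0
 *			return EFAULT;
 *		// SYNC_REL: release barrier
 *		uint32_t cur = *ptr;		// ll, faults -> EFAULT
 *		if (cur == old) {
 *			*ptr = new;		// sc, retried on failure
 *			// SYNC_ACQ: acquire barrier on success
 *		}
 *		*ret = cur;
 *		return 0;
 *	}
 */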
STATIC_LEAF(llsc_ucas_32)
	.set at
	PTR_LA	v0, _C_LABEL(llsc_ucaserr)
	.set noat
	PTR_L	v1, L_PCB(MIPS_CURLWP)
	PTR_S	v0, PCB_ONFAULT(v1)
	bltz	a0, _C_LABEL(llsc_ucaserr)
	 nop
	move	v0, zero
	SYNC_REL

	LLSCSYNC
1:	ll	t0, 0(a0)
	bne	t0, a1, 2f
	 move	t1, a2
	sc	t1, 0(a0)
	beqz	t1, 1b
	 nop
	SYNC_ACQ

2:	PTR_S	zero, PCB_ONFAULT(v1)
	j	ra
	 sw	t0, 0(a3)
END(llsc_ucas_32)

#ifdef _LP64
/*
 * int llsc_ucas_64(volatile uint64_t *ptr, uint64_t old,
 *	uint64_t new, uint64_t *ret)
 */
STATIC_LEAF(llsc_ucas_64)
	.set at
	PTR_LA	v0, _C_LABEL(llsc_ucaserr)
	.set noat
	PTR_L	v1, L_PCB(MIPS_CURLWP)
	PTR_S	v0, PCB_ONFAULT(v1)
	bltz	a0, _C_LABEL(llsc_ucaserr)
	 nop
	move	v0, zero
	SYNC_REL

	LLSCSYNC
1:	lld	t0, 0(a0)
	bne	t0, a1, 2f
	 move	t1, a2
	scd	t1, 0(a0)
	beqz	t1, 1b
	 nop
	SYNC_ACQ

2:	PTR_S	zero, PCB_ONFAULT(v1)
	j	ra
	 sd	t0, 0(a3)
END(llsc_ucas_64)
#endif /* _LP64 */

STATIC_LEAF_NOPROFILE(llsc_ucaserr)
	PTR_S	zero, PCB_ONFAULT(v1)		# reset fault handler
	j	ra
	 li	v0, EFAULT			# return EFAULT on error
END(llsc_ucaserr)

#ifndef LOCKDEBUG

/*
 * void	mutex_enter(kmutex_t *mtx);
 */
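/*
 * Illustrative only, not part of the build: a rough C-level sketch of the
 * adaptive mutex fast path below, assuming the owner word is the field
 * addressed by the MTX_OWNER offset.  Anything other than an uncontended
 * acquisition is punted to mutex_vector_enter().
 *
 *	if (atomic_cas_ptr(&mtx->mtx_owner, NULL, curlwp) == NULL)
 *		return;			// acquired; BDSYNC_ACQ = acquire
 *	mutex_vector_enter(mtx);
 */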
STATIC_LEAF(llsc_mutex_enter)
	LLSCSYNC
	PTR_LL	t0, MTX_OWNER(a0)
1:
	bnez	t0, 2f
	 move	t2, MIPS_CURLWP
	PTR_SC	t2, MTX_OWNER(a0)
	beqz	t2, 1b
	 PTR_LL	t0, MTX_OWNER(a0)
	j	ra
	 BDSYNC_ACQ
2:
	j	_C_LABEL(mutex_vector_enter)
	 nop
END(llsc_mutex_enter)

/*
 * void	mutex_exit(kmutex_t *mtx);
 */
STATIC_LEAF(llsc_mutex_exit)
	SYNC_REL
	LLSCSYNC
	PTR_LL	t0, MTX_OWNER(a0)
1:
	bne	t0, MIPS_CURLWP, 2f
	 move	t2, zero
	PTR_SC	t2, MTX_OWNER(a0)
	beqz	t2, 1b
	 PTR_LL	t0, MTX_OWNER(a0)
	j	ra
	 BDSYNC_PLUNGER
2:
	j	_C_LABEL(mutex_vector_exit)
	 nop
END(llsc_mutex_exit)

/*
 * void	mutex_spin_enter(kmutex_t *mtx);
 */
STATIC_NESTED(llsc_mutex_spin_enter, CALLFRAME_SIZ, ra)
	move	t0, a0
	PTR_L	t2, L_CPU(MIPS_CURLWP)
	INT_L	a0, MTX_IPL(t0)
#ifdef PARANOIA
	INT_L	ta1, CPU_INFO_CPL(t2)
#endif

	/*
	 * We need to raise our IPL.  That means calling another routine,
	 * but splraise is written to have little overhead and only uses
	 * registers a0-a3 and v0-v1.
	 */
	move	t3, ra			# need to save ra
	jal	_C_LABEL(splraise)
	 nop
	move	ra, t3			# move ra back
#ifdef PARANOIA
10:	bne	ta1, v0, 10b		# loop forever if v0 != ta1
	 nop
#endif /* PARANOIA */

	/*
	 * If this is the outermost spin mutex held on this CPU, store the
	 * previous IPL so it can be restored on exit.  Even if an interrupt
	 * happens, the mutex count will not change.
	 */
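	/*
	 * Illustrative only: the bookkeeping below is roughly (assuming the
	 * usual per-CPU ci_mtx_count/ci_mtx_oldspl fields, with s being the
	 * value splraise() just returned)
	 *
	 *	if (ci->ci_mtx_count-- == 0)
	 *		ci->ci_mtx_oldspl = s;
	 */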
1:
	INT_L	ta2, CPU_INFO_MTX_COUNT(t2)
	INT_ADDU ta3, ta2, -1
	INT_S	ta3, CPU_INFO_MTX_COUNT(t2)
	bltz	ta2, 2f
	 nop
	INT_S	v0, CPU_INFO_MTX_OLDSPL(t2)	/* returned by splraise */
2:
#ifdef PARANOIA
	INT_L	ta1, CPU_INFO_MTX_OLDSPL(t2)
	INT_L	ta2, CPU_INFO_CPL(t2)	# get updated CPL
	sltu	v0, ta2, ta0		# v0 = cpl < mtx_ipl
	sltu	v1, ta2, ta1		# v1 = cpl < oldspl
	sll	v0, 1
	or	v0, v1
12:	bnez	v0, 12b			# loop forever if any are true
	 nop
#endif /* PARANOIA */

	LLSCSYNC
	INT_LL	t3, MTX_LOCK(t0)
3:
	bnez	t3, 4f
	 li	t1, 1
	INT_SC	t1, MTX_LOCK(t0)
	beqz	t1, 3b
	 INT_LL	t3, MTX_LOCK(t0)
	j	ra
	 BDSYNC_ACQ
4:
	j	_C_LABEL(mutex_spin_retry)
	 move	a0, t0
END(llsc_mutex_spin_enter)

/*
 * void	mutex_spin_exit(kmutex_t *mtx);
 */
LEAF(llsc_mutex_spin_exit)
	SYNC_REL
	PTR_L	t2, L_CPU(MIPS_CURLWP)
#if defined(DIAGNOSTIC)
	INT_L	t0, MTX_LOCK(a0)
	beqz	t0, 2f
	 nop
#endif
	INT_S	zero, MTX_LOCK(a0)

	/*
	 * We need to load the old SPL before the mutex count is incremented
	 * because if we get an interrupt, it may see the count as zero
	 * and overwrite the oldspl value with a bogus value.
	 */
#ifdef PARANOIA
	INT_L	a2, MTX_IPL(a0)
#endif
	INT_L	a0, CPU_INFO_MTX_OLDSPL(t2)

	/*
	 * Increment the mutex count
	 */
	INT_L	t0, CPU_INFO_MTX_COUNT(t2)
	INT_ADDU t0, t0, 1
	INT_S	t0, CPU_INFO_MTX_COUNT(t2)

	/*
	 * If the IPL doesn't change, nothing to do
	 */
	INT_L	a1, CPU_INFO_CPL(t2)

#ifdef PARANOIA
	sltu	v0, a1, a2		# v0 = cpl < mtx_ipl
	sltu	v1, a1, a0		# v1 = cpl < oldspl
	sll	v0, 1
	or	v0, v1
12:	bnez	v0, 12b			# loop forever if either is true
	 nop
#endif /* PARANOIA */

	beq	a0, a1, 1f		# if oldspl == cpl
	 nop				#   no reason to drop ipl

	bltz	t0, 1f			# there are still holders
	 nop				# so don't drop IPL

	/*
	 * Mutex count is zero so we need to restore the old IPL
	 */
#ifdef PARANOIA
	sltiu	v0, a0, IPL_HIGH+1
13:	beqz	v0, 13b			# loop forever if ipl > IPL_HIGH
	 nop
#endif
	j	_C_LABEL(splx)
	 BDSYNC_PLUNGER
1:
	j	ra
	 BDSYNC_PLUNGER
#if defined(DIAGNOSTIC)
2:
	j	_C_LABEL(mutex_vector_exit)
	 nop
#endif
END(llsc_mutex_spin_exit)
#endif	/* !LOCKDEBUG */

	.rdata
EXPORT_OBJECT(mips_llsc_locore_atomicvec)
	PTR_WORD	llsc_atomic_cas_uint
	PTR_WORD	llsc_atomic_cas_ulong
	PTR_WORD	llsc_ucas_32
#ifdef _LP64
	PTR_WORD	llsc_ucas_64
#else
	PTR_WORD	0
#endif /* _LP64 */
#ifdef LOCKDEBUG
	PTR_WORD	mutex_vector_enter
	PTR_WORD	mutex_vector_exit
	PTR_WORD	mutex_vector_enter
	PTR_WORD	mutex_vector_exit
#else
	PTR_WORD	llsc_mutex_enter
	PTR_WORD	llsc_mutex_exit
	PTR_WORD	llsc_mutex_spin_enter
	PTR_WORD	llsc_mutex_spin_exit
#endif	/* !LOCKDEBUG */
