/*	$NetBSD: lock_stubs.S,v 1.29 2022/05/19 06:41:45 skrll Exp $	*/

/*-
 * Copyright (c) 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran and Nick Hudson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"

#define __MUTEX_PRIVATE

#include <machine/asm.h>
#include <machine/mutex.h>
#include <machine/reg.h>
#include <machine/cpu.h>

#include "assym.h"

/*
 * uintptr_t _lock_cas(volatile uintptr_t *ptr, uintptr_t old, uintptr_t new);
 *
 * Perform an atomic compare-and-swap operation.
 *
 * On single-CPU systems this can use a restartable sequence, as we don't
 * need the overhead of interlocking there.
 */
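
/*
 * In C terms, both implementations below behave like the following
 * sketch, with the load/compare/store made to appear atomic.  This is
 * illustrative only and not part of the build; callers treat a return
 * value equal to 'old' as success.
 *
 *	uintptr_t
 *	_lock_cas(volatile uintptr_t *ptr, uintptr_t old, uintptr_t new)
 *	{
 *		uintptr_t cur = *ptr;
 *
 *		if (cur == old)
 *			*ptr = new;
 *		return cur;
 *	}
 */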


#ifndef LOCKDEBUG

	.global mutex_enter_crit_start
	.global mutex_enter_crit_end

	.import mutex_vector_enter, code
	.import mutex_vector_exit, code
	.import mutex_wakeup, code

/*
 * void mutex_exit(kmutex_t *mtx);
 *
 */
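
/*
 * Roughly what the fast path below does, expressed in C.  This is an
 * illustrative sketch only, not part of the build: the field names
 * (mtx_owner, mtx_lock, mtx_waiters) are assumptions about the layout
 * implied by the MTX_* offsets from assym.h, and the 0x10 mask mirrors
 * the depi that clears bit 27 below.
 *
 *	void
 *	mutex_exit(kmutex_t *mtx)
 *	{
 *		if ((mtx->mtx_owner & ~(uintptr_t)0x10) ==
 *		    MUTEX_ADAPTIVE_UNOWNED) {
 *			mutex_vector_exit(mtx);		// spin mutex or unowned
 *			return;
 *		}
 *		mtx->mtx_owner = MUTEX_ADAPTIVE_UNOWNED;
 *		__cpu_simple_unlock(&mtx->mtx_lock);	// release ldcw word (store + sync below)
 *		if (mtx->mtx_waiters != 0)
 *			mutex_wakeup(mtx);		// wake sleeping threads
 *	}
 */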

LEAF_ENTRY(mutex_exit)
	/*
	 * If it's a spin mutex or unowned, we have to take the slow path.
	 */
	ldi	MUTEX_ADAPTIVE_UNOWNED, %t1
	ldw	MTX_OWNER(%arg0),%t2
	depi	0, 27, 1, %t2			/* bit27 = 0 */
	comb,=	%t1, %t2, .Lexit_slowpath
	 nop

	/*
	 * We know that it's an adaptive mutex.  Clear the owner
	 * field and release the lock.
	 */

	ldi	__SIMPLELOCK_RAW_UNLOCKED, %t2	/* unlocked = 1 */
	ldo	(MTX_LOCK + HPPA_LDCW_ALIGN - 1)(%arg0), %t3
	depi	0, 31, 4, %t3			/* bits[28-31] = 0 */
	stw	%t1, MTX_OWNER(%arg0)
	stw	%t2, 0(%t3)			/* %t3 is properly aligned */
	sync

	/*
	 * The sync above acts as a memory barrier, so the release of the
	 * lock is visible before we check mtx_waiters.  If it's set, call
	 * mutex_wakeup() to wake up any threads sleeping on the lock.
	 */
	ldb	MTX_WAITERS(%arg0), %t4
	comib,=	0, %t4, .Lexit_done
	 nop

	ldil	L%mutex_wakeup, %t1
	ldo	R%mutex_wakeup(%t1), %t1
	.call
	bv,n	%r0(%t1)

.Lexit_slowpath:
	ldil	L%mutex_vector_exit, %t1
	ldo	R%mutex_vector_exit(%t1), %t1
	.call
	bv,n	%r0(%t1)
.Lexit_done:
	bv,n	%r0(%rp)

EXIT(mutex_exit)

/*
 * void mutex_enter(kmutex_t *mtx)
 */
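
/*
 * Illustrative C equivalent of the fast path below (not part of the
 * build; the field names are assumptions based on the MTX_* offsets,
 * and curlwp stands in for GET_CURLWP()):
 *
 *	void
 *	mutex_enter(kmutex_t *mtx)
 *	{
 *		if (mtx->mtx_owner == MUTEX_ADAPTIVE_UNOWNED &&
 *		    __cpu_simple_lock_try(&mtx->mtx_lock)) {
 *			mtx->mtx_owner = (uintptr_t)curlwp;
 *			return;
 *		}
 *		mutex_vector_enter(mtx);	// spin mutex, owned, or contended
 *	}
 */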

LEAF_ENTRY(mutex_enter)

	/*
	 * It might be a spin lock (cf. MUTEX_SPIN_FLAG) or might already
	 * be owned.  We short-circuit the request and go straight into
	 * mutex_vector_enter() if the owner field is not 'unowned'.
	 */

	ldi	MUTEX_ADAPTIVE_UNOWNED, %t1
	ldw	MTX_OWNER(%arg0), %t2
	comb,=,n %t1, %t2, .Lmutexunowned
.Lenter_slowpath:

	ldil	L%mutex_vector_enter, %t1
	ldo	R%mutex_vector_enter(%t1), %t1
	.call
	bv,n	%r0(%t1)
	 nop

	/*
	 * We now know that it's an adaptive mutex.  Grab the spin
	 * lock, which is an atomic operation.  Once we have that,
	 * we can set the owner field.  If we can't get it, we
	 * need to take the slow path.
	 *
	 * Even if we are preempted between acquiring the lock and
	 * setting the owner field, there is no problem: no one
	 * else can acquire the mutex while the lock is held.
	 */
.Lmutexunowned:
	ldo	(MTX_LOCK + HPPA_LDCW_ALIGN - 1)(%arg0), %t1
	depi	0, 31, 4, %t1
	ldcw	0(%t1), %ret0

mutex_enter_crit_start:
	comib,=	0, %ret0, .Lenter_slowpath

	GET_CURLWP(%t2)

	bv	%r0(%rp)
	 stw	%t2, MTX_OWNER(%arg0)
mutex_enter_crit_end:
EXIT(mutex_enter)

#endif	/* !LOCKDEBUG */

#ifndef MULTIPROCESSOR

	.global _lock_cas_ras_start
	.global _lock_cas_ras_end

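/*
 * The _lock_cas_ras_start/_lock_cas_ras_end labels delimit a
 * restartable atomic sequence (RAS): if an interrupt arrives while the
 * PC is inside the window, the interrupt return path is expected to
 * restart execution at _lock_cas_ras_start, so the load/compare/store
 * below behaves atomically on a single CPU without an interlock.
 */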
LEAF_ENTRY(_lock_cas)
_lock_cas_ras_start:
	ldw	0(%arg0),%t1
	comb,<>	%arg1, %t1, 1f
	 copy	%t1,%ret0
	stw	%arg2,0(%arg0)
_lock_cas_ras_end:
	copy	%arg1,%ret0
1:
	bv,n	%r0(%rp)

EXIT(_lock_cas)

STRONG_ALIAS(_atomic_cas_ulong,_lock_cas)
STRONG_ALIAS(atomic_cas_ulong,_lock_cas)
STRONG_ALIAS(_atomic_cas_32,_lock_cas)
STRONG_ALIAS(atomic_cas_32,_lock_cas)
STRONG_ALIAS(_atomic_cas_uint,_lock_cas)
STRONG_ALIAS(atomic_cas_uint,_lock_cas)
STRONG_ALIAS(_atomic_cas_ptr,_lock_cas)
STRONG_ALIAS(atomic_cas_ptr,_lock_cas)

STRONG_ALIAS(_atomic_cas_ulong_ni,_lock_cas)
STRONG_ALIAS(atomic_cas_ulong_ni,_lock_cas)
STRONG_ALIAS(_atomic_cas_32_ni,_lock_cas)
STRONG_ALIAS(atomic_cas_32_ni,_lock_cas)
STRONG_ALIAS(_atomic_cas_uint_ni,_lock_cas)
STRONG_ALIAS(atomic_cas_uint_ni,_lock_cas)
STRONG_ALIAS(_atomic_cas_ptr_ni,_lock_cas)
STRONG_ALIAS(atomic_cas_ptr_ni,_lock_cas)

#else  /* !MULTIPROCESSOR */

/*
 * uintptr_t _lock_cas(volatile uintptr_t *ptr, uintptr_t old, uintptr_t new);
 *
 * Perform an atomic compare-and-swap operation.
 *
 * On multi-CPU systems, this has to use an interlock and disable interrupts.
 * The interlock protects against another CPU attempting to perform the cas.
 * Disabling interrupts prevents deadlocks on the current CPU.  That is, we
 * don't want an interrupt attempting to perform a cas on the interlock at
 * the same time.
 *
 */
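
/*
 * Illustrative sketch of the flow below (not part of the build; the
 * disable_interrupts(), restore_interrupts(), lock_hash_slot() and
 * ldcw() helpers are invented names for what the assembly does inline):
 *
 *	uintptr_t
 *	_lock_cas_mp(volatile uintptr_t *ptr, uintptr_t old, uintptr_t new)
 *	{
 *		volatile uint32_t *il;
 *		uintptr_t cur;
 *		u_int s;
 *
 *		s = disable_interrupts();		// mfctl/mtctl %eiem
 *		il = lock_hash_slot(ptr);		// 16-byte aligned interlock
 *		while (ldcw(il) == 0) {			// 0 means already held
 *			while (*il != __SIMPLELOCK_RAW_UNLOCKED)
 *				continue;		// spin with plain loads
 *		}
 *		cur = *ptr;
 *		if (cur == old)
 *			*ptr = new;
 *		// sync, then release the interlock and restore %eiem
 *		*il = __SIMPLELOCK_RAW_UNLOCKED;
 *		restore_interrupts(s);
 *		return cur;
 *	}
 */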

#define IL	\
	.word	__SIMPLELOCK_RAW_UNLOCKED ! \
	.word	__SIMPLELOCK_RAW_UNLOCKED ! \
	.word	__SIMPLELOCK_RAW_UNLOCKED ! \
	.word	__SIMPLELOCK_RAW_UNLOCKED ! \

#define I8	\
	IL IL IL IL IL IL IL IL

#define I64	\
	I8 I8 I8 I8 I8 I8 I8 I8

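/*
 * Each IL expands to four words (16 bytes): one interlock padded out to
 * the 16-byte alignment that ldcw requires.  The page-aligned table
 * below is therefore an array of such interlocks; _lock_cas_mp hashes
 * the target address down to an index into it.
 */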
	.section .data
	.align 4096
	.export _lock_hash, data
_lock_hash:
	I64 I64
	I64 I64
	I64 I64
	I64 I64
	I64 I64
	I64 I64
	I64 I64
	I64 I64

LEAF_ENTRY(_lock_cas)
ALTENTRY(_lock_cas_mp)

	mfctl	%eiem, %t1
	mtctl	%r0, %eiem			/* disable interrupts */

	extru	%arg0, 21+8-1, 8, %ret0		/* hash: extract 8 bits of ptr */
	ldil	L%_lock_hash, %r1
	zdep	%ret0, 27, 28, %ret0		/* hash <<= 4 (16-byte slots) */
	ldo	R%_lock_hash(%r1), %r1

	addl	%ret0, %r1, %ret0		/* slot = _lock_hash + offset */
	ldo	15(%ret0), %ret0		/* round up to the 16-byte */
	copy	%ret0, %t3
	depi	0, 31, 4, %t3			/* alignment that ldcw needs */

	/* %t3 is the interlock address */
	ldcw	0(%t3), %ret0
	comib,<>,n	0, %ret0, _lock_cas_mp_interlocked
_lock_cas_mp_spin:
	 ldw	0(%t3), %ret0
	comib,= 0, %ret0, _lock_cas_mp_spin
	 nop
	ldcw	0(%t3), %ret0
	comib,= 0, %ret0, _lock_cas_mp_spin
	 nop

_lock_cas_mp_interlocked:
	ldw	0(%arg0), %ret0
	comclr,<> %arg1, %ret0, %r0	/* If *ptr != old, then nullify */
	stw	%arg2, 0(%arg0)

	sync

	ldi	__SIMPLELOCK_RAW_UNLOCKED, %t4
	stw	%t4, 0(%t3)
	bv	%r0(%r2)
	 mtctl	%t1, %eiem		/* enable interrupts */

EXIT(_lock_cas)

STRONG_ALIAS(_atomic_cas_ulong,_lock_cas_mp)
STRONG_ALIAS(atomic_cas_ulong,_lock_cas_mp)
STRONG_ALIAS(_atomic_cas_32,_lock_cas_mp)
STRONG_ALIAS(atomic_cas_32,_lock_cas_mp)
STRONG_ALIAS(_atomic_cas_uint,_lock_cas_mp)
STRONG_ALIAS(atomic_cas_uint,_lock_cas_mp)
STRONG_ALIAS(_atomic_cas_ptr,_lock_cas_mp)
STRONG_ALIAS(atomic_cas_ptr,_lock_cas_mp)

STRONG_ALIAS(_atomic_cas_ulong_ni,_lock_cas_mp)
STRONG_ALIAS(atomic_cas_ulong_ni,_lock_cas_mp)
STRONG_ALIAS(_atomic_cas_32_ni,_lock_cas_mp)
STRONG_ALIAS(atomic_cas_32_ni,_lock_cas_mp)
STRONG_ALIAS(_atomic_cas_uint_ni,_lock_cas_mp)
STRONG_ALIAS(atomic_cas_uint_ni,_lock_cas_mp)
STRONG_ALIAS(_atomic_cas_ptr_ni,_lock_cas_mp)
STRONG_ALIAS(atomic_cas_ptr_ni,_lock_cas_mp)

#endif /* MULTIPROCESSOR */