/*	$NetBSD: lock_stubs.S,v 1.19 2022/04/06 22:47:58 riastradh Exp $	*/

/*-
 * Copyright (c) 2002, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Bugfix for mutex_spin_enter, which wasn't reentrant.
 *
 * Fixed by Johnny Billquist and Chuck Silvers
 */

#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include <machine/asm.h>
#include "assym.h"

#ifdef MULTIPROCESSOR
	.section	.bss
	.p2align	2
	.lcomm		cashash,256	/* 2048 bits */
#endif

#ifndef LOCKDEBUG
#if MTX_OWNER != 0
#error MTX_OWNER != 0, need to add offset to (%r1)
#endif

/*
 * void mutex_enter(kmutex_t *);
 */
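/*
 * For orientation, the fast path below in rough C (a sketch only;
 * "mtx_owner" is an illustrative name for the owner word that the
 * MTX_OWNER == 0 assertion above refers to):
 *
 *	if (atomic_cas_ulong(&mtx->mtx_owner, 0, (u_long)curlwp) != 0)
 *		mutex_vector_enter(mtx);
 */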
ENTRY(mutex_enter, 0)
	movl	4(%ap), %r1		/* get mutex (ptr) */
#ifdef DIAGNOSTIC
	blbs	(%r1), 1f		/* go slow if this is SPIN */
#endif
	clrl	%r2			/* set old value (zero) */
	mfpr	$PR_SSP, %r3		/* set new value (curlwp) */
#ifndef MULTIPROCESSOR
	addl3	$CI_CAS_ADDR, L_CPU(%r3), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
	bsbw	_do_cas+2		/* do the compare-and-swap */
	tstl	%r0			/* is the old value what we wanted? */
	beql	2f			/*  yep, just branch to the return */
1:	callg	(%ap), _C_LABEL(mutex_vector_enter)
					/*  nope, there's an owner so go slow */
2:	ret

/*
 * void mutex_exit(kmutex_t *);
 */
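/*
 * The release is the mirror image, again only a C sketch with
 * illustrative field names: swap the owner word back from curlwp to
 * zero, and let mutex_vector_exit() deal with anything else (such as
 * recorded waiters) when the CAS does not return curlwp:
 *
 *	if (atomic_cas_ulong(&mtx->mtx_owner, (u_long)curlwp, 0) !=
 *	    (u_long)curlwp)
 *		mutex_vector_exit(mtx);
 */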
ENTRY(mutex_exit, 0)
	movl	4(%ap), %r1		/* get mutex (ptr) */
#ifdef DIAGNOSTIC
	blbs	(%r1), 1f		/* go slow if this is SPIN */
#endif
	mfpr	$PR_SSP, %r2		/* get curlwp (old) */
	clrl	%r3			/* get zero (new) */
#ifndef MULTIPROCESSOR
	addl3	$CI_CAS_ADDR, L_CPU(%r2), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
	bsbw	_do_cas+2		/* do the compare-and-swap */
	cmpl	%r0,%r2			/* return == old? */
	beql	2f			/*   yes, branch to return */
1:	callg	(%ap), _C_LABEL(mutex_vector_exit) /*   no, slow path */
2:	ret

/*
 * void mutex_spin_enter(kmutex_t *);
 */
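/*
 * Rough C equivalent of the spin-mutex fast path below (a sketch;
 * names are illustrative, and splraiseipl() stands in for the
 * "raise IPL only if the mutex's IPL is higher" test):
 *
 *	s = splraiseipl(mtx->mtx_ipl);
 *	ci = curcpu();
 *	if (ci->ci_mtx_count-- == 0)
 *		ci->ci_mtx_oldspl = s;
 *	if (!__cpu_simple_lock_try(&mtx->mtx_lock))
 *		mutex_spin_retry(mtx);
 */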
ENTRY(mutex_spin_enter, 0)
	movl	4(%ap), %r0			/* get spin mutex */
#ifdef DIAGNOSTIC
	blbc	(%r0), 3f
#endif
	mfpr	$PR_IPL, %r2			/* get current IPL */
	movzbl	MTX_IPL(%r0), %r3
	cmpl	%r3, %r2			/* is the mutex IPL higher? */
	bleq	1f				/*   no, leave IPL alone */
	mtpr	%r3, $PR_IPL			/*   yes, raise IPL */
1:	mfpr	$PR_SSP, %r4			/* get curlwp */
	movl	L_CPU(%r4),%r4			/* get cpu_info */
	decl	CI_MTX_COUNT(%r4)		/* decr mutex count */
	bcc	3f				/* already in a spin mutex - don't save IPL */
	movl	%r2, CI_MTX_OLDSPL(%r4)		/*   outermost one, save old IPL */
3:
#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
	bbssi	$0, MTX_LOCK(%r0), 4f		/* take out mutex */
	ret
4:	callg	(%ap), _C_LABEL(mutex_spin_retry)	/* slow path */
#else
	movb	$1, MTX_LOCK(%r0)		/* for ddb use only */
#endif
	ret

/*
 * void mutex_spin_exit(kmutex_t *);
 */
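/*
 * And the matching release, as a C sketch (illustrative names):
 *
 *	__cpu_simple_unlock(&mtx->mtx_lock);
 *	ci = curcpu();
 *	s = ci->ci_mtx_oldspl;
 *	if (++ci->ci_mtx_count == 0)
 *		splx(s);
 */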
ENTRY(mutex_spin_exit, 0)
	movl	4(%ap), %r0			/* get spin mutex */
#if defined(DIAGNOSTIC)
	blbc	(%r0), 2f			/* assert this is a spinlock */
#endif
#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
	bbcci	$0, MTX_LOCK(%r0), 2f		/* clear mutex */
#else
	clrb	MTX_LOCK(%r0)			/* for ddb use only */
#endif
	mfpr	$PR_SSP, %r4			/* get curlwp */
	movl	L_CPU(%r4), %r4			/* get cpu_info */
	movl	CI_MTX_OLDSPL(%r4), %r2		/* fetch oldspl */
	incl	CI_MTX_COUNT(%r4)		/* incr mtx count */
	bneq	1f				/* still held? */
	mtpr	%r2, $PR_IPL			/*   no, restore saved ipl */
1:	ret

#if defined(DIAGNOSTIC) || defined(MULTIPROCESSOR)
2:	callg	(%ap), _C_LABEL(mutex_vector_exit)	/* slow path */
	ret
#endif

#if RW_READER != 0
#error RW_READER != 0, change tstl to cmpl $RW_READER
#endif
#if RW_HAS_WAITERS != 1
#error RW_HAS_WAITERS != 1, don't use blbs
#endif
#if RW_OWNER != 0
#error RW_OWNER != 0, need to add to loads
#endif
/*
 * void rw_enter(krwlock_t *rwl, krw_t op);
 */
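/*
 * The owner word holds either a reader count (in RW_READ_INCR units)
 * or the owning lwp with RW_WRITE_LOCKED set, plus waiter bits.  A C
 * sketch of the fast path (illustrative; note that a single failed CAS
 * falls back to rw_vector_enter() rather than retrying here):
 *
 *	old = rwl->rw_owner;
 *	if (op == RW_READER) {
 *		if ((old & (RW_WRITE_LOCKED | RW_WRITE_WANTED)) != 0 ||
 *		    atomic_cas_ulong(&rwl->rw_owner, old,
 *		      old + RW_READ_INCR) != old)
 *			rw_vector_enter(rwl, op);
 *	} else {
 *		if (atomic_cas_ulong(&rwl->rw_owner, 0,
 *		      (u_long)curlwp | RW_WRITE_LOCKED) != 0)
 *			rw_vector_enter(rwl, op);
 *	}
 */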
ENTRY(rw_enter, 0)
	movl	4(%ap), %r1			/* grab rwlock ptr */
	tstl	8(%ap)				/* is this a reader op? */
	bneq	2f				/*   nope, branch to writer */

	movl	(%r1), %r2			/* get owner field */
	bitl	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %r2
						/* write active or pending? */
	bneq	3f				/*   yep, go slow */
	addl3	$RW_READ_INCR, %r2, %r3		/* incr. reader count (new) */
#ifndef MULTIPROCESSOR
	mfpr	$PR_SSP, %r4
	addl3	$CI_CAS_ADDR, L_CPU(%r4), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
1:	bsbw	_do_cas+2			/* do the compare-and-swap */
	cmpl	%r0, %r2			/* did it succeed? */
	bneq	3f				/*   nope, go slow */
	ret					/*   yep, return */

2:	clrl	%r2				/* get old value (zero) */
	mfpr	$PR_SSP, %r3			/* get new value (curlwp) */
#ifndef MULTIPROCESSOR
	addl3	$CI_CAS_ADDR, L_CPU(%r3), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
	bisl2	$RW_WRITE_LOCKED, %r3		/* show that it's a write */
	brb	1b				/* do the compare-and-swap */

3:	callg	(%ap), _C_LABEL(rw_vector_enter)
	ret

/*
 * void rw_exit(krwlock_t *rwl);
 */
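/*
 * Release sketch in C (illustrative).  A write hold expects the owner
 * word to be exactly curlwp | RW_WRITE_LOCKED; a read hold subtracts
 * RW_READ_INCR (after checking that RW_HAS_WAITERS is clear and the
 * count will not go negative).  Any mismatch ends up in
 * rw_vector_exit():
 *
 *	old = rwl->rw_owner;
 *	if (old & RW_WRITE_LOCKED) {
 *		old = (u_long)curlwp | RW_WRITE_LOCKED;
 *		new = 0;
 *	} else {
 *		new = old - RW_READ_INCR;
 *	}
 *	if (atomic_cas_ulong(&rwl->rw_owner, old, new) != old)
 *		rw_vector_exit(rwl);
 */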
ENTRY(rw_exit, 0)
	movl	4(%ap), %r1			/* grab rwlock ptr */
	movl	(%r1), %r2			/* grab owner (old) */
	bitl	$RW_WRITE_LOCKED, %r2		/* is it write locked? */
	bneq	2f				/*   yep, do the write case */

	blbs	%r2, 3f				/* RW_HAS_WAITERS must be zero */
	subl3	$RW_READ_INCR, %r2, %r3		/* decr. reader count (new) */
	blss	3f				/* if less than 0, go slow */
#ifndef MULTIPROCESSOR
	mfpr	$PR_SSP, %r4			/* get curlwp */
	addl3	$CI_CAS_ADDR, L_CPU(%r4), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
1:	bsbw	_do_cas+2			/* do the compare-and-swap */
	cmpl	%r0, %r2			/* did it succeed? */
	bneq	3f				/*   nope, go slow */
	ret					/*   yes, return */

2:	mfpr	$PR_SSP, %r2			/* get old (curlwp) */
#ifndef MULTIPROCESSOR
	addl3	$CI_CAS_ADDR, L_CPU(%r2), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
	bisl2	$RW_WRITE_LOCKED, %r2		/* show that it's a write */
	clrl	%r3				/* get new (zero) */
	brb	1b				/* do the compare-and-swap */

3:	callg	(%ap), _C_LABEL(rw_vector_exit)
	ret

/*
 * int rw_tryenter(krwlock_t *krw, krw_t op);
 */
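/*
 * Same acquisition logic as rw_enter above, but contention is reported
 * instead of handled: the slow-path calls are replaced by "return 0",
 * and a successful CAS returns 1.
 */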
ENTRY(rw_tryenter, 0)
	movl	4(%ap), %r1			/* get rwlock ptr */
	tstl	8(%ap)				/* is this a reader op? */
	bneq	3f				/*   nope, branch to writer */

	movl	(%r1), %r2			/* get owner field (old) */
	bitl	$(RW_WRITE_LOCKED|RW_WRITE_WANTED), %r2
						/* write active or pending? */
	bneq	2f				/*   yes, return failure */
	addl3	$RW_READ_INCR, %r2, %r3		/* incr reader count (new) */
#ifndef MULTIPROCESSOR
	mfpr	$PR_SSP, %r4
	addl3	$CI_CAS_ADDR, L_CPU(%r4), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
1:	bsbw	_do_cas+2			/* do the compare-and-swap */
	cmpl	%r0, %r2			/* did it succeed? */
	bneq	2f				/*   no, we failed */
	movl	$1,%r0				/*   yes, indicate success */
	ret					/* return */
2:	clrl	%r0				/* indicate failure */
	ret					/* return */

3:	clrl	%r2				/* set old value (0) */
	mfpr	$PR_SSP, %r3			/* set new value (curlwp) */
#ifndef MULTIPROCESSOR
	addl3	$CI_CAS_ADDR, L_CPU(%r3), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
	bisl2	$RW_WRITE_LOCKED, %r3		/* show that it's a write */
	brb	1b				/* do the compare-and-swap */
#endif /* !LOCKDEBUG */

/*
 * uint32_t _atomic_cas_32(volatile uint32_t *p, uint32_t old, uint32_t new);
 */
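/*
 * The contract, in C (a sketch of the standard compare-and-swap
 * semantics; the whole body must appear to execute atomically):
 *
 *	uint32_t
 *	cas32(volatile uint32_t *p, uint32_t old, uint32_t new)
 *	{
 *		uint32_t ret = *p;
 *
 *		if (ret == old)
 *			*p = new;
 *		return ret;
 *	}
 */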
ENTRY(_atomic_cas_32, 0)
	movq	4(%ap), %r1		/* cache ptr, old */
	movl	12(%ap), %r3		/* cache new */
#ifndef MULTIPROCESSOR
	mfpr	$PR_SSP, %r4
	addl3	$CI_CAS_ADDR, L_CPU(%r4), %r4 /* r4 == &curcpu()->ci_cas_addr */
#endif
	bsbw	_do_cas+2		/* do it */
	ret

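/*
 * _do_cas is the common compare-and-swap helper for everything above.
 * Callers branch to _do_cas+2 (past the two-byte register save mask)
 * with r1 = address, r2 = expected old value, r3 = new value; the
 * previous contents of *r1 come back in r0.  On non-MULTIPROCESSOR
 * kernels r4 must already point at curcpu()->ci_cas_addr.
 *
 * The VAX has no interlocked compare-and-swap instruction, so on
 * MULTIPROCESSOR kernels the update is done under a per-address lock:
 * bits 2-12 of the target address index a 2048-bit table (cashash),
 * and the interlocked bbssi/bbcci instructions spin on that bit at
 * IPL_HIGH.  Roughly, in C (a sketch; testandset()/clearbit() stand in
 * for the interlocked bit operations):
 *
 *	idx = ((u_long)addr >> 2) & 2047;
 *	s = splhigh();
 *	while (testandset(cashash, idx))
 *		continue;
 *	ret = *addr;
 *	if (ret == old)
 *		*addr = new;
 *	clearbit(cashash, idx);
 *	splx(s);
 *	return ret;
 */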
ENTRY(_do_cas, 0)
#ifdef MULTIPROCESSOR
	movl	(%r1), %r0		/* get value */
	cmpl	%r0, %r2		/* does it equal old? */
	bneq	4f			/*    nope, return */
	extzv	$2,$11,%r1,%r4		/* get bits 2-12 (our hash) */
	/*
	 * Lock everyone out on this cpu.
	 */
	mfpr	$PR_IPL, %r5		/* save IPL */
	mtpr	$IPL_HIGH, $PR_IPL	/* block everything */
1:	bbssi	%r4,cashash,1b		/* spin while this pos in the hash table is set */
	movl	(%r1), %r0		/* get value again */
	cmpl	%r0, %r2		/* does it still equal old? */
	bneq	2f			/*    nope, return */
	movl	%r3,(%r1)		/* update *ptr with new */
2:
	bbcci	%r4,cashash,3f		/* clear this pos in the hash table */
3:
	mtpr	%r5, $PR_IPL		/* restore IPL */
4:
	rsb				/* return */
#else
/*
 * entry:
 *	r1 = address to be CAS'ed
 *	r2 = old value
 *	r3 = new value
 *	r4 = global cell to hold CAS address (common to all callers)
 *	     e.g. address of curcpu()->ci_cas_addr
 * exit:
 *	r0 = previous contents of *r1 (equals the old value on success)
 */
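/*
 * cas32_ras_start/cas32_ras_end delimit a restartable atomic sequence:
 * with only one CPU the sole hazard is being interrupted part-way
 * through, and the interrupt path is expected to notice that (the cell
 * at *r4 holds the target address while a sequence is active and
 * CASMAGIC otherwise) and restart from cas32_ras_start instead of
 * resuming in the middle.  The restart logic itself lives in the
 * interrupt code, not in this file.
 */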
	.globl	cas32_ras_start, cas32_ras_end
cas32_ras_start:
	movl	%r1, (%r4)
	movl	*(%r4), %r0
	cmpl	%r2, %r0
	bneq	1f
	movl	%r3, *(%r4)
cas32_ras_end:
1:
	movl	$CASMAGIC, (%r4)
	rsb
#endif /* !MULTIPROCESSOR */
STRONG_ALIAS(atomic_cas_ptr,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr,_atomic_cas_32)
STRONG_ALIAS(atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint,_atomic_cas_32)
STRONG_ALIAS(atomic_cas_ulong,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong,_atomic_cas_32)
STRONG_ALIAS(atomic_cas_32,_atomic_cas_32)

STRONG_ALIAS(atomic_cas_ptr_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ptr_ni,_atomic_cas_32)
STRONG_ALIAS(atomic_cas_uint_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_uint_ni,_atomic_cas_32)
STRONG_ALIAS(atomic_cas_ulong_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_ulong_ni,_atomic_cas_32)
STRONG_ALIAS(atomic_cas_32_ni,_atomic_cas_32)
STRONG_ALIAS(_atomic_cas_32_ni,_atomic_cas_32)