xref: /openbsd-src/sys/arch/m88k/m88k/atomic.S (revision 5746cf2977bab637710f707ab6ffd43231ea7ce3)
1/*	$OpenBSD: atomic.S,v 1.7 2022/12/06 18:50:59 guenther Exp $	*/
2
3/*
4 * Copyright (c) 2009 Miodrag Vallat.
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <machine/asm.h>
20
21#ifdef M88110
22#define	CACHE_LINE	32
23#else
24#define	CACHE_LINE	16
25#endif
26	.data
27
28/*
29 * A __cpu_simple_lock_t used to provide the inter-processor interlock,
30 * alone on its cache line.  The .balign before and after keep any other
31 * data out of this cache line, so spinning CPUs do not false-share it.
32 */
	.balign	CACHE_LINE
33ASLOCAL(__atomic_interlock)
34	.word	0			/* 0 = __SIMPLELOCK_UNLOCKED */
35	.balign	CACHE_LINE
36
37	.text
38
39/*
40 * Register usage in this file:
41 *
42 * r2 data address
43 * r3 bits to set or clear
44 * r4 argument / scratch
45 * r5 return address
46 * r6 interlock address
47 * r7 psr upon entry
48 * r8 active psr
49 * r9 scratch
50 */
51
/*
 * atomic_setbits_int(uip, v): *uip |= v, atomically.
 * In: r2 = data address, r3 = bits to set (per register usage above).
 * bsr leaves our resume address in r1 for __atomic_lock to return through;
 * __atomic_unlock returns to the caller via the address saved in r5.
 */
52ENTRY(atomic_setbits_int)
53	or	%r5, %r1, %r0		/* save return address */
54	bsr	__atomic_lock		/* disable interrupts, take interlock */
55
56	ld	%r4, %r2, %r0		/* r4 = *r2 */
57	or	%r4, %r4, %r3		/* r4 |= r3 */
58	st	%r4, %r2, %r0		/* *r2 = r4 */
59
60	br	__atomic_unlock		/* release, restore psr, jmp r5 */
61
/*
 * atomic_clearbits_int(uip, v): *uip &= ~v, atomically.
 * In: r2 = data address, r3 = bits to clear.
 * m88k has no andn reg-reg form, so (r4 | r3) ^ r3 computes r4 & ~r3.
 */
62ENTRY(atomic_clearbits_int)
63	or	%r5, %r1, %r0		/* save return address */
64	bsr	__atomic_lock		/* disable interrupts, take interlock */
65
66	ld	%r4, %r2, %r0		/* r4 = *r2 */
67	or	%r4, %r4, %r3		/* set the bits first... */
68	xor	%r4, %r4, %r3		/* r4 &= ~r3 */
69	st	%r4, %r2, %r0		/* *r2 = r4 */
70
71	br	__atomic_unlock		/* release, restore psr, jmp r5 */
72
/*
 * atomic_add_int_nv_mp(uip, v): *uip += v, atomically; returns new value.
 * In: r2 = data address, r3 = addend.  Out: r2 = new value.
 * The address is moved to r9 because r2 doubles as the return register.
 */
73ENTRY(atomic_add_int_nv_mp)
74	or	%r5, %r1, %r0		/* save return address */
75	bsr	__atomic_lock		/* disable interrupts, take interlock */
76
77	or	%r9, %r2, %r0		/* r9 = address, free r2 for result */
78	ld	%r2, %r9, %r0		/* r2 = *r9 */
79	addu	%r2, %r2, %r3		/* r2 += r3 (unsigned, no overflow trap) */
80	st	%r2, %r9, %r0		/* *r9 = r2; r2 is the return value */
81
82	br	__atomic_unlock		/* release, restore psr, jmp r5 */
83
/*
 * atomic_sub_int_nv_mp(uip, v): *uip -= v, atomically; returns new value.
 * In: r2 = data address, r3 = subtrahend.  Out: r2 = new value.
 * Mirrors atomic_add_int_nv_mp with subu instead of addu.
 */
84ENTRY(atomic_sub_int_nv_mp)
85	or	%r5, %r1, %r0		/* save return address */
86	bsr	__atomic_lock		/* disable interrupts, take interlock */
87
88	or	%r9, %r2, %r0		/* r9 = address, free r2 for result */
89	ld	%r2, %r9, %r0		/* r2 = *r9 */
90	subu	%r2, %r2, %r3		/* r2 -= r3 (unsigned, no underflow trap) */
91	st	%r2, %r9, %r0		/* *r9 = r2; r2 is the return value */
92
93	br	__atomic_unlock		/* release, restore psr, jmp r5 */
94
/*
 * atomic_cas_uint_mp(uip, expected, new): if *uip == expected, *uip = new;
 * returns the previous value of *uip in either case.
 * In: r2 = data address, r3 = expected value, r4 = new value.
 * Out: r2 = previous value.
 */
95ENTRY(atomic_cas_uint_mp)
96	or	%r5, %r1, %r0		/* save return address */
97	bsr	__atomic_lock		/* disable interrupts, take interlock */
98
99	ld	%r9, %r2, %r0		/* r9 = old = *r2 */
100	cmp	%r3, %r3, %r9		/* compare old against expected */
101	bb0	eq,  %r3, 1f		/* eq bit clear -> mismatch, skip store */
102	st	%r4, %r2, %r0		/* *r2 = new */
1031:
104	or	%r2, %r9, %r0		/* return old value in r2 */
105
106	br	__atomic_unlock		/* release, restore psr, jmp r5 */
107
/*
 * atomic_swap_uint_mp(uip, v): *uip = v, atomically; returns previous value.
 * In: r2 = data address, r3 = new value.  Out: r2 = previous value.
 */
108ENTRY(atomic_swap_uint_mp)
109	or	%r5, %r1, %r0		/* save return address */
110	bsr	__atomic_lock		/* disable interrupts, take interlock */
111
112	ld	%r4, %r2, %r0		/* r4 = old = *r2 */
113	st	%r3, %r2, %r0		/* *r2 = new */
114	or	%r2, %r4, %r0		/* return old value in r2 */
115
116	br	__atomic_unlock		/* release, restore psr, jmp r5 */
117
118GLOBAL(__atomic_lock)
119
120/*
121 * If running a kernel with support for both 88100 and 88110 compiled-in
122 * on a 88100 machine, the 88100 code (shorter) will be copied over in
123 * vector_init().
124 *
125 * On entry r1 holds the return address (set by the caller's bsr); r6-r9
126 * are scratch here, and r7 preserves the original PSR for the matching
127 * __atomic_unlock.
128 */
129
126#ifdef M88110
127ASLOCAL(__atomic_lock_88110)
128	/*
129	 * This is the 88110 version: disable shadowing and interrupts,
130	 * then grab the interlock.
131	 */
132
133	or.u	%r6, %r0, %hi16(__atomic_interlock)
134	or	%r6, %r6, %lo16(__atomic_interlock)	/* r6 = &interlock */
135
136	ldcr	%r7, PSR		/* r7 = original psr, restored at unlock */
137	set	%r8, %r7, 1<PSR_INTERRUPT_DISABLE_BIT>
138	set	%r8, %r8, 1<PSR_SHADOW_FREEZE_BIT>
139	stcr	%r8, PSR		/* interrupts off, shadowing frozen */
140	FLUSH_PIPELINE
1411:
142	or	%r9, %r0, 1	/* __SIMPLELOCK_LOCKED */
143	xmem	%r9, %r6, %r0	/* atomically swap r9 with the lock word */
144	bcnd	eq0, %r9, 3f	/* got 0 back -> lock was free, we own it */
1452:
146	ld	%r9, %r6, %r0	/* spin with plain loads until it looks free */
147	bcnd	eq0, %r9, 1b	/* looks free -> retry the xmem */
148	br	2b
1493:
150	jmp	%r1		/* return to caller of bsr */
151#endif
152
153#ifdef M88100
154GLOBAL(__atomic_lock_88100)
	/*
155	 * This is the 88100 version: disable interrupts, then grab
156	 * the interlock.  Same test-and-set spin as the 88110 version,
157	 * but the 88100 has no shadow registers to freeze.
158	 */
159
160	or.u	%r6, %r0, %hi16(__atomic_interlock)
161	or	%r6, %r6, %lo16(__atomic_interlock)	/* r6 = &interlock */
162
163	ldcr	%r7, PSR		/* r7 = original psr, restored at unlock */
164	set	%r8, %r7, 1<PSR_INTERRUPT_DISABLE_BIT>
165	stcr	%r8, PSR		/* interrupts off */
166	FLUSH_PIPELINE
167
1681:
169	or	%r9, %r0, 1	/* __SIMPLELOCK_LOCKED */
170	xmem	%r9, %r6, %r0	/* atomically swap r9 with the lock word */
171	bcnd	eq0, %r9, 3f	/* got 0 back -> lock was free, we own it */
1722:
173	ld	%r9, %r6, %r0	/* spin with plain loads until it looks free */
174	bcnd	eq0, %r9, 1b	/* looks free -> retry the xmem */
175	br	2b
1763:
177	jmp	%r1		/* return to caller of bsr */
178GLOBAL(__atomic_lock_88100_end)	/* end marker for the vector_init() copy */
179#endif
180
181GLOBAL(__atomic_unlock)
182
183/*
184 * If running a kernel with support for both 88100 and 88110 compiled-in
185 * on a 88100 machine, the 88100 code (shorter) will be copied over in
186 * vector_init().
187 *
188 * On entry r5 holds the caller's return address, r6 the interlock
189 * address and r7 the PSR value saved by __atomic_lock.
190 */
191
189#ifdef M88110
190ASLOCAL(__atomic_unlock_88110)
191	/*
192	 * This is the 88110 version: release the interlock, set up
193	 * exception registers to return to our caller with initial
194	 * psr restored.  RTE restores EPSR and jumps to EXIP in one
195	 * step, which also unfreezes shadowing.
196	 */
197
198	st	%r0, %r6, %r0	/* release interlock */
199
200	stcr	%r5, EXIP	/* return address */
201	stcr	%r7, EPSR	/* original PSR */
202
203	/*
204	 * No need to workaround errata #18 (see m88110_user_rte in
205	 * eh_common.S), as we are not returning to user mode.
206	 */
207	RTE
208#endif
208
209#ifdef M88100
210GLOBAL(__atomic_unlock_88100)
	/*
211	 * This is the 88100 version: release the interlock,
212	 * restore psr and return to the caller.  r5 = return
213	 * address, r6 = interlock address, r7 = saved psr, all
214	 * set up by __atomic_lock and the ENTRY stubs above.
215	 */
216
217	st	%r0, %r6, %r0	/* release interlock */
218
219	stcr	%r7, PSR	/* restore original psr (re-enables interrupts) */
220	FLUSH_PIPELINE
221
222	jmp	%r5		/* return to the atomic operation's caller */
223GLOBAL(__atomic_unlock_88100_end)	/* end marker for the vector_init() copy */
224#endif
224