/*	$OpenBSD: atomic.h,v 1.10 2015/05/06 03:30:03 dlg Exp $	*/

/*
 * Copyright (c) 2015 Martin Pieuchot
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */


#ifndef _POWERPC_ATOMIC_H_
#define _POWERPC_ATOMIC_H_

#if defined(_KERNEL)

/*
 * Atomically set the bits of `v' in *uip.  The lwarx/stwcx. pair
 * retries until the reservation taken on *uip survives the store;
 * the trailing `sync' acts as a full barrier after the update.
 */
static __inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	or	%0, %1, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    "	sync" : "=&r" (tmp) : "r" (v), "r" (uip) : "cc", "memory");
}

/* Atomically clear the bits of `v' in *uip (*uip &= ~v). */
static __inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	andc	%0, %0, %1	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    "	sync" : "=&r" (tmp) : "r" (v), "r" (uip) : "cc", "memory");
}

/*
 * Compare-and-swap: if *p contains `o', store `n'; in either case
 * return the value that was found in *p.  A compare mismatch exits
 * early at label 2; a failed stwcx. (lost reservation) retries the
 * whole sequence from the lwarx.
 */
static inline unsigned int
_atomic_cas_uint(volatile unsigned int *p, unsigned int o, unsigned int n)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	cmpw	0, %0, %4	\n"
	    "	bne-	2f		\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    "2:				\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))

/* As atomic_cas_uint(), for unsigned long (a single 32-bit word here). */
static inline unsigned long
_atomic_cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	cmpw	0, %0, %4	\n"
	    "	bne-	2f		\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    "2:				\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))

/* As atomic_cas_uint(), for pointers. */
static inline void *
_atomic_cas_ptr(volatile void *pp, void *o, void *n)
{
	void * volatile *p = pp;
	void *rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	cmpw	0, %0, %4	\n"
	    "	bne-	2f		\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    "2:				\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (n), "r" (o)
	    : "cc");

	return (rv);
}
#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
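
/*
 * Illustrative only, not part of this header: the usual CAS update
 * loop.  `counter' is a hypothetical pointer; the exchange succeeded
 * iff the value returned equals the value that was expected.
 *
 *	unsigned int old;
 *
 *	do {
 *		old = *counter;
 *	} while (atomic_cas_uint(counter, old, old + 1) != old);
 */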

/*
 * Swap: unconditionally store `v' in *p and return the previous
 * contents.  There is no compare, so only a lost reservation
 * (failed stwcx.) causes a retry.
 */
static inline unsigned int
_atomic_swap_uint(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_uint(_p, _v) _atomic_swap_uint((_p), (_v))

/* As atomic_swap_uint(), for unsigned long. */
static inline unsigned long
_atomic_swap_ulong(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_ulong(_p, _v) _atomic_swap_ulong((_p), (_v))

/* As atomic_swap_uint(), for pointers. */
static inline void *
_atomic_swap_ptr(volatile void *pp, void *v)
{
	void * volatile *p = pp;
	void *rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	stwcx.	%3, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc");

	return (rv);
}
#define atomic_swap_ptr(_p, _v) _atomic_swap_ptr((_p), (_v))
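
/*
 * Illustrative only: a test-and-set flag built on atomic_swap_uint().
 * `flag' is a hypothetical word initialized to 0; the old value tells
 * the caller whether it won the race.
 *
 *	if (atomic_swap_uint(&flag, 1) == 0) {
 *		this caller set the flag first
 *	}
 */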

/* Add `v' to *p and return the new value (hence the `_nv' suffix). */
static inline unsigned int
_atomic_add_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	add	%0, %3, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_add_int_nv(_p, _v) _atomic_add_int_nv((_p), (_v))

/* As atomic_add_int_nv(), for unsigned long. */
static inline unsigned long
_atomic_add_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	add	%0, %3, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_add_long_nv(_p, _v) _atomic_add_long_nv((_p), (_v))

/*
 * Subtract `v' from *p and return the new value; `subf %0, %3, %0'
 * computes %0 - %3, i.e. the old contents minus `v'.
 */
static inline unsigned int
_atomic_sub_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	subf	%0, %3, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_sub_int_nv(_p, _v) _atomic_sub_int_nv((_p), (_v))

/* As atomic_sub_int_nv(), for unsigned long. */
static inline unsigned long
_atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	subf	%0, %3, %0	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "r" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_sub_long_nv(_p, _v) _atomic_sub_long_nv((_p), (_v))

/*
 * Add the constant `v' to *p and return the new value.  addic takes
 * a signed 16-bit immediate (hence the "n" constraint), so this is
 * only instantiated with +1/-1 by the macros below; addic updates
 * the carry bit, which is why "xer" is in the clobber list.
 */
static inline unsigned int
_atomic_addic_int_nv(volatile unsigned int *p, unsigned int v)
{
	unsigned int rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	addic	%0, %0, %3	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "n" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_inc_int_nv(_p) _atomic_addic_int_nv((_p), 1)
#define atomic_dec_int_nv(_p) _atomic_addic_int_nv((_p), -1)

/* As _atomic_addic_int_nv(), for unsigned long. */
static inline unsigned long
_atomic_addic_long_nv(volatile unsigned long *p, unsigned long v)
{
	unsigned long rv;

	__asm volatile (
	    "1:	lwarx	%0, 0, %2	\n"
	    "	addic	%0, %0, %3	\n"
	    "	stwcx.	%0, 0, %2	\n"
	    "	bne-	1b		\n"
	    : "=&r" (rv), "+m" (*p)
	    : "r" (p), "n" (v)
	    : "cc", "xer");

	return (rv);
}
#define atomic_inc_long_nv(_p) _atomic_addic_long_nv((_p), 1)
#define atomic_dec_long_nv(_p) _atomic_addic_long_nv((_p), -1)
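
/*
 * Illustrative only: the `_nv' return value lets a reference-count
 * drop detect the final release without a second read.  `obj',
 * `refcnt' and obj_free() are hypothetical names.
 *
 *	if (atomic_dec_int_nv(&obj->refcnt) == 0)
 *		obj_free(obj);
 */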

#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)

/*
 * On MP kernels `sync' provides the full and store-side barriers and
 * `isync' the acquire-side ones; on UP kernels the empty asm with its
 * "memory" clobber is kept purely as a compiler-level barrier.
 */
#ifdef MULTIPROCESSOR
#define membar_enter()		__membar("isync")
#define membar_exit()		__membar("sync")
#define membar_producer()	__membar("sync")
#define membar_consumer()	__membar("isync")
#define membar_sync()		__membar("sync")
#else
#define membar_enter()		__membar("")
#define membar_exit()		__membar("")
#define membar_producer()	__membar("")
#define membar_consumer()	__membar("")
#define membar_sync()		__membar("")
#endif
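
/*
 * Illustrative only: how the barriers pair with a trivial spinlock
 * built from atomic_swap_uint().  `lck' is a hypothetical word
 * initialized to 0.
 *
 *	while (atomic_swap_uint(&lck, 1) != 0)
 *		continue;
 *	membar_enter();		(critical section stays below this)
 *	... critical section ...
 *	membar_exit();		(its accesses complete before release)
 *	lck = 0;
 */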

#endif /* defined(_KERNEL) */
#endif /* _POWERPC_ATOMIC_H_ */