/*	$OpenBSD: atomic.h,v 1.15 2016/05/16 13:18:51 jsg Exp $	*/

/* Public Domain */

#ifndef _ARM_ATOMIC_H_
#define _ARM_ATOMIC_H_

#if defined(_KERNEL)

#if !defined(CPU_ARMv7)

#include <arm/cpufunc.h>
#include <arm/armreg.h>

/*
 * Pre-v6 ARM processors lack the ldrex/strex exclusive-access
 * instructions, so kernel atomic updates that must not take a full
 * mutex are done by disabling interrupts around the read-modify-write.
 */

static inline unsigned int
_atomic_cas_uint(volatile unsigned int *uip, unsigned int o, unsigned int n)
{
	unsigned int cpsr;
	unsigned int rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uip;
	if (rv == o)
		*uip = n;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))
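
/*
 * Illustrative usage sketch (the helper name and counter variable are
 * hypothetical): callers typically wrap atomic_cas_uint() in a
 * read/retry loop, e.g. to add a value:
 *
 *	static inline void
 *	example_add_uint(volatile unsigned int *counter, unsigned int v)
 *	{
 *		unsigned int old;
 *
 *		do {
 *			old = *counter;
 *		} while (atomic_cas_uint(counter, old, old + v) != old);
 *	}
 *
 * atomic_cas_uint() returns the value it observed, so the loop retries
 * until no other update slipped in between the read and the store.
 */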

static inline unsigned int
_atomic_cas_ulong(volatile unsigned long *uip, unsigned long o, unsigned long n)
{
	unsigned int cpsr;
	unsigned long rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uip;
	if (rv == o)
		*uip = n;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))

static inline void *
_atomic_cas_ptr(volatile void *uip, void *o, void *n)
{
	unsigned int cpsr;
	void * volatile *uipp = (void * volatile *)uip;
	void *rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uipp;
	if (rv == o)
		*uipp = n;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))

static inline unsigned int
_atomic_swap_uint(volatile unsigned int *uip, unsigned int n)
{
	unsigned int cpsr;
	unsigned int rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uip;
	*uip = n;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_swap_uint(_p, _n) _atomic_swap_uint((_p), (_n))

static inline unsigned long
_atomic_swap_ulong(volatile unsigned long *uip, unsigned long n)
{
	unsigned int cpsr;
	unsigned long rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uip;
	*uip = n;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_swap_ulong(_p, _n) _atomic_swap_ulong((_p), (_n))

static inline void *
_atomic_swap_ptr(volatile void *uip, void *n)
{
	unsigned int cpsr;
	void * volatile *uipp = (void * volatile *)uip;
	void *rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uipp;
	*uipp = n;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))

static inline unsigned int
_atomic_add_int_nv(volatile unsigned int *uip, unsigned int v)
{
	unsigned int cpsr;
	unsigned int rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uip + v;
	*uip = rv;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_add_int_nv(_p, _v) _atomic_add_int_nv((_p), (_v))

static inline unsigned long
_atomic_add_long_nv(volatile unsigned long *uip, unsigned long v)
{
	unsigned int cpsr;
	unsigned long rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uip + v;
	*uip = rv;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_add_long_nv(_p, _v) _atomic_add_long_nv((_p), (_v))

static inline unsigned int
_atomic_sub_int_nv(volatile unsigned int *uip, unsigned int v)
{
	unsigned int cpsr;
	unsigned int rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uip - v;
	*uip = rv;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_sub_int_nv(_p, _v) _atomic_sub_int_nv((_p), (_v))

static inline unsigned long
_atomic_sub_long_nv(volatile unsigned long *uip, unsigned long v)
{
	unsigned int cpsr;
	unsigned long rv;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	rv = *uip - v;
	*uip = rv;
	restore_interrupts(cpsr);

	return (rv);
}
#define atomic_sub_long_nv(_p, _v) _atomic_sub_long_nv((_p), (_v))

static inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int cpsr;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	*uip |= v;
	restore_interrupts(cpsr);
}

static inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int cpsr;

	cpsr = disable_interrupts(PSR_I|PSR_F);
	*uip &= ~v;
	restore_interrupts(cpsr);
}

#else /* !CPU_ARMv7 */

/*
 * Compare and set:
 * ret = *ptr
 * if (ret == expect)
 * 	*ptr = new
 * return (ret)
 */
#define def_atomic_cas(_f, _t)					\
static inline _t						\
_f(volatile _t *p, _t e, _t n)					\
{								\
	_t ret, modified;					\
								\
	__asm volatile (					\
	    "1:	ldrex %0, [%4]		\n\t"			\
	    "	cmp %0, %3		\n\t"			\
	    "	bne 2f			\n\t"			\
	    "	strex %1, %2, [%4]	\n\t"			\
	    "	cmp %1, #0		\n\t"			\
	    "	bne 1b			\n\t"			\
	    "	b 3f			\n\t"			\
	    "2:	clrex			\n\t"			\
	    "3:				\n\t"			\
	    : "=&r" (ret), "=&r" (modified)			\
	    : "r" (n), "r" (e), "r" (p)				\
	    : "memory", "cc"					\
	);							\
	return (ret);						\
}
def_atomic_cas(_atomic_cas_uint, unsigned int)
def_atomic_cas(_atomic_cas_ulong, unsigned long)
#undef def_atomic_cas

#define atomic_cas_uint(_p, _e, _n) _atomic_cas_uint((_p), (_e), (_n))
#define atomic_cas_ulong(_p, _e, _n) _atomic_cas_ulong((_p), (_e), (_n))

static inline void *
_atomic_cas_ptr(volatile void *p, void *e, void *n)
{
	void *ret;
	uint32_t modified;

	__asm volatile (
	    "1:	ldrex %0, [%4]		\n\t"
	    "	cmp %0, %3		\n\t"
	    "	bne 2f			\n\t"
	    "	strex %1, %2, [%4]	\n\t"
	    "	cmp %1, #0		\n\t"
	    "	bne 1b			\n\t"
	    "	b 3f			\n\t"
	    "2:	clrex			\n\t"
	    "3:				\n\t"
	    : "=&r" (ret), "=&r" (modified)
	    : "r" (n), "r" (e), "r" (p)
	    : "memory", "cc"
	);
	return (ret);
}
#define atomic_cas_ptr(_p, _e, _n) _atomic_cas_ptr((_p), (_e), (_n))
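
/*
 * Illustrative usage sketch for atomic_cas_ptr() (the structure and
 * function names are hypothetical): a minimal lock-free push onto a
 * singly-linked list head.
 *
 *	struct example_entry { struct example_entry *next; };
 *
 *	static inline void
 *	example_push(struct example_entry * volatile *headp,
 *	    struct example_entry *e)
 *	{
 *		struct example_entry *old;
 *
 *		do {
 *			old = *headp;
 *			e->next = old;
 *		} while (atomic_cas_ptr(headp, old, e) != old);
 *	}
 *
 * The store only takes effect while the head still holds the value
 * that was read; a concurrent push forces another pass of the loop.
 */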

/*
 * Swap:
 * ret = *p
 * *p = val
 * return (ret)
 */
#define def_atomic_swap(_f, _t)					\
static inline _t						\
_f(volatile _t *p, _t v)					\
{								\
	_t ret, modified;					\
								\
	__asm volatile (					\
	    "1:	ldrex %0, [%3]		\n\t"			\
	    "	strex %1, %2, [%3]	\n\t"			\
	    "	cmp %1, #0		\n\t"			\
	    "	bne 1b			\n\t"			\
	    : "=&r" (ret), "=&r" (modified)			\
	    : "r" (v), "r" (p)					\
	    : "memory", "cc"					\
	);							\
	return (ret);						\
}
def_atomic_swap(_atomic_swap_uint, unsigned int)
def_atomic_swap(_atomic_swap_ulong, unsigned long)
#undef def_atomic_swap

#define atomic_swap_uint(_p, _v) _atomic_swap_uint((_p), (_v))
#define atomic_swap_ulong(_p, _v) _atomic_swap_ulong((_p), (_v))
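
/*
 * Illustrative usage sketch for atomic_swap_uint() (the lock variable
 * and function names are hypothetical): a simple test-and-set spin.
 *
 *	static inline void
 *	example_spin_enter(volatile unsigned int *lockp)
 *	{
 *		while (atomic_swap_uint(lockp, 1) != 0)
 *			;
 *		membar_enter();
 *	}
 *
 *	static inline void
 *	example_spin_leave(volatile unsigned int *lockp)
 *	{
 *		membar_exit();
 *		*lockp = 0;
 *	}
 *
 * The membar_enter()/membar_exit() macros defined further down keep
 * the protected accesses inside the critical section.
 */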

static inline void *
_atomic_swap_ptr(volatile void *p, void *v)
{
	void *ret;
	uint32_t modified;

	__asm volatile (
	    "1:	ldrex %0, [%3]		\n\t"
	    "	strex %1, %2, [%3]	\n\t"
	    "	cmp %1, #0		\n\t"
	    "	bne 1b			\n\t"
	    : "=&r" (ret), "=&r" (modified)
	    : "r" (v), "r" (p)
	    : "memory", "cc"
	);
	return (ret);
}
#define atomic_swap_ptr(_p, _v) _atomic_swap_ptr((_p), (_v))

/*
 * Increment returning the new value
 * *p += 1
 * return (*p)
 */
#define def_atomic_inc_nv(_f, _t)				\
static inline _t						\
_f(volatile _t *p)						\
{								\
	_t ret, modified;					\
								\
	__asm volatile (					\
	    "1:	ldrex %0, [%2]		\n\t"			\
	    "	add %0, %0, #1		\n\t"			\
	    "	strex %1, %0, [%2]	\n\t"			\
	    "	cmp %1, #0		\n\t"			\
	    "	bne 1b			\n\t"			\
	    : "=&r" (ret), "=&r" (modified)			\
	    : "r" (p)						\
	    : "memory", "cc"					\
	);							\
	return (ret);						\
}
def_atomic_inc_nv(_atomic_inc_int_nv, unsigned int)
def_atomic_inc_nv(_atomic_inc_long_nv, unsigned long)
#undef def_atomic_inc_nv

#define atomic_inc_int_nv(_p) _atomic_inc_int_nv((_p))
#define atomic_inc_long_nv(_p) _atomic_inc_long_nv((_p))

/*
 * Decrement returning the new value
 * *p -= 1
 * return (*p)
 */
#define def_atomic_dec_nv(_f, _t)				\
static inline _t						\
_f(volatile _t *p)						\
{								\
	_t ret, modified;					\
								\
	__asm volatile (					\
	    "1:	ldrex %0, [%2]		\n\t"			\
	    "	sub %0, %0, #1		\n\t"			\
	    "	strex %1, %0, [%2]	\n\t"			\
	    "	cmp %1, #0		\n\t"			\
	    "	bne 1b			\n\t"			\
	    : "=&r" (ret), "=&r" (modified)			\
	    : "r" (p)						\
	    : "memory", "cc"					\
	);							\
	return (ret);						\
}
def_atomic_dec_nv(_atomic_dec_int_nv, unsigned int)
def_atomic_dec_nv(_atomic_dec_long_nv, unsigned long)
#undef def_atomic_dec_nv

#define atomic_dec_int_nv(_p) _atomic_dec_int_nv((_p))
#define atomic_dec_long_nv(_p) _atomic_dec_long_nv((_p))
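
/*
 * Illustrative usage sketch for atomic_dec_int_nv() (the structure and
 * function names are hypothetical): because the new value is returned,
 * it can decide a reference-count release.
 *
 *	static inline void
 *	example_rele(struct example_obj *o)
 *	{
 *		if (atomic_dec_int_nv(&o->refcnt) == 0)
 *			example_free(o);
 *	}
 *
 * Exactly one caller observes the transition to zero, so exactly one
 * caller frees the object.
 */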


/*
 * Addition returning the new value
 * *p += v
 * return (*p)
 */
#define def_atomic_add_nv(_f, _t)				\
static inline _t						\
_f(volatile _t *p, _t v)					\
{								\
	_t ret, modified;					\
								\
	__asm volatile (					\
	    "1:	ldrex %0, [%2]		\n\t"			\
	    "	add %0, %0, %3		\n\t"			\
	    "	strex %1, %0, [%2]	\n\t"			\
	    "	cmp %1, #0		\n\t"			\
	    "	bne 1b			\n\t"			\
	    : "=&r" (ret), "=&r" (modified)			\
	    : "r" (p), "r" (v)					\
	    : "memory", "cc"					\
	);							\
	return (ret);						\
}
def_atomic_add_nv(_atomic_add_int_nv, unsigned int)
def_atomic_add_nv(_atomic_add_long_nv, unsigned long)
#undef def_atomic_add_nv

#define atomic_add_int_nv(_p, _v) _atomic_add_int_nv((_p), (_v))
#define atomic_add_long_nv(_p, _v) _atomic_add_long_nv((_p), (_v))

/*
 * Subtraction returning the new value
 * *p -= v
 * return (*p)
 */
#define def_atomic_sub_nv(_f, _t)				\
static inline _t						\
_f(volatile _t *p, _t v)					\
{								\
	_t ret, modified;					\
								\
	__asm volatile (					\
	    "1:	ldrex %0, [%2]		\n\t"			\
	    "	sub %0, %0, %3		\n\t"			\
	    "	strex %1, %0, [%2]	\n\t"			\
	    "	cmp %1, #0		\n\t"			\
	    "	bne 1b			\n\t"			\
	    : "=&r" (ret), "=&r" (modified)			\
	    : "r" (p), "r" (v)					\
	    : "memory", "cc"					\
	);							\
	return (ret);						\
}
def_atomic_sub_nv(_atomic_sub_int_nv, unsigned int)
def_atomic_sub_nv(_atomic_sub_long_nv, unsigned long)
#undef def_atomic_sub_nv

#define atomic_sub_int_nv(_p, _v) _atomic_sub_int_nv((_p), (_v))
#define atomic_sub_long_nv(_p, _v) _atomic_sub_long_nv((_p), (_v))

/*
 * Set bits
 * *p = *p | v
 */
static inline void
atomic_setbits_int(volatile unsigned int *p, unsigned int v)
{
	unsigned int modified, tmp;

	__asm volatile (
	    "1:	ldrex %0, [%3]		\n\t"
	    "	orr %0, %0, %2		\n\t"
	    "	strex %1, %0, [%3]	\n\t"
	    "	cmp %1, #0		\n\t"
	    "	bne 1b			\n\t"
	    : "=&r" (tmp), "=&r" (modified)
	    : "r" (v), "r" (p)
	    : "memory", "cc"
	);
}

/*
 * Clear bits
 * *p = *p & (~v)
 */
static inline void
atomic_clearbits_int(volatile unsigned int *p, unsigned int v)
{
	unsigned int modified, tmp;

	__asm volatile (
	    "1:	ldrex %0, [%3]		\n\t"
	    "	bic %0, %0, %2		\n\t"
	    "	strex %1, %0, [%3]	\n\t"
	    "	cmp %1, #0		\n\t"
	    "	bne 1b			\n\t"
	    : "=&r" (tmp), "=&r" (modified)
	    : "r" (v), "r" (p)
	    : "memory", "cc"
	);
}
#endif /* CPU_ARMv7 */
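
/*
 * Illustrative usage sketch for atomic_setbits_int()/atomic_clearbits_int()
 * (the flag word and bit value are hypothetical): individual bits in a
 * shared flag word can be set and cleared without losing concurrent
 * updates to the other bits.
 *
 *	volatile unsigned int example_flags;
 *	#define EXAMPLE_BUSY	0x0001
 *
 *	atomic_setbits_int(&example_flags, EXAMPLE_BUSY);
 *	...
 *	atomic_clearbits_int(&example_flags, EXAMPLE_BUSY);
 */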

#if !defined(CPU_ARMv7)

#define __membar() do { __asm __volatile("" ::: "memory"); } while (0)

#define membar_enter()		__membar()
#define membar_exit()		__membar()
#define membar_producer()	__membar()
#define membar_consumer()	__membar()
#define membar_sync()		__membar()

#else /* !CPU_ARMv7 */

#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)

#define membar_enter()		__membar("dmb sy")
#define membar_exit()		__membar("dmb sy")
#define membar_producer()	__membar("dmb st")
#define membar_consumer()	__membar("dmb sy")
#define membar_sync()		__membar("dmb sy")

#define virtio_membar_producer()	__membar("dmb st")
#define virtio_membar_consumer()	__membar("dmb sy")
#define virtio_membar_sync()		__membar("dmb sy")
#endif /* CPU_ARMv7 */
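
/*
 * Illustrative usage sketch for the membar interface (the "ready" and
 * "data" variables are hypothetical): a producer publishes data before
 * raising a flag, and a consumer orders its reads the other way round.
 *
 *	volatile unsigned int example_ready;
 *	unsigned int example_data;
 *
 *	producer:
 *		example_data = v;
 *		membar_producer();
 *		example_ready = 1;
 *
 *	consumer:
 *		while (example_ready == 0)
 *			;
 *		membar_consumer();
 *		use(example_data);
 *
 * On CPU_ARMv7 the membars expand to dmb barriers; in the !CPU_ARMv7
 * case above they are compiler-level barriers only.
 */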

#endif /* defined(_KERNEL) */
#endif /* _ARM_ATOMIC_H_ */