xref: /openbsd-src/sys/arch/hppa/include/atomic.h (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: atomic.h,v 1.7 2014/06/17 19:49:53 kettenis Exp $	*/
2 
3 /* Public Domain */
4 
5 #ifndef _MACHINE_ATOMIC_H_
6 #define _MACHINE_ATOMIC_H_
7 
8 #if defined(_KERNEL)
9 
10 #include <sys/mutex.h>
11 
12 #ifdef MULTIPROCESSOR
13 extern struct mutex mtx_atomic;
14 #define ATOMIC_LOCK	mtx_enter(&mtx_atomic)
15 #define ATOMIC_UNLOCK	mtx_leave(&mtx_atomic)
16 #else
17 #define ATOMIC_LOCK
18 #define ATOMIC_UNLOCK
19 #endif
20 
/*
 * Atomically set the bits of 'v' in the int pointed to by 'uip'
 * (*uip |= v).
 *
 * hppa lacks a suitable atomic read-modify-write instruction, so the
 * update is made atomic by masking interrupts: control register %cr15
 * (the external interrupt enable mask, per the 'eiem' name) is saved,
 * then cleared by writing %r0 (architecturally always zero), blocking
 * all external interrupts for the duration of the update.  On
 * MULTIPROCESSOR kernels ATOMIC_LOCK additionally takes mtx_atomic to
 * serialize against other CPUs; on UP it expands to nothing.
 */
static __inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	register_t eiem;

	/* Save the current interrupt mask, then disable all interrupts. */
	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
	__asm volatile("mtctl	%r0, %cr15");
	ATOMIC_LOCK;
	*uip |= v;
	ATOMIC_UNLOCK;
	/* Restore the saved interrupt mask. */
	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
}
33 
/*
 * Atomically clear the bits of 'v' in the int pointed to by 'uip'
 * (*uip &= ~v).
 *
 * Same technique as atomic_setbits_int(): save %cr15 (the external
 * interrupt enable mask), clear it with always-zero %r0 to block
 * external interrupts, perform the update — under mtx_atomic on
 * MULTIPROCESSOR kernels — then restore the saved mask.
 */
static __inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	register_t eiem;

	/* Save the current interrupt mask, then disable all interrupts. */
	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
	__asm volatile("mtctl	%r0, %cr15");
	ATOMIC_LOCK;
	*uip &= ~v;
	ATOMIC_UNLOCK;
	/* Restore the saved interrupt mask. */
	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
}
46 
/*
 * Atomically set the bits of 'v' in the long pointed to by 'uip'
 * (*uip |= v).  long variant of atomic_setbits_int(); identical
 * interrupt-masking technique via %cr15, with mtx_atomic providing
 * cross-CPU serialization on MULTIPROCESSOR kernels.
 */
static __inline void
atomic_setbits_long(volatile unsigned long *uip, unsigned long v)
{
	register_t eiem;

	/* Save the current interrupt mask, then disable all interrupts. */
	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
	__asm volatile("mtctl	%r0, %cr15");
	ATOMIC_LOCK;
	*uip |= v;
	ATOMIC_UNLOCK;
	/* Restore the saved interrupt mask. */
	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
}
59 
/*
 * Atomically clear the bits of 'v' in the long pointed to by 'uip'
 * (*uip &= ~v).  long variant of atomic_clearbits_int(); identical
 * interrupt-masking technique via %cr15, with mtx_atomic providing
 * cross-CPU serialization on MULTIPROCESSOR kernels.
 */
static __inline void
atomic_clearbits_long(volatile unsigned long *uip, unsigned long v)
{
	register_t eiem;

	/* Save the current interrupt mask, then disable all interrupts. */
	__asm volatile("mfctl	%%cr15, %0": "=r" (eiem));
	__asm volatile("mtctl	%r0, %cr15");
	ATOMIC_LOCK;
	*uip &= ~v;
	ATOMIC_UNLOCK;
	/* Restore the saved interrupt mask. */
	__asm volatile("mtctl	%0, %%cr15":: "r" (eiem));
}
72 
73 /*
74  * Although the PA-RISC 2.0 architecture allows an implementation to
 * be weakly ordered, all PA-RISC processors to date implement a
76  * strong memory ordering model.  So all we need is a compiler
77  * barrier.
78  */
79 
/*
 * Compiler-only barrier: the empty asm with a "memory" clobber keeps
 * the compiler from caching memory values in registers or reordering
 * loads/stores across this point.  It emits no instructions — per the
 * comment above, strong hardware ordering makes a CPU fence
 * unnecessary on existing PA-RISC implementations.
 */
static inline void
__insn_barrier(void)
{
	__asm volatile("" : : : "memory");
}
85 
86 #define membar_enter()		__insn_barrier()
87 #define membar_exit()		__insn_barrier()
88 #define membar_producer()	__insn_barrier()
89 #define membar_consumer()	__insn_barrier()
90 #define membar_sync()		__insn_barrier()
91 
92 #endif /* defined(_KERNEL) */
93 #endif /* _MACHINE_ATOMIC_H_ */
94