/*	$OpenBSD: atomic.h,v 1.10 2015/02/10 23:54:09 dlg Exp $	*/

/* Public Domain */

#ifndef _MIPS64_ATOMIC_H_
#define _MIPS64_ATOMIC_H_

#if defined(_KERNEL)

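/*
 * Everything below is built on the MIPS load-linked (ll/lld) and
 * store-conditional (sc/scd) instruction pair: sc stores its operand only
 * if nothing else has written the location since the matching ll, and
 * leaves 0 in the register on failure, in which case the sequence branches
 * back to retry.  Every branch is followed by its delay-slot instruction
 * (the extra space before some of the "nop"s marks that slot).
 */
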
/*
 * Wait until the bits to be set are clear, then set them.  Note the "or"
 * sits in the bnez delay slot and also runs when the branch is taken;
 * the value it computes is then discarded, since the retried ll reloads %0.
 */
static __inline void
atomic_wait_and_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp0, tmp1;

	__asm__ volatile (
	"1:	ll	%0,	0(%2)\n"
	"	and	%1,	%0,	%3\n"
	"	bnez	%1,	1b\n"
	"	or	%0,	%3,	%0\n"
	"	sc	%0,	0(%2)\n"
	"	beqz	%0,	1b\n"
	"	 nop\n" :
		"=&r"(tmp0), "=&r"(tmp1) :
		"r"(uip), "r"(v) : "memory");
}

/* Atomically set the bits of v in *uip. */
static __inline void
atomic_setbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm__ volatile (
	"1:	ll	%0,	0(%1)\n"
	"	or	%0,	%2,	%0\n"
	"	sc	%0,	0(%1)\n"
	"	beqz	%0,	1b\n"
	"	 nop\n" :
		"=&r"(tmp) :
		"r"(uip), "r"(v) : "memory");
}

/* Atomically clear the bits of v in *uip (note the inverted mask below). */
static __inline void
atomic_clearbits_int(volatile unsigned int *uip, unsigned int v)
{
	unsigned int tmp;

	__asm__ volatile (
	"1:	ll	%0,	0(%1)\n"
	"	and	%0,	%2,	%0\n"
	"	sc	%0,	0(%1)\n"
	"	beqz	%0,	1b\n"
	"	 nop\n" :
		"=&r"(tmp) :
		"r"(uip), "r"(~v) : "memory");
}


/*
 * Compare and swap: if *p equals o, atomically store n in *p.  Returns
 * the value ll observed in *p, i.e. the old value when the swap succeeds.
 */
static inline unsigned int
_atomic_cas_uint(volatile unsigned int *p, unsigned int o, unsigned int n)
{
	unsigned int rv, wv;

	__asm__ volatile (
	"1:	ll	%0,	%1\n"
	"	bne	%0,	%4,	2f\n"
	"	move	%2,	%3\n"
	"	sc	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"2:	nop\n"
	    : "=&r" (rv), "+m" (*p), "=&r" (wv)
	    : "r" (n), "Ir" (o));

	return (rv);
}
#define atomic_cas_uint(_p, _o, _n) _atomic_cas_uint((_p), (_o), (_n))

/* 64-bit compare and swap, using the lld/scd doubleword pair. */
static inline unsigned long
_atomic_cas_ulong(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	unsigned long rv, wv;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	bne	%0,	%4,	2f\n"
	"	move	%2,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"2:	nop\n"
	    : "=&r" (rv), "+m" (*p), "=&r" (wv)
	    : "r" (n), "Ir" (o));

	return (rv);
}
#define atomic_cas_ulong(_p, _o, _n) _atomic_cas_ulong((_p), (_o), (_n))

/* Pointer compare and swap; pointers are 64 bits wide on mips64. */
static inline void *
_atomic_cas_ptr(volatile void *pp, void *o, void *n)
{
	void * volatile *p = pp;
	void *rv, *wv;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	bne	%0,	%4,	2f\n"
	"	move	%2,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"2:	nop\n"
	    : "=&r" (rv), "+m" (*p), "=&r" (wv)
	    : "r" (n), "Ir" (o));

	return (rv);
}
#define atomic_cas_ptr(_p, _o, _n) _atomic_cas_ptr((_p), (_o), (_n))
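
/*
 * Example (illustrative only, not part of this header): a lock-free
 * increment built from atomic_cas_uint.  The loop re-reads the current
 * value until the CAS succeeds, i.e. until the returned old value
 * matches the value that was read:
 *
 *	unsigned int old;
 *
 *	do {
 *		old = *counter;
 *	} while (atomic_cas_uint(counter, old, old + 1) != old);
 */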


/* Atomically exchange *uip with v; returns the previous value. */
static inline unsigned int
_atomic_swap_uint(volatile unsigned int *uip, unsigned int v)
{
	unsigned int o, t;

	__asm__ volatile (
	"1:	ll	%0,	%1\n"
	"	move	%2,	%3\n"
	"	sc	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (o), "+m" (*uip), "=&r" (t)
	    : "r" (v));

	return (o);
}
#define atomic_swap_uint(_p, _v) _atomic_swap_uint((_p), (_v))

/* 64-bit exchange, using lld/scd. */
static inline unsigned long
_atomic_swap_ulong(volatile unsigned long *uip, unsigned long v)
{
	unsigned long o, t;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	move	%2,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (o), "+m" (*uip), "=&r" (t)
	    : "r" (v));

	return (o);
}
#define atomic_swap_ulong(_p, _v) _atomic_swap_ulong((_p), (_v))


/* Pointer exchange; returns the previous pointer value. */
static inline void *
_atomic_swap_ptr(volatile void *uipp, void *n)
{
	void * volatile *uip = uipp;
	void *o, *t;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	move	%2,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (o), "+m" (*uip), "=&r" (t)
	    : "r" (n));

	return (o);
}
#define atomic_swap_ptr(_p, _n) _atomic_swap_ptr((_p), (_n))
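
/*
 * Example (illustrative only, not part of this header): a minimal
 * test-and-set spin loop built from atomic_swap_uint.  Swapping in 1
 * returns the previous value; a returned 0 means the word was free and
 * is now held:
 *
 *	while (atomic_swap_uint(&lock, 1) != 0)
 *		;
 *
 * A real lock would also need to order surrounding memory accesses.
 */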

/*
 * Atomically add v to *uip and return the new ("_nv") value.  The
 * subtraction form below is addition of the negated operand.
 */
static inline unsigned int
_atomic_add_int_nv(volatile unsigned int *uip, unsigned int v)
{
	unsigned int rv, nv;

	__asm__ volatile (
	"1:	ll	%0,	%1\n"
	"	addu	%2,	%0,	%3\n"
	"	sc	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (rv), "+m" (*uip), "=&r" (nv)
	    : "Ir" (v));

	return (rv + v);
}
#define atomic_add_int_nv(_uip, _v) _atomic_add_int_nv((_uip), (_v))
#define atomic_sub_int_nv(_uip, _v) _atomic_add_int_nv((_uip), 0 - (_v))

/* 64-bit add-and-return-new-value, using lld/scd and daddu. */
static inline unsigned long
_atomic_add_long_nv(volatile unsigned long *uip, unsigned long v)
{
	unsigned long rv, nv;

	__asm__ volatile (
	"1:	lld	%0,	%1\n"
	"	daddu	%2,	%0,	%3\n"
	"	scd	%2,	%1\n"
	"	beqz	%2,	1b\n"
	"	nop\n"
	    : "=&r" (rv), "+m" (*uip), "=&r" (nv)
	    : "Ir" (v));

	return (rv + v);
}
#define atomic_add_long_nv(_uip, _v) _atomic_add_long_nv((_uip), (_v))
#define atomic_sub_long_nv(_uip, _v) _atomic_add_long_nv((_uip), 0 - (_v))
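
/*
 * Example (illustrative only, not part of this header): dropping a
 * reference with the _nv form, which reports the post-update count
 * (obj, refcnt and obj_free are hypothetical):
 *
 *	if (atomic_sub_int_nv(&obj->refcnt, 1) == 0)
 *		obj_free(obj);
 */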

#endif /* defined(_KERNEL) */
#endif /* _MIPS64_ATOMIC_H_ */