/* $OpenBSD: atomic.h,v 1.5 2019/08/17 06:07:22 jsg Exp $ */
/**
 * \file drm_atomic.h
 * Atomic operations used in the DRM which may or may not be provided by the OS.
 *
 * \author Eric Anholt <anholt@FreeBSD.org>
 */

/*-
 * Copyright 2004 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_LINUX_ATOMIC_H_
#define _DRM_LINUX_ATOMIC_H_

#include <sys/types.h>
#include <sys/mutex.h>
#include <machine/intr.h>
#include <machine/atomic.h>
#include <linux/types.h>

#define atomic_set(p, v)	(*(p) = (v))
#define atomic_read(p)		(*(p))
#define atomic_inc(p)		__sync_fetch_and_add(p, 1)
#define atomic_dec(p)		__sync_fetch_and_sub(p, 1)
#define atomic_add(n, p)	__sync_fetch_and_add(p, n)
#define atomic_sub(n, p)	__sync_fetch_and_sub(p, n)
#define atomic_add_return(n, p) __sync_add_and_fetch(p, n)
#define atomic_sub_return(n, p) __sync_sub_and_fetch(p, n)
#define atomic_inc_return(v)	atomic_add_return(1, (v))
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
#define atomic_or(n, p)		atomic_setbits_int(p, n)
#define atomic_cmpxchg(p, o, n)	__sync_val_compare_and_swap(p, o, n)
#define cmpxchg(p, o, n)	__sync_val_compare_and_swap(p, o, n)
#define atomic_set_release(p, v)	atomic_set((p), (v))
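
/*
 * Illustrative only (not part of the original header): these macros map
 * the Linux atomic_t API onto GCC __sync builtins, so atomic_t behaves
 * like a plain int with atomic accessors.  A minimal sketch, assuming a
 * hypothetical counter and caller-side variable:
 *
 *	atomic_t users;
 *	int old;
 *
 *	atomic_set(&users, 0);
 *	atomic_inc(&users);
 *	if (atomic_dec_and_test(&users))
 *		;			// last user went away
 *
 *	// compare-and-swap: returns the value observed before the swap
 *	old = atomic_cmpxchg(&users, 0, 1);
 */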

static inline int
atomic_xchg(volatile int *v, int n)
{
	__sync_synchronize();
	return __sync_lock_test_and_set(v, n);
}

#define xchg(v, n)	__sync_lock_test_and_set(v, n)

static inline int
atomic_add_unless(volatile int *v, int n, int u)
{
	int o;

	do {
		o = *v;
		if (o == u)
			return 0;
	} while (__sync_val_compare_and_swap(v, o, o + n) != o);

	return 1;
}
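
/*
 * Illustrative only (not part of the original header): atomic_add_unless()
 * is typically used for "take a reference unless it is already dead"
 * patterns.  A minimal sketch, assuming a hypothetical object with an
 * int refcount field:
 *
 *	struct obj { volatile int refcount; };
 *
 *	static inline int
 *	obj_get(struct obj *o)
 *	{
 *		// returns 0 if refcount was already 0, 1 if a ref was taken
 *		return atomic_add_unless(&o->refcount, 1, 0);
 *	}
 */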

static inline int
atomic_dec_if_positive(volatile int *v)
{
	int r, o;

	do {
		o = *v;
		r = o - 1;
		if (r < 0)
			break;
	} while (__sync_val_compare_and_swap(v, o, r) != o);

	return r;
}

#define atomic_long_read(p)	(*(p))

#ifdef __LP64__
typedef int64_t atomic64_t;

#define atomic64_set(p, v)	(*(p) = (v))
#define atomic64_read(p)	(*(p))

static inline int64_t
atomic64_xchg(volatile int64_t *v, int64_t n)
{
	__sync_synchronize();
	return __sync_lock_test_and_set(v, n);
}

#define atomic64_add(n, p)	__sync_fetch_and_add_8(p, n)
#define atomic64_sub(n, p)	__sync_fetch_and_sub_8(p, n)
#define atomic64_inc(p)		__sync_fetch_and_add_8(p, 1)
#define atomic64_add_return(n, p) __sync_add_and_fetch_8(p, n)
#define atomic64_inc_return(p)	__sync_add_and_fetch_8(p, 1)

#else

typedef struct {
	volatile int64_t val;
	struct mutex lock;
} atomic64_t;

static inline void
atomic64_set(atomic64_t *v, int64_t i)
{
	mtx_init(&v->lock, IPL_HIGH);
	v->val = i;
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val;
	mtx_leave(&v->lock);

	return val;
}

static inline int64_t
atomic64_xchg(atomic64_t *v, int64_t n)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val;
	v->val = n;
	mtx_leave(&v->lock);

	return val;
}

static inline void
atomic64_add(int i, atomic64_t *v)
{
	mtx_enter(&v->lock);
	v->val += i;
	mtx_leave(&v->lock);
}

#define atomic64_inc(p)		atomic64_add(1, p)

static inline int64_t
atomic64_add_return(int i, atomic64_t *v)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val + i;
	v->val = val;
	mtx_leave(&v->lock);

	return val;
}

#define atomic64_inc_return(p)		atomic64_add_return(1, p)

static inline void
atomic64_sub(int i, atomic64_t *v)
{
	mtx_enter(&v->lock);
	v->val -= i;
	mtx_leave(&v->lock);
}
#endif
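
/*
 * Illustrative only (not part of the original header): on LP64 targets
 * atomic64_t is a plain int64_t, on 32-bit targets it is a mutex-protected
 * struct, so callers must always go through the accessors above.  A
 * minimal sketch with a hypothetical counter:
 *
 *	atomic64_t bytes_written;
 *	int64_t total;
 *
 *	atomic64_set(&bytes_written, 0);
 *	atomic64_add(len, &bytes_written);
 *	total = atomic64_read(&bytes_written);
 */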

#ifdef __LP64__
typedef int64_t atomic_long_t;
#define atomic_long_set(p, v)		atomic64_set(p, v)
#define atomic_long_xchg(v, n)		atomic64_xchg(v, n)
#define atomic_long_cmpxchg(p, o, n)	atomic_cmpxchg(p, o, n)
#else
typedef int32_t atomic_long_t;
#define atomic_long_set(p, v)		atomic_set(p, v)
#define atomic_long_xchg(v, n)		atomic_xchg(v, n)
#define atomic_long_cmpxchg(p, o, n)	atomic_cmpxchg(p, o, n)
#endif

static inline int
atomic_inc_not_zero(atomic_t *p)
{
	/* increment only if the counter is non-zero, done atomically */
	return atomic_add_unless(p, 1, 0);
}

/* FIXME */
#define atomic_set_int(p, bits)		atomic_setbits_int(p, bits)
#define atomic_set_mask(bits, p)	atomic_setbits_int(p, bits)
#define atomic_clear_int(p, bits)	atomic_clearbits_int(p, bits)
#define atomic_clear_mask(bits, p)	atomic_clearbits_int(p, bits)
#define atomic_andnot(bits, p)		atomic_clearbits_int(p, bits)
#define atomic_fetchadd_int(p, n) __sync_fetch_and_add(p, n)
#define atomic_fetchsub_int(p, n) __sync_fetch_and_sub(p, n)
#define atomic_fetch_inc(p) __sync_fetch_and_add(p, 1)
#define atomic_fetch_xor(n, p) __sync_fetch_and_xor(p, n)

static inline int
test_and_set_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	unsigned int prev = __sync_fetch_and_or((volatile u_int *)p + (b >> 5), m);
	return (prev & m) != 0;
}

static inline void
clear_bit(u_int b, volatile void *p)
{
	atomic_clear_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static inline void
set_bit(u_int b, volatile void *p)
{
	atomic_set_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static inline void
__clear_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	ptr[b >> 5] &= ~(1 << (b & 0x1f));
}

static inline void
__set_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	ptr[b >> 5] |= (1 << (b & 0x1f));
}

static inline int
test_bit(u_int b, const volatile void *p)
{
	return !!(((volatile u_int *)p)[b >> 5] & (1 << (b & 0x1f)));
}

static inline int
__test_and_set_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	volatile u_int *ptr = (volatile u_int *)p;
	unsigned int prev = ptr[b >> 5];
	ptr[b >> 5] |= m;

	return (prev & m) != 0;
}

static inline int
test_and_clear_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	unsigned int prev = __sync_fetch_and_and((volatile u_int *)p + (b >> 5), ~m);
	return (prev & m) != 0;
}

static inline int
__test_and_clear_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	int rv = !!(ptr[b >> 5] & (1 << (b & 0x1f)));
	ptr[b >> 5] &= ~(1 << (b & 0x1f));
	return rv;
}
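
/*
 * Illustrative only (not part of the original header): the *_bit()
 * helpers operate on an array of 32-bit words using atomic
 * read-modify-write ops, while the __*_bit() variants are non-atomic and
 * rely on the caller's own locking.  A minimal sketch with a hypothetical
 * 64-entry bitmap:
 *
 *	volatile u_int map[2] = { 0, 0 };
 *
 *	set_bit(40, map);			// atomically sets bit 8 of map[1]
 *	if (test_and_clear_bit(40, map))	// returns 1 and clears it again
 *		;
 */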

static inline int
find_first_zero_bit(volatile void *p, int max)
{
	int b;
	volatile u_int *ptr = (volatile u_int *)p;

	for (b = 0; b < max; b += 32) {
		if (ptr[b >> 5] != ~0) {
			for (;;) {
				if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_next_zero_bit(volatile void *p, int max, int b)
{
	volatile u_int *ptr = (volatile u_int *)p;

	/*
	 * Scan bit by bit so an unaligned starting offset cannot skip
	 * over bits at the start of a word, and never look past max.
	 */
	for (; b < max; b++) {
		if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
			return b;
	}
	return max;
}

static inline int
find_first_bit(volatile void *p, int max)
{
	int b;
	volatile u_int *ptr = (volatile u_int *)p;

	for (b = 0; b < max; b += 32) {
		if (ptr[b >> 5] != 0) {
			for (;;) {
				if (ptr[b >> 5] & (1 << (b & 0x1f)))
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_next_bit(volatile void *p, int max, int b)
{
	volatile u_int *ptr = (volatile u_int *)p;

	/*
	 * Scan bit by bit so an unaligned starting offset cannot skip
	 * over bits at the start of a word, and never look past max.
	 */
	for (; b < max; b++) {
		if (ptr[b >> 5] & (1 << (b & 0x1f)))
			return b;
	}
	return max;
}

#define for_each_set_bit(b, p, max) \
	for ((b) = find_first_bit((p), (max));			\
	     (b) < (max);					\
	     (b) = find_next_bit((p), (max), (b) + 1))

#define for_each_clear_bit(b, p, max) \
	for ((b) = find_first_zero_bit((p), (max));		\
	     (b) < (max);					\
	     (b) = find_next_zero_bit((p), (max), (b) + 1))
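
/*
 * Illustrative only (not part of the original header): a minimal sketch
 * of iterating a hypothetical bitmap with the helpers above.  The loop
 * variable must be a plain int; handle_bit() is hypothetical.
 *
 *	volatile u_int map[2];
 *	int b;
 *
 *	for_each_set_bit(b, map, 64) {
 *		// body runs once per set bit, b is the bit index
 *		handle_bit(b);
 *	}
 */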

#if defined(__i386__)
#define rmb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define wmb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define mb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define smp_mb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define smp_rmb()	__asm __volatile("" : : : "memory")
#define smp_wmb()	__asm __volatile("" : : : "memory")
#define __smp_store_mb(var, value)	do { (void)xchg(&var, value); } while (0)
#define smp_mb__after_atomic()	do { } while (0)
#define smp_mb__before_atomic()	do { } while (0)
#elif defined(__alpha__)
#define rmb()	alpha_mb()
#define wmb()	alpha_wmb()
#define mb()	alpha_mb()
#elif defined(__amd64__)
#define rmb()	__asm __volatile("lfence" : : : "memory")
#define wmb()	__asm __volatile("sfence" : : : "memory")
#define mb()	__asm __volatile("mfence" : : : "memory")
#define smp_mb()	__asm __volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
#define smp_rmb()	__asm __volatile("" : : : "memory")
#define smp_wmb()	__asm __volatile("" : : : "memory")
#define __smp_store_mb(var, value)	do { (void)xchg(&var, value); } while (0)
#define smp_mb__after_atomic()	do { } while (0)
#define smp_mb__before_atomic()	do { } while (0)
#elif defined(__aarch64__)
#define rmb()	__membar("dsb ld")
#define wmb()	__membar("dsb st")
#define mb()	__membar("dsb sy")
#elif defined(__mips64__)
#define rmb()	mips_sync()
#define wmb()	mips_sync()
#define mb()	mips_sync()
#elif defined(__powerpc__)
#define rmb()	__asm __volatile("sync" : : : "memory")
#define wmb()	__asm __volatile("sync" : : : "memory")
#define mb()	__asm __volatile("sync" : : : "memory")
#elif defined(__sparc64__)
#define rmb()	membar_sync()
#define wmb()	membar_sync()
#define mb()	membar_sync()
#endif

#ifndef smp_rmb
#define smp_rmb()	rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	wmb()
#endif

#ifndef mmiowb
#define mmiowb()	wmb()
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	mb()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	mb()
#endif

#ifndef smp_store_mb
#define smp_store_mb(x, v)	do { x = v; mb(); } while (0)
#endif
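
/*
 * Illustrative only (not part of the original header): a minimal sketch
 * of the intended pairing of the SMP barriers above, assuming a
 * hypothetical producer/consumer sharing "data" and a "ready" flag:
 *
 *	// producer
 *	data = value;
 *	smp_wmb();		// publish the data before the flag
 *	ready = 1;
 *
 *	// consumer
 *	if (ready) {
 *		smp_rmb();	// order the flag read before the data read
 *		use(data);	// use() is hypothetical
 *	}
 */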

#endif