/* $OpenBSD: atomic.h,v 1.16 2021/06/25 13:41:09 jsg Exp $ */
/**
 * \file drm_atomic.h
 * Atomic operations used in the DRM which may or may not be provided by the OS.
 *
 * \author Eric Anholt <anholt@FreeBSD.org>
 */

/*-
 * Copyright 2004 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_LINUX_ATOMIC_H_
#define _DRM_LINUX_ATOMIC_H_

#include <sys/types.h>
#include <sys/mutex.h>
#include <machine/intr.h>
#include <linux/types.h>
#include <linux/compiler.h>	/* via x86/include/asm/atomic.h */

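/*
 * Linux atomic_t API emulated with the GCC/Clang __sync builtins and
 * OpenBSD's atomic_setbits_int()/atomic_clearbits_int() primitives.
 */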
#define ATOMIC_INIT(x)		(x)

#define atomic_set(p, v)	WRITE_ONCE(*(p), (v))
#define atomic_read(p)		READ_ONCE(*(p))
#define atomic_inc(p)		__sync_fetch_and_add(p, 1)
#define atomic_dec(p)		__sync_fetch_and_sub(p, 1)
#define atomic_add(n, p)	__sync_fetch_and_add(p, n)
#define atomic_sub(n, p)	__sync_fetch_and_sub(p, n)
#define atomic_and(n, p)	__sync_fetch_and_and(p, n)
#define atomic_or(n, p)		atomic_setbits_int(p, n)
#define atomic_add_return(n, p) __sync_add_and_fetch(p, n)
#define atomic_sub_return(n, p) __sync_sub_and_fetch(p, n)
#define atomic_inc_return(v)	atomic_add_return(1, (v))
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
#define atomic_cmpxchg(p, o, n)	__sync_val_compare_and_swap(p, o, n)
#define cmpxchg(p, o, n)	__sync_val_compare_and_swap(p, o, n)
#define atomic_set_release(p, v)	atomic_set((p), (v))
#define atomic_andnot(bits, p)		atomic_clearbits_int(p, bits)
#define atomic_fetch_inc(p)		__sync_fetch_and_add(p, 1)
#define atomic_fetch_xor(n, p)		__sync_fetch_and_xor(p, n)

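/*
 * Linux-style try_cmpxchg(): compare *p against *op and install n on a
 * match.  On failure the value observed in *p is written back to *op.
 * Evaluates to true when the swap succeeded.
 */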
#define try_cmpxchg(p, op, n)						\
({									\
	__typeof(p) __op = (__typeof((p)))(op);				\
	__typeof(*(p)) __o = *__op;					\
	__typeof(*(p)) __p = __sync_val_compare_and_swap((p), (__o), (n)); \
	if (__p != __o)							\
		*__op = __p;						\
	(__p == __o);							\
})

static inline bool
atomic_try_cmpxchg(volatile int *p, int *op, int n)
{
	return try_cmpxchg(p, op, n);
}

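/*
 * __sync_lock_test_and_set() is only an acquire barrier, so issue a
 * full barrier first to approximate Linux's fully ordered xchg().
 */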
static inline int
atomic_xchg(volatile int *v, int n)
{
	__sync_synchronize();
	return __sync_lock_test_and_set(v, n);
}

#define xchg(v, n)	__sync_lock_test_and_set(v, n)

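/*
 * Atomically add n to *v unless *v currently holds u.  Returns 1 if the
 * addition was performed, 0 if *v already held u.
 */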
static inline int
atomic_add_unless(volatile int *v, int n, int u)
{
	int o;

	do {
		o = *v;
		if (o == u)
			return 0;
	} while (__sync_val_compare_and_swap(v, o, o + n) != o);

	return 1;
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

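/*
 * Decrement *v only if the result stays non-negative.  Returns the
 * decremented value; a negative return means *v was left unchanged.
 */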
static inline int
atomic_dec_if_positive(volatile int *v)
{
	int r, o;

	do {
		o = *v;
		r = o - 1;
		if (r < 0)
			break;
	} while (__sync_val_compare_and_swap(v, o, r) != o);

	return r;
}

#define atomic_long_read(p)	READ_ONCE(*(p))

/* 32 bit powerpc lacks 64 bit atomics */
#if !defined(__powerpc__) || defined(__powerpc64__)

typedef int64_t atomic64_t;

#define ATOMIC64_INIT(x)	(x)

#define atomic64_set(p, v)	WRITE_ONCE(*(p), (v))
#define atomic64_read(p)	READ_ONCE(*(p))

static inline int64_t
atomic64_xchg(volatile int64_t *v, int64_t n)
{
	__sync_synchronize();
	return __sync_lock_test_and_set(v, n);
}

#define atomic64_add(n, p)	__sync_fetch_and_add_8(p, n)
#define atomic64_sub(n, p)	__sync_fetch_and_sub_8(p, n)
#define atomic64_inc(p)		__sync_fetch_and_add_8(p, 1)
#define atomic64_add_return(n, p) __sync_add_and_fetch_8(p, n)
#define atomic64_inc_return(p)	__sync_add_and_fetch_8(p, 1)

#else

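/*
 * Fallback for 32 bit powerpc: emulate 64 bit atomics with a global
 * mutex serializing all atomic64_t accesses.
 */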
extern struct mutex atomic64_mtx;

typedef struct {
	volatile int64_t val;
} atomic64_t;

#define ATOMIC64_INIT(x)	{ (x) }

static inline void
atomic64_set(atomic64_t *v, int64_t i)
{
	mtx_enter(&atomic64_mtx);
	v->val = i;
	mtx_leave(&atomic64_mtx);
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	int64_t val;

	mtx_enter(&atomic64_mtx);
	val = v->val;
	mtx_leave(&atomic64_mtx);

	return val;
}

static inline int64_t
atomic64_xchg(atomic64_t *v, int64_t n)
{
	int64_t val;

	mtx_enter(&atomic64_mtx);
	val = v->val;
	v->val = n;
	mtx_leave(&atomic64_mtx);

	return val;
}

static inline void
atomic64_add(int i, atomic64_t *v)
{
	mtx_enter(&atomic64_mtx);
	v->val += i;
	mtx_leave(&atomic64_mtx);
}

#define atomic64_inc(p)		atomic64_add(1, p)

static inline int64_t
atomic64_add_return(int i, atomic64_t *v)
{
	int64_t val;

	mtx_enter(&atomic64_mtx);
	val = v->val + i;
	v->val = val;
	mtx_leave(&atomic64_mtx);

	return val;
}

#define atomic64_inc_return(p)		atomic64_add_return(1, p)

static inline void
atomic64_sub(int i, atomic64_t *v)
{
	mtx_enter(&atomic64_mtx);
	v->val -= i;
	mtx_leave(&atomic64_mtx);
}
#endif

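/* atomic_long_t matches the machine's long/pointer width, as in Linux. */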
#ifdef __LP64__
typedef int64_t atomic_long_t;
#define atomic_long_set(p, v)		atomic64_set(p, v)
#define atomic_long_xchg(v, n)		atomic64_xchg(v, n)
#define atomic_long_cmpxchg(p, o, n)	atomic_cmpxchg(p, o, n)
#else
typedef int32_t atomic_long_t;
#define atomic_long_set(p, v)		atomic_set(p, v)
#define atomic_long_xchg(v, n)		atomic_xchg(v, n)
#define atomic_long_cmpxchg(p, o, n)	atomic_cmpxchg(p, o, n)
#endif

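/*
 * Linux-style bit operations on arrays of 32 bit words: bit b lives in
 * word b >> 5 at position b & 0x1f.  The __ prefixed variants are not
 * atomic and rely on the caller for serialization.
 */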
static inline atomic_t
test_and_set_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	unsigned int prev = __sync_fetch_and_or((volatile u_int *)p + (b >> 5), m);
	return (prev & m) != 0;
}

static inline void
clear_bit(u_int b, volatile void *p)
{
	atomic_clearbits_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static inline void
clear_bit_unlock(u_int b, volatile void *p)
{
	membar_enter();
	clear_bit(b, p);
}

static inline void
set_bit(u_int b, volatile void *p)
{
	atomic_setbits_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static inline void
__clear_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	ptr[b >> 5] &= ~(1 << (b & 0x1f));
}

static inline void
__set_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	ptr[b >> 5] |= (1 << (b & 0x1f));
}

static inline int
test_bit(u_int b, const volatile void *p)
{
	return !!(((volatile u_int *)p)[b >> 5] & (1 << (b & 0x1f)));
}

static inline int
__test_and_set_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	volatile u_int *ptr = (volatile u_int *)p;
	unsigned int prev = ptr[b >> 5];
	ptr[b >> 5] |= m;

	return (prev & m) != 0;
}

static inline int
test_and_clear_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	unsigned int prev = __sync_fetch_and_and((volatile u_int *)p + (b >> 5), ~m);
	return (prev & m) != 0;
}

static inline int
__test_and_clear_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	int rv = !!(ptr[b >> 5] & (1 << (b & 0x1f)));
	ptr[b >> 5] &= ~(1 << (b & 0x1f));
	return rv;
}

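/*
 * Linear bitmap scans, as in Linux: return the index of the first set
 * (or, for the _zero_ variants, clear) bit at or after the starting
 * position, or max when the scan runs off the end.
 */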
static inline int
find_first_zero_bit(volatile void *p, int max)
{
	int b;
	volatile u_int *ptr = (volatile u_int *)p;

	for (b = 0; b < max; b += 32) {
		if (ptr[b >> 5] != ~0) {
			for (;;) {
				if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_next_zero_bit(volatile void *p, int max, int b)
{
	volatile u_int *ptr = (volatile u_int *)p;

	for (; b < max; b += 32) {
		if (ptr[b >> 5] != ~0) {
			for (;;) {
				if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_first_bit(volatile void *p, int max)
{
	int b;
	volatile u_int *ptr = (volatile u_int *)p;

	for (b = 0; b < max; b += 32) {
		if (ptr[b >> 5] != 0) {
			for (;;) {
				if (ptr[b >> 5] & (1 << (b & 0x1f)))
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_next_bit(volatile void *p, int max, int b)
{
	volatile u_int *ptr = (volatile u_int *)p;

	for (; b < max; b += 32) {
		if (ptr[b >> 5] != 0) {
			for (;;) {
				if (ptr[b >> 5] & (1 << (b & 0x1f)))
					return b;
				b++;
			}
		}
	}
	return max;
}

#define for_each_set_bit(b, p, max) \
	for ((b) = find_first_bit((p), (max));			\
	     (b) < (max);					\
	     (b) = find_next_bit((p), (max), (b) + 1))

#define for_each_clear_bit(b, p, max) \
	for ((b) = find_first_zero_bit((p), (max));		\
	     (b) < (max);					\
	     (b) = find_next_zero_bit((p), (max), (b) + 1))

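/*
 * Memory barriers.  rmb()/wmb()/mb() order device (I/O) accesses per
 * architecture; the smp_* variants only need to order accesses as seen
 * by other CPUs and fall back to the stronger forms below when an
 * architecture does not define them.
 */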
#if defined(__i386__)
#define rmb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define wmb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define mb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define smp_mb()	__asm __volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
#define smp_rmb()	__membar("")
#define smp_wmb()	__membar("")
#define __smp_store_mb(var, value)	do { (void)xchg(&var, value); } while (0)
#define smp_mb__after_atomic()	do { } while (0)
#define smp_mb__before_atomic()	do { } while (0)
#elif defined(__amd64__)
#define rmb()	__membar("lfence")
#define wmb()	__membar("sfence")
#define mb()	__membar("mfence")
#define smp_mb()	__asm __volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
#define smp_rmb()	__membar("")
#define smp_wmb()	__membar("")
#define __smp_store_mb(var, value)	do { (void)xchg(&var, value); } while (0)
#define smp_mb__after_atomic()	do { } while (0)
#define smp_mb__before_atomic()	do { } while (0)
#elif defined(__aarch64__)
#define rmb()	__membar("dsb ld")
#define wmb()	__membar("dsb st")
#define mb()	__membar("dsb sy")
#elif defined(__mips64__)
#define rmb()	mips_sync()
#define wmb()	mips_sync()
#define mb()	mips_sync()
#elif defined(__powerpc64__)
#define rmb()	__membar("sync")
#define wmb()	__membar("sync")
#define mb()	__membar("sync")
#define smp_rmb()	__membar("lwsync")
#define smp_wmb()	__membar("lwsync")
#elif defined(__powerpc__)
#define rmb()	__membar("sync")
#define wmb()	__membar("sync")
#define mb()	__membar("sync")
#define smp_wmb()	__membar("eieio")
#elif defined(__riscv)
#define rmb()	__membar("fence ir,ir")
#define wmb()	__membar("fence ow,ow")
#define mb()	__membar("fence iorw,iorw")
#define smp_rmb()	__membar("fence r,r")
#define smp_wmb()	__membar("fence w,w")
#define smp_mb()	__membar("fence rw,rw")
#elif defined(__sparc64__)
#define rmb()	membar_sync()
#define wmb()	membar_sync()
#define mb()	membar_sync()
#endif

#ifndef smp_rmb
#define smp_rmb()	rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	wmb()
#endif

#ifndef mmiowb
#define mmiowb()	wmb()
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	mb()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	mb()
#endif

#ifndef smp_store_mb
#define smp_store_mb(x, v)	do { x = v; mb(); } while (0)
#endif

#endif
467