/* $OpenBSD: atomic.h,v 1.1 2019/04/14 10:14:53 jsg Exp $ */
/**
 * \file atomic.h
 * Atomic operations used in the DRM which may or may not be provided by the OS.
 *
 * \author Eric Anholt <anholt@FreeBSD.org>
 */

/*-
 * Copyright 2004 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _DRM_LINUX_ATOMIC_H_
#define _DRM_LINUX_ATOMIC_H_

#include <sys/types.h>
#include <sys/mutex.h>
#include <machine/intr.h>
#include <machine/atomic.h>
#include <linux/types.h>

#define atomic_set(p, v)	(*(p) = (v))
#define atomic_read(p)		(*(p))
#define atomic_inc(p)		__sync_fetch_and_add(p, 1)
#define atomic_dec(p)		__sync_fetch_and_sub(p, 1)
#define atomic_add(n, p)	__sync_fetch_and_add(p, n)
#define atomic_sub(n, p)	__sync_fetch_and_sub(p, n)
#define atomic_add_return(n, p) __sync_add_and_fetch(p, n)
#define atomic_sub_return(n, p) __sync_sub_and_fetch(p, n)
#define atomic_inc_return(v)	atomic_add_return(1, (v))
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)
#define atomic_or(n, p)		atomic_setbits_int(p, n)
#define atomic_cmpxchg(p, o, n)	__sync_val_compare_and_swap(p, o, n)
#define atomic_set_release(p, v)	atomic_set((p), (v))
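
/*
 * Example (illustrative only, not part of the original header): typical
 * Linux-style use of the macros above, assuming atomic_t is a plain
 * integer type as in this compat layer.
 *
 *	atomic_t refs;
 *
 *	atomic_set(&refs, 1);
 *	atomic_inc(&refs);
 *	if (atomic_dec_and_test(&refs))
 *		printf("dropped last reference\n");
 *	int old = atomic_cmpxchg(&refs, 1, 0);	// returns the prior value
 */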

/*
 * __sync_lock_test_and_set() is only an acquire barrier, so issue a
 * full barrier first to give this the fully ordered semantics of a
 * Linux xchg().
 */
static inline int
atomic_xchg(volatile int *v, int n)
{
	__sync_synchronize();
	return __sync_lock_test_and_set(v, n);
}

#define xchg(v, n)	__sync_lock_test_and_set(v, n)

/* Add n to *v unless it was u; returns nonzero if the add happened. */
static inline int
atomic_add_unless(volatile int *v, int n, int u)
{
	int o;

	do {
		o = *v;
		if (o == u)
			return 0;
	} while (__sync_val_compare_and_swap(v, o, o + n) != o);

	return 1;
}
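
/*
 * Example (illustrative only): the classic "get a reference unless the
 * object is already dead" pattern that atomic_add_unless() enables.
 *
 *	if (!atomic_add_unless(&obj_refcount, 1, 0))
 *		return NULL;	// refcount was 0; object is being freed
 */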

/*
 * Decrement *v only if the result would be non-negative; returns the
 * new value, or a negative value if *v was left untouched.
 */
static inline int
atomic_dec_if_positive(volatile int *v)
{
	int r, o;

	do {
		o = *v;
		r = o - 1;
		if (r < 0)
			break;
	} while (__sync_val_compare_and_swap(v, o, r) != o);

	return r;
}
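
/*
 * Example (illustrative only): a negative return value signals that the
 * counter was already 0 and nothing was decremented.
 *
 *	if (atomic_dec_if_positive(&sem_count) < 0)
 *		return EAGAIN;	// no units left to take
 */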

#define atomic_long_read(p)	(*(p))

#ifdef __LP64__
typedef int64_t atomic64_t;

#define atomic64_set(p, v)	(*(p) = (v))
#define atomic64_read(p)	(*(p))

static inline int64_t
atomic64_xchg(volatile int64_t *v, int64_t n)
{
	__sync_synchronize();
	return __sync_lock_test_and_set(v, n);
}

#define atomic64_add(n, p)	__sync_fetch_and_add_8(p, n)
#define atomic64_sub(n, p)	__sync_fetch_and_sub_8(p, n)
#define atomic64_inc(p)		__sync_fetch_and_add_8(p, 1)
#define atomic64_add_return(n, p) __sync_add_and_fetch_8(p, n)
#define atomic64_inc_return(p)	__sync_add_and_fetch_8(p, 1)

#else

typedef struct {
	volatile int64_t val;
	struct mutex lock;
} atomic64_t;

static inline void
atomic64_set(atomic64_t *v, int64_t i)
{
	mtx_init(&v->lock, IPL_HIGH);
	v->val = i;
}

static inline int64_t
atomic64_read(atomic64_t *v)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val;
	mtx_leave(&v->lock);

	return val;
}

static inline int64_t
atomic64_xchg(atomic64_t *v, int64_t n)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val;
	v->val = n;
	mtx_leave(&v->lock);

	return val;
}

static inline void
atomic64_add(int i, atomic64_t *v)
{
	mtx_enter(&v->lock);
	v->val += i;
	mtx_leave(&v->lock);
}

#define atomic64_inc(p)		atomic64_add(1, p)

static inline int64_t
atomic64_add_return(int i, atomic64_t *v)
{
	int64_t val;

	mtx_enter(&v->lock);
	val = v->val + i;
	v->val = val;
	mtx_leave(&v->lock);

	return val;
}

#define atomic64_inc_return(p)		atomic64_add_return(1, p)

static inline void
atomic64_sub(int i, atomic64_t *v)
{
	mtx_enter(&v->lock);
	v->val -= i;
	mtx_leave(&v->lock);
}
#endif
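
/*
 * Example (illustrative only): the __LP64__ and mutex-based fallback
 * implementations above expose the same interface, so callers use
 * atomic64_t uniformly.  On 32-bit platforms atomic64_set() also
 * initializes the embedded mutex, so it must run before any other
 * atomic64_*() operation on that object.
 *
 *	atomic64_t bytes;
 *
 *	atomic64_set(&bytes, 0);
 *	atomic64_add(4096, &bytes);
 *	int64_t now = atomic64_add_return(4096, &bytes);	// now == 8192
 */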

#ifdef __LP64__
typedef int64_t atomic_long_t;
#define atomic_long_set(p, v)		atomic64_set(p, v)
#define atomic_long_xchg(v, n)		atomic64_xchg(v, n)
#define atomic_long_cmpxchg(p, o, n)	atomic_cmpxchg(p, o, n)
#else
typedef int32_t atomic_long_t;
#define atomic_long_set(p, v)		atomic_set(p, v)
#define atomic_long_xchg(v, n)		atomic_xchg(v, n)
#define atomic_long_cmpxchg(p, o, n)	atomic_cmpxchg(p, o, n)
#endif

/*
 * XXX Not actually atomic: a plain read-and-increment, in the same
 * spirit as the FIXME'd macros below.
 */
static inline int
atomic_inc_not_zero(atomic_t *p)
{
	if (*p == 0)
		return (0);

	*(p) += 1;
	return (*p);
}

/* FIXME */
#define atomic_set_int(p, bits)		atomic_setbits_int(p, bits)
#define atomic_set_mask(bits, p)	atomic_setbits_int(p, bits)
#define atomic_clear_int(p, bits)	atomic_clearbits_int(p, bits)
#define atomic_clear_mask(bits, p)	atomic_clearbits_int(p, bits)
#define atomic_andnot(bits, p)		atomic_clearbits_int(p, bits)
#define atomic_fetchadd_int(p, n)	__sync_fetch_and_add(p, n)
#define atomic_fetchsub_int(p, n)	__sync_fetch_and_sub(p, n)
#define atomic_fetch_inc(p)		__sync_fetch_and_add(p, 1)
#define atomic_fetch_xor(n, p)		__sync_fetch_and_xor(p, n)

static inline int
test_and_set_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	unsigned int prev = __sync_fetch_and_or((volatile u_int *)p + (b >> 5), m);
	return (prev & m) != 0;
}

static inline void
clear_bit(u_int b, volatile void *p)
{
	atomic_clear_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
}

static inline void
set_bit(u_int b, volatile void *p)
{
	atomic_set_int(((volatile u_int *)p) + (b >> 5), 1 << (b & 0x1f));
}
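
/*
 * Example (illustrative only): the bit helpers operate on an array of
 * 32-bit words, so a bitmap is declared as plain u_int storage.
 *
 *	u_int map[2] = { 0, 0 };	// 64 bits
 *
 *	set_bit(40, map);		// sets bit 8 of map[1]
 *	if (test_and_set_bit(40, map))
 *		printf("bit 40 was already set\n");
 *	clear_bit(40, map);
 */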

/* The __-prefixed variants below are the non-atomic versions. */
static inline void
__clear_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	ptr[b >> 5] &= ~(1 << (b & 0x1f));
}

static inline void
__set_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	ptr[b >> 5] |= (1 << (b & 0x1f));
}

static inline int
test_bit(u_int b, const volatile void *p)
{
	return !!(((volatile u_int *)p)[b >> 5] & (1 << (b & 0x1f)));
}

static inline int
__test_and_set_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	volatile u_int *ptr = (volatile u_int *)p;
	unsigned int prev = ptr[b >> 5];
	ptr[b >> 5] |= m;

	return (prev & m) != 0;
}

static inline int
test_and_clear_bit(u_int b, volatile void *p)
{
	unsigned int m = 1 << (b & 0x1f);
	unsigned int prev = __sync_fetch_and_and((volatile u_int *)p + (b >> 5), ~m);
	return (prev & m) != 0;
}

static inline int
__test_and_clear_bit(u_int b, volatile void *p)
{
	volatile u_int *ptr = (volatile u_int *)p;
	int rv = !!(ptr[b >> 5] & (1 << (b & 0x1f)));
	ptr[b >> 5] &= ~(1 << (b & 0x1f));
	return rv;
}

static inline int
find_first_zero_bit(volatile void *p, int max)
{
	int b;
	volatile u_int *ptr = (volatile u_int *)p;

	for (b = 0; b < max; b += 32) {
		if (ptr[b >> 5] != ~0) {
			for (;;) {
				if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
					return b;
				b++;
			}
		}
	}
	return max;
}
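
/*
 * Example (illustrative only): a simple ID allocator built on
 * find_first_zero_bit(), assuming the caller holds the relevant lock.
 *
 *	int id = find_first_zero_bit(map, 64);
 *	if (id == 64)
 *		return -1;	// bitmap full
 *	__set_bit(id, map);
 */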

static inline int
find_next_zero_bit(volatile void *p, int max, int b)
{
	volatile u_int *ptr = (volatile u_int *)p;

	for (; b < max; b += 32) {
		if (ptr[b >> 5] != ~0) {
			for (;;) {
				if ((ptr[b >> 5] & (1 << (b & 0x1f))) == 0)
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_first_bit(volatile void *p, int max)
{
	int b;
	volatile u_int *ptr = (volatile u_int *)p;

	for (b = 0; b < max; b += 32) {
		if (ptr[b >> 5] != 0) {
			for (;;) {
				if (ptr[b >> 5] & (1 << (b & 0x1f)))
					return b;
				b++;
			}
		}
	}
	return max;
}

static inline int
find_next_bit(volatile void *p, int max, int b)
{
	volatile u_int *ptr = (volatile u_int *)p;

	for (; b < max; b += 32) {
		if (ptr[b >> 5] != 0) {
			for (;;) {
				if (ptr[b >> 5] & (1 << (b & 0x1f)))
					return b;
				b++;
			}
		}
	}
	return max;
}

#define for_each_set_bit(b, p, max) \
	for ((b) = find_first_bit((p), (max));			\
	     (b) < (max);					\
	     (b) = find_next_bit((p), (max), (b) + 1))

#define for_each_clear_bit(b, p, max) \
	for ((b) = find_first_zero_bit((p), (max));		\
	     (b) < (max);					\
	     (b) = find_next_zero_bit((p), (max), (b) + 1))
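
/*
 * Example (illustrative only): iterating a bitmap with the macros
 * above.  The loop variable must be a plain int, since the macro
 * evaluates and reassigns it.
 *
 *	int b;
 *
 *	for_each_set_bit(b, map, 64)
 *		printf("bit %d is set\n", b);
 */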

/* DRM_READMEMORYBARRIER() prevents reordering of reads.
 * DRM_WRITEMEMORYBARRIER() prevents reordering of writes.
 * DRM_MEMORYBARRIER() prevents reordering of reads and writes.
 */
#if defined(__i386__)
#define DRM_READMEMORYBARRIER()		__asm __volatile( \
					"lock; addl $0,0(%%esp)" : : : "memory");
#define DRM_WRITEMEMORYBARRIER()	__asm __volatile("" : : : "memory");
#define DRM_MEMORYBARRIER()		__asm __volatile( \
					"lock; addl $0,0(%%esp)" : : : "memory");
#elif defined(__alpha__)
#define DRM_READMEMORYBARRIER()		alpha_mb();
#define DRM_WRITEMEMORYBARRIER()	alpha_wmb();
#define DRM_MEMORYBARRIER()		alpha_mb();
#elif defined(__amd64__)
#define DRM_READMEMORYBARRIER()		__asm __volatile( \
					"lock; addl $0,0(%%rsp)" : : : "memory");
#define DRM_WRITEMEMORYBARRIER()	__asm __volatile("" : : : "memory");
#define DRM_MEMORYBARRIER()		__asm __volatile( \
					"lock; addl $0,0(%%rsp)" : : : "memory");
#elif defined(__aarch64__)
#define DRM_READMEMORYBARRIER()		__membar("dsb ld")
#define DRM_WRITEMEMORYBARRIER()	__membar("dsb st")
#define DRM_MEMORYBARRIER()		__membar("dsb sy")
#elif defined(__mips64__)
#define DRM_READMEMORYBARRIER()		DRM_MEMORYBARRIER()
#define DRM_WRITEMEMORYBARRIER()	DRM_MEMORYBARRIER()
#define DRM_MEMORYBARRIER()		mips_sync()
#elif defined(__powerpc__)
#define DRM_READMEMORYBARRIER()		DRM_MEMORYBARRIER()
#define DRM_WRITEMEMORYBARRIER()	DRM_MEMORYBARRIER()
#define DRM_MEMORYBARRIER()		__asm __volatile("sync" : : : "memory");
#elif defined(__sparc64__)
#define DRM_READMEMORYBARRIER()		DRM_MEMORYBARRIER()
#define DRM_WRITEMEMORYBARRIER()	DRM_MEMORYBARRIER()
#define DRM_MEMORYBARRIER()		membar_sync()
#endif

#define smp_mb__before_atomic()		DRM_MEMORYBARRIER()
#define smp_mb__after_atomic()		DRM_MEMORYBARRIER()
#define smp_mb__before_atomic_dec()	DRM_MEMORYBARRIER()
#define smp_mb__after_atomic_dec()	DRM_MEMORYBARRIER()
#define smp_mb__before_atomic_inc()	DRM_MEMORYBARRIER()
#define smp_mb__after_atomic_inc()	DRM_MEMORYBARRIER()

#define smp_store_mb(x, v)		do { x = v; DRM_MEMORYBARRIER(); } while (0)

#define mb()				DRM_MEMORYBARRIER()
#define rmb()				DRM_READMEMORYBARRIER()
#define wmb()				DRM_WRITEMEMORYBARRIER()
#define smp_rmb()			DRM_READMEMORYBARRIER()
#define smp_wmb()			DRM_WRITEMEMORYBARRIER()
#define mmiowb()			DRM_WRITEMEMORYBARRIER()
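
/*
 * Example (illustrative only): the classic producer/consumer pairing of
 * the write and read barriers defined above.
 *
 *	// producer
 *	data = value;
 *	wmb();		// order the data store before the flag store
 *	flag = 1;
 *
 *	// consumer
 *	while (flag == 0)
 *		;
 *	rmb();		// order the flag load before the data load
 *	use(data);
 */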

#endif /* _DRM_LINUX_ATOMIC_H_ */