/*	$NetBSD: atomic.h,v 1.21 2019/09/28 12:34:56 christos Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define	_LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
#  define	smp_mb__before_atomic()		membar_exit()
#  define	smp_mb__after_atomic()		membar_enter()
#else
#  define	smp_mb__before_atomic()		__insn_barrier()
#  define	smp_mb__after_atomic()		__insn_barrier()
#endif

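/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * Linux callers pair these macros with the barrier-free operations
 * below when ordering around a relaxed atomic is required, e.g.
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&foo->f_count);	(f_count is a made-up atomic_t)
 *	smp_mb__after_atomic();
 *
 * which is the same bracketing the value-returning operations in this
 * file apply internally.
 */
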
/*
 * atomic (u)int operations
 *
 *	Atomics that return a value, other than atomic_read, imply a
 *	full memory barrier.  Those that do not return a value imply no
 *	memory barrier.
 */

struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

static inline int
atomic_read(atomic_t *atomic)
{
	/* no membar */
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	/* no membar */
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	/* no membar */
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
	smp_mb__after_atomic();

	return v;
}

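/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * atomic_add is barrier-free and suits plain statistics counters,
 * while atomic_add_return additionally orders surrounding memory
 * accesses, per the comment above.
 *
 *	atomic_add(len, &sc->sc_bytes_rx);
 *	if (atomic_add_return(1, &sc->sc_pending) == 1)
 *		foo_kick(sc);
 *
 * The sc_bytes_rx and sc_pending members and foo_kick() are made up
 * for the example.
 */
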
static inline void
atomic_inc(atomic_t *atomic)
{
	/* no membar */
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	/* no membar */
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	int v;

	smp_mb__before_atomic();
	v = (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
	smp_mb__after_atomic();

	return v;
}

static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	/* membar implied by atomic_dec_return */
	return atomic_dec_return(atomic) == 0;
}

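/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * a typical reference-count release using atomic_dec_and_test, whose
 * implied full barrier makes prior stores visible before the object
 * is destroyed.
 *
 *	static void
 *	foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->f_refcnt))
 *			foo_destroy(f);
 *	}
 *
 * struct foo, f_refcnt, and foo_destroy() are made up for the example.
 */
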
static inline void
atomic_or(int value, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, value);
}

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	/* no membar */
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	smp_mb__before_atomic();
	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			break;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != (unsigned)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	/* membar implied by atomic_add_unless */
	return atomic_add_unless(atomic, 1, 0);
}

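/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * atomic_inc_not_zero is the usual way to take a reference to an
 * object found in a shared lookup structure without resurrecting one
 * whose count has already dropped to zero.
 *
 *	struct foo *f = foo_lookup(key);
 *	if (f != NULL && !atomic_inc_not_zero(&f->f_refcnt))
 *		f = NULL;	(object is being destroyed; skip it)
 *
 * foo_lookup() and f_refcnt are made up for the example.
 */
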
static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	int old;

	smp_mb__before_atomic();
	old = (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

static inline int
atomic_cmpxchg(atomic_t *atomic, int expect, int new)
{
	int old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)expect,
	    (unsigned)new);
	smp_mb__after_atomic();

	return old;
}

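/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * atomic_cmpxchg returns the old value, so the conventional update
 * loop retries until the compare-and-swap observes an unchanged
 * value.  For example, a saturating increment:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&a->a_users);
 *		if (old == INT_MAX)
 *			break;		(already saturated)
 *		new = old + 1;
 *	} while (atomic_cmpxchg(&a->a_users, old, new) != old);
 *
 * The a_users member is made up for the example.
 */
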
struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

#define	ATOMIC64_INIT(v)	{ .a_v = (v) }

int		linux_atomic64_init(void);
void		linux_atomic64_fini(void);

#ifdef __HAVE_ATOMIC64_OPS

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	/* no membar */
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	/* no membar */
	a->a_v = v;
}

static inline void
atomic64_add(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(int64_t d, struct atomic64 *a)
{
	/* no membar */
	atomic_add_64(&a->a_v, -d);
}

static inline int64_t
atomic64_add_return(int64_t d, struct atomic64 *a)
{
	int64_t v;

	smp_mb__before_atomic();
	v = (int64_t)atomic_add_64_nv(&a->a_v, d);
	smp_mb__after_atomic();

	return v;
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t new)
{
	uint64_t old;

	smp_mb__before_atomic();
	old = atomic_swap_64(&a->a_v, new);
	smp_mb__after_atomic();

	return old;
}

static inline uint64_t
atomic64_cmpxchg(struct atomic64 *atomic, uint64_t expect, uint64_t new)
{
	uint64_t old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = atomic_cas_64(&atomic->a_v, expect, new);
	smp_mb__after_atomic();

	return old;
}

#else  /* !defined(__HAVE_ATOMIC64_OPS) */

#define	atomic64_add		linux_atomic64_add
#define	atomic64_add_return	linux_atomic64_add_return
#define	atomic64_cmpxchg	linux_atomic64_cmpxchg
#define	atomic64_read		linux_atomic64_read
#define	atomic64_set		linux_atomic64_set
#define	atomic64_sub		linux_atomic64_sub
#define	atomic64_xchg		linux_atomic64_xchg

uint64_t	atomic64_read(const struct atomic64 *);
void		atomic64_set(struct atomic64 *, uint64_t);
void		atomic64_add(int64_t, struct atomic64 *);
void		atomic64_sub(int64_t, struct atomic64 *);
int64_t		atomic64_add_return(int64_t, struct atomic64 *);
uint64_t	atomic64_xchg(struct atomic64 *, uint64_t);
uint64_t	atomic64_cmpxchg(struct atomic64 *, uint64_t, uint64_t);

#endif

static inline int64_t
atomic64_inc_return(struct atomic64 *a)
{
	return atomic64_add_return(1, a);
}

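/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * the atomic64 operations behave like the 32-bit ones, e.g. a
 * monotonically increasing 64-bit sequence number:
 *
 *	uint64_t seq = atomic64_inc_return(&dev->d_seqno);
 *
 * On platforms without __HAVE_ATOMIC64_OPS the same calls resolve to
 * the linux_atomic64_* functions declared above, so callers need not
 * care which variant they get.  The d_seqno member is made up for the
 * example.
 */
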
struct atomic_long {
	volatile unsigned long	al_v;
};

typedef struct atomic_long atomic_long_t;

static inline long
atomic_long_read(struct atomic_long *a)
{
	/* no membar */
	return (long)a->al_v;
}

static inline void
atomic_long_set(struct atomic_long *a, long v)
{
	/* no membar */
	a->al_v = v;
}

static inline long
atomic_long_add_unless(struct atomic_long *a, long addend, long zero)
{
	long value;

	smp_mb__before_atomic();
	do {
		value = (long)a->al_v;
		if (value == zero)
			break;
	} while (atomic_cas_ulong(&a->al_v, (unsigned long)value,
		(unsigned long)(value + addend)) != (unsigned long)value);
	smp_mb__after_atomic();

	return value != zero;
}

static inline long
atomic_long_inc_not_zero(struct atomic_long *a)
{
	/* membar implied by atomic_long_add_unless */
	return atomic_long_add_unless(a, 1, 0);
}

static inline long
atomic_long_cmpxchg(struct atomic_long *a, long expect, long new)
{
	long old;

	/*
	 * XXX As an optimization, under Linux's semantics we are
	 * allowed to skip the memory barrier if the comparison fails,
	 * but taking advantage of that is not convenient here.
	 */
	smp_mb__before_atomic();
	old = (long)atomic_cas_ulong(&a->al_v, (unsigned long)expect,
	    (unsigned long)new);
	smp_mb__after_atomic();

	return old;
}

static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	/* no memory barrier */
	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

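/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * the bit operations treat ptr as an array of unsigned longs, so
 * bit / units selects the word and bit % units the bit within it.
 * A multi-word bitmap is therefore just:
 *
 *	static volatile unsigned long foo_map[2];	(at least 64 bits)
 *
 *	set_bit(37, foo_map);		(word 1 on ILP32, word 0 on LP64)
 *	clear_bit(37, foo_map);
 *
 * foo_map is made up for the example.
 */
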
static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	/* no memory barrier */
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline int
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

static inline int
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	smp_mb__before_atomic();
	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
	smp_mb__after_atomic();

	return ((v & mask) != 0);
}

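/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * test_and_set_bit and test_and_clear_bit are commonly used for
 * one-shot flags, relying on the full barrier they imply:
 *
 *	if (!test_and_set_bit(FOO_F_INITED, &foo->f_flags))
 *		foo_init_once(foo);	(first caller does the work)
 *
 * FOO_F_INITED, f_flags, and foo_init_once() are made up for the
 * example.
 */
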
#endif  /* _LINUX_ATOMIC_H_ */