/*	$NetBSD: atomic.h,v 1.7 2014/07/17 14:30:33 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_ATOMIC_H_
#define _LINUX_ATOMIC_H_

#include <sys/atomic.h>

#include <machine/limits.h>

/*
 * Linux's atomic_t: an int-sized counter manipulated with the
 * operations below, implemented here on top of the native atomic(9)
 * interface.
 */
struct atomic {
	union {
		volatile int au_int;
		volatile unsigned int au_uint;
	} a_u;
};

#define	ATOMIC_INIT(i)	{ .a_u = { .au_int = (i) } }

typedef struct atomic atomic_t;

/*
 * atomic_read and atomic_set are plain volatile accesses; as in Linux,
 * they imply no memory barriers.
 */
static inline int
atomic_read(atomic_t *atomic)
{
	return atomic->a_u.au_int;
}

static inline void
atomic_set(atomic_t *atomic, int value)
{
	atomic->a_u.au_int = value;
}

static inline void
atomic_add(int addend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_sub(int subtrahend, atomic_t *atomic)
{
	atomic_add_int(&atomic->a_u.au_uint, -subtrahend);
}

static inline int
atomic_add_return(int addend, atomic_t *atomic)
{
	return (int)atomic_add_int_nv(&atomic->a_u.au_uint, addend);
}

static inline void
atomic_inc(atomic_t *atomic)
{
	atomic_inc_uint(&atomic->a_u.au_uint);
}

static inline void
atomic_dec(atomic_t *atomic)
{
	atomic_dec_uint(&atomic->a_u.au_uint);
}

static inline int
atomic_inc_return(atomic_t *atomic)
{
	return (int)atomic_inc_uint_nv(&atomic->a_u.au_uint);
}

static inline int
atomic_dec_return(atomic_t *atomic)
{
	return (int)atomic_dec_uint_nv(&atomic->a_u.au_uint);
}

/* Decrement, and return true (nonzero) iff the new value is zero. */
static inline int
atomic_dec_and_test(atomic_t *atomic)
{
	return (0 == (int)atomic_dec_uint_nv(&atomic->a_u.au_uint));
}
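
/*
 * Illustrative usage sketch (not part of the original header; the
 * names are hypothetical):
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_inc(&example_count);
 *	...
 *	if (atomic_dec_and_test(&example_count))
 *		example_cleanup();	(hypothetical cleanup hook)
 */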

static inline void
atomic_set_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_or_uint(&atomic->a_u.au_uint, mask);
}

static inline void
atomic_clear_mask(unsigned long mask, atomic_t *atomic)
{
	atomic_and_uint(&atomic->a_u.au_uint, ~mask);
}

/*
 * Add addend unless the current value is `zero'; return 1 if the
 * addition was performed, 0 if the value already equalled `zero'.
 */
static inline int
atomic_add_unless(atomic_t *atomic, int addend, int zero)
{
	int value;

	do {
		value = atomic->a_u.au_int;
		if (value == zero)
			return 0;
	} while (atomic_cas_uint(&atomic->a_u.au_uint, value, (value + addend))
	    != value);

	return 1;
}

static inline int
atomic_inc_not_zero(atomic_t *atomic)
{
	return atomic_add_unless(atomic, 1, 0);
}

/* Unconditionally store `new' and return the previous value. */
static inline int
atomic_xchg(atomic_t *atomic, int new)
{
	return (int)atomic_swap_uint(&atomic->a_u.au_uint, (unsigned)new);
}

/*
 * Compare-and-swap: returns the previous value, so the exchange took
 * effect iff the return value equals `old'.
 */
static inline int
atomic_cmpxchg(atomic_t *atomic, int old, int new)
{
	return (int)atomic_cas_uint(&atomic->a_u.au_uint, (unsigned)old,
	    (unsigned)new);
}
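
/*
 * Illustrative sketch (not part of the original header; the names are
 * hypothetical).  atomic_inc_not_zero() takes a reference only while
 * one is still held elsewhere, and atomic_cmpxchg() reports success by
 * returning `old':
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 *
 *	do {
 *		old = atomic_read(&counter);
 *		new = MIN(old + 1, EXAMPLE_LIMIT);
 *	} while (atomic_cmpxchg(&counter, old, new) != old);
 */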

/*
 * 64-bit counterpart of atomic_t, built on the machine-dependent
 * 64-bit atomic(9) operations.
 */
struct atomic64 {
	volatile uint64_t	a_v;
};

typedef struct atomic64 atomic64_t;

static inline uint64_t
atomic64_read(const struct atomic64 *a)
{
	return a->a_v;
}

static inline void
atomic64_set(struct atomic64 *a, uint64_t v)
{
	a->a_v = v;
}

static inline void
atomic64_add(long long d, struct atomic64 *a)
{
	atomic_add_64(&a->a_v, d);
}

static inline void
atomic64_sub(long long d, struct atomic64 *a)
{
	atomic_add_64(&a->a_v, -d);
}

static inline uint64_t
atomic64_xchg(struct atomic64 *a, uint64_t v)
{
	return atomic_swap_64(&a->a_v, v);
}
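
/*
 * Illustrative sketch (not part of the original header; the variable
 * name is hypothetical):
 *
 *	static atomic64_t bytes_bound;
 *
 *	atomic64_set(&bytes_bound, 0);
 *	atomic64_add(PAGE_SIZE, &bytes_bound);
 *	total = atomic64_xchg(&bytes_bound, 0);
 */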

/*
 * Linux bitmap operations: `bit' indexes an array of unsigned longs,
 * with bit 0 being the least significant bit of ptr[0].
 */
static inline void
set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_or_ulong(&ptr[bit / units], (1UL << (bit % units)));
}

static inline void
clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);

	atomic_and_ulong(&ptr[bit / units], ~(1UL << (bit % units)));
}

static inline void
change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);
}

static inline unsigned long
test_and_set_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v | mask)) != v);

	return ((v & mask) != 0);
}

static inline unsigned long
test_and_clear_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v & ~mask)) != v);

	return ((v & mask) != 0);
}

static inline unsigned long
test_and_change_bit(unsigned int bit, volatile unsigned long *ptr)
{
	const unsigned int units = (sizeof(*ptr) * CHAR_BIT);
	volatile unsigned long *const p = &ptr[bit / units];
	const unsigned long mask = (1UL << (bit % units));
	unsigned long v;

	do v = *p; while (atomic_cas_ulong(p, v, (v ^ mask)) != v);

	return ((v & mask) != 0);
}
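
/*
 * Illustrative sketch of the bit operations (not part of the original
 * header; the names are hypothetical).  The bitmap is an ordinary
 * array of unsigned longs, sized in multiples of the word size:
 *
 *	static volatile unsigned long example_bitmap
 *	    [128 / (sizeof(unsigned long) * CHAR_BIT)];
 *
 *	set_bit(42, example_bitmap);
 *	if (test_and_clear_bit(42, example_bitmap))
 *		example_handle(42);	(hypothetical handler)
 */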

#if defined(MULTIPROCESSOR) && !defined(__HAVE_ATOMIC_AS_MEMBAR)
/*
 * XXX These memory barriers are doubtless overkill, but I am having
 * trouble understanding the intent and use of the Linux atomic membar
 * API.  I think that for reference counting purposes, the sequences
 * should be insn/inc/enter and exit/dec/insn, but the use of the
 * before/after memory barriers is not consistent throughout Linux.
 * A conservative pairing is sketched below.
 */
#  define	smp_mb__before_atomic_inc()	membar_sync()
#  define	smp_mb__after_atomic_inc()	membar_sync()
#  define	smp_mb__before_atomic_dec()	membar_sync()
#  define	smp_mb__after_atomic_dec()	membar_sync()
#else
#  define	smp_mb__before_atomic_inc()	__insn_barrier()
#  define	smp_mb__after_atomic_inc()	__insn_barrier()
#  define	smp_mb__before_atomic_dec()	__insn_barrier()
#  define	smp_mb__after_atomic_dec()	__insn_barrier()
#endif
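
/*
 * Illustrative sketch, following the XXX comment above (not part of
 * the original header; the struct and function names are
 * hypothetical).  One conservative way to pair the barriers with a
 * reference count:
 *
 *	static void
 *	obj_ref(struct obj *obj)
 *	{
 *		smp_mb__before_atomic_inc();
 *		atomic_inc(&obj->refcnt);
 *		smp_mb__after_atomic_inc();
 *	}
 *
 *	static void
 *	obj_unref(struct obj *obj)
 *	{
 *		smp_mb__before_atomic_dec();
 *		if (atomic_dec_and_test(&obj->refcnt)) {
 *			smp_mb__after_atomic_dec();
 *			obj_destroy(obj);
 *		}
 *	}
 */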

#endif  /* _LINUX_ATOMIC_H_ */