/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _SYS_ATOMIC_COMMON_H_
#define	_SYS_ATOMIC_COMMON_H_

#ifndef _MACHINE_ATOMIC_H_
#error do not include this header, use machine/atomic.h
#endif

#include <sys/cdefs.h>
#include <sys/types.h>

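/*
 * Relaxed (unordered) atomic loads and stores, shared by all of the
 * machine-dependent machine/atomic.h implementations.  These accesses are
 * performed exactly once but imply no ordering with respect to other
 * memory accesses.  _Bool gets a dedicated pair of helpers because the
 * _Generic selections below match types exactly.
 */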
#define	__atomic_load_bool_relaxed(p)	(*(volatile _Bool *)(p))
#define	__atomic_store_bool_relaxed(p, v)	\
    (*(volatile _Bool *)(p) = (_Bool)(v))

#define	__atomic_load_char_relaxed(p)	(*(volatile u_char *)(p))
#define	__atomic_load_short_relaxed(p)	(*(volatile u_short *)(p))
#define	__atomic_load_int_relaxed(p)	(*(volatile u_int *)(p))
#define	__atomic_load_long_relaxed(p)	(*(volatile u_long *)(p))
#define	__atomic_load_8_relaxed(p)	(*(volatile uint8_t *)(p))
#define	__atomic_load_16_relaxed(p)	(*(volatile uint16_t *)(p))
#define	__atomic_load_32_relaxed(p)	(*(volatile uint32_t *)(p))
#define	__atomic_load_64_relaxed(p)	(*(volatile uint64_t *)(p))

#define	__atomic_store_char_relaxed(p, v)	\
    (*(volatile u_char *)(p) = (u_char)(v))
#define	__atomic_store_short_relaxed(p, v)	\
    (*(volatile u_short *)(p) = (u_short)(v))
#define	__atomic_store_int_relaxed(p, v)	\
    (*(volatile u_int *)(p) = (u_int)(v))
#define	__atomic_store_long_relaxed(p, v)	\
    (*(volatile u_long *)(p) = (u_long)(v))
#define	__atomic_store_8_relaxed(p, v)		\
    (*(volatile uint8_t *)(p) = (uint8_t)(v))
#define	__atomic_store_16_relaxed(p, v)		\
    (*(volatile uint16_t *)(p) = (uint16_t)(v))
#define	__atomic_store_32_relaxed(p, v)		\
    (*(volatile uint32_t *)(p) = (uint32_t)(v))
#define	__atomic_store_64_relaxed(p, v)		\
    (*(volatile uint64_t *)(p) = (uint64_t)(v))
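/*
 * The double-underscore helpers above are implementation details: plain
 * volatile accesses, which rely on naturally aligned loads and stores of
 * these widths being performed as single, untorn accesses on all supported
 * platforms.  Code outside the atomic headers should use the
 * atomic_load_<type>() and atomic_store_<type>() wrappers defined below.
 */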

/*
 * When _Generic is available, try to provide some type checking.
 */
#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || \
    __has_extension(c_generic_selections)
#define	atomic_load_bool(p)			\
	_Generic(*(p), _Bool: __atomic_load_bool_relaxed(p))
#define	atomic_store_bool(p, v)			\
	_Generic(*(p), _Bool: __atomic_store_bool_relaxed(p, v))

#define	__atomic_load_generic(p, t, ut, n)	\
	_Generic(*(p),				\
	    t: __atomic_load_ ## n ## _relaxed(p), \
	    ut: __atomic_load_ ## n ## _relaxed(p))
#define	__atomic_store_generic(p, v, t, ut, n)	\
	_Generic(*(p),				\
	    t: __atomic_store_ ## n ## _relaxed(p, v), \
	    ut: __atomic_store_ ## n ## _relaxed(p, v))
#else
#define	atomic_load_bool(p)			\
	__atomic_load_bool_relaxed(p)
#define	atomic_store_bool(p, v)			\
	__atomic_store_bool_relaxed(p, v)
#define	__atomic_load_generic(p, t, ut, n)	\
	__atomic_load_ ## n ## _relaxed(p)
#define	__atomic_store_generic(p, v, t, ut, n)	\
	__atomic_store_ ## n ## _relaxed(p, v)
#endif
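/*
 * With _Generic, each wrapper below accepts a pointer to either the signed
 * or the unsigned variant of the named type and rejects anything else at
 * compile time.  For example (hypothetical variables),
 * atomic_load_int(&a_u_int) compiles while atomic_load_int(&a_long) does
 * not.  Without _Generic the wrappers still work, but the type check is
 * lost.
 */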

#define	atomic_load_char(p)	__atomic_load_generic(p, char, u_char, char)
#define	atomic_load_short(p)	__atomic_load_generic(p, short, u_short, short)
#define	atomic_load_int(p)	__atomic_load_generic(p, int, u_int, int)
#define	atomic_load_long(p)	__atomic_load_generic(p, long, u_long, long)
#define	atomic_load_8(p)	__atomic_load_generic(p, int8_t, uint8_t, 8)
#define	atomic_load_16(p)	__atomic_load_generic(p, int16_t, uint16_t, 16)
#define	atomic_load_32(p)	__atomic_load_generic(p, int32_t, uint32_t, 32)
#ifdef __LP64__
#define	atomic_load_64(p)	__atomic_load_generic(p, int64_t, uint64_t, 64)
#endif
#define	atomic_store_char(p, v)			\
	__atomic_store_generic(p, v, char, u_char, char)
#define	atomic_store_short(p, v)		\
	__atomic_store_generic(p, v, short, u_short, short)
#define	atomic_store_int(p, v)			\
	__atomic_store_generic(p, v, int, u_int, int)
#define	atomic_store_long(p, v)			\
	__atomic_store_generic(p, v, long, u_long, long)
#define	atomic_store_8(p, v)			\
	__atomic_store_generic(p, v, int8_t, uint8_t, 8)
#define	atomic_store_16(p, v)			\
	__atomic_store_generic(p, v, int16_t, uint16_t, 16)
#define	atomic_store_32(p, v)			\
	__atomic_store_generic(p, v, int32_t, uint32_t, 32)
#ifdef __LP64__
#define	atomic_store_64(p, v)			\
	__atomic_store_generic(p, v, int64_t, uint64_t, 64)
#endif
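/*
 * The 64-bit load and store wrappers are only provided on __LP64__ platforms,
 * where a naturally aligned 64-bit plain access is performed as a single,
 * untorn memory operation.  32-bit platforms cannot rely on that, so any
 * 64-bit atomics they offer come from machine/atomic.h instead.
 */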

#define	atomic_load_ptr(p)	(*(volatile __typeof(*p) *)(p))
#define	atomic_store_ptr(p, v)	(*(volatile __typeof(*p) *)(p) = (v))
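/*
 * atomic_load_ptr() and atomic_store_ptr() operate on pointer values; the
 * __typeof() cast keeps the loaded value correctly typed without a cast at
 * the call site.  A sketch with hypothetical names:
 *
 *	struct foo *fp = atomic_load_ptr(&sc->sc_foo);
 *	atomic_store_ptr(&sc->sc_foo, newfp);
 */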

/*
 * All architectures currently provide acquire and release fences of their
 * own, but none provides a consume fence.  The kludge below lets code that
 * wants consume semantics stop resorting to the stronger acquire fence
 * explicitly; it should be revisited once real consume support is sorted out.
 */
#define	atomic_load_consume_ptr(p)	\
    ((__typeof(*p)) atomic_load_acq_ptr((uintptr_t *)p))
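/*
 * Using the acquire variant here is always correct, since acquire ordering
 * is strictly stronger than consume; the only cost is a potentially
 * unnecessary barrier on weakly ordered machines.
 */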

#define	atomic_interrupt_fence()	__compiler_membar()
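/*
 * atomic_interrupt_fence() orders memory accesses against interrupt handlers
 * running on the same CPU.  A compiler barrier is sufficient: a CPU observes
 * its own accesses in program order, so only compiler reordering needs to be
 * prevented.
 */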

#endif /* !_SYS_ATOMIC_COMMON_H_ */