xref: /dpdk/lib/eal/x86/include/rte_atomic_64.h (revision 558f03577b28d2757bc93fc26cd1217026c48d0c)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 */

/*
 * Inspired from FreeBSD src/sys/amd64/include/atomic.h
 * Copyright (c) 1998 Doug Rabson
 * Copyright (c) 2019 Intel Corporation
 * All rights reserved.
 */

#ifndef _RTE_ATOMIC_X86_H_
#error do not include this file directly, use <rte_atomic.h> instead
#endif

#ifndef _RTE_ATOMIC_X86_64_H_
#define _RTE_ATOMIC_X86_64_H_

#include <stdint.h>

#include <rte_common.h>

/*------------------------- 64 bit atomic operations -------------------------*/

#ifndef RTE_FORCE_INTRINSICS
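/*
 * Atomic 64-bit compare-and-set: if *dst equals exp, write src into *dst.
 * Implemented with a lock-prefixed cmpxchgq; "sete" captures ZF, so the
 * function returns non-zero on success and 0 if *dst held another value.
 *
 * Illustrative usage (a sketch, not part of this header; "counter" and
 * "delta" are hypothetical names): a retry loop that atomically adds to a
 * 64-bit counter using cmpset alone.
 *
 *	uint64_t old, new;
 *	do {
 *		old = *counter;
 *		new = old + delta;
 *	} while (rte_atomic64_cmpset(counter, old, new) == 0);
 */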
static inline int
rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
{
	uint8_t res;

	asm volatile(
			MPLOCKED
			"cmpxchgq %[src], %[dst];"
			"sete %[res];"
			: [res] "=a" (res),     /* output */
			  [dst] "=m" (*dst)
			: [src] "r" (src),      /* input */
			  "a" (exp),
			  "m" (*dst)
			: "memory");            /* clobber list */

	return res;
}

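/*
 * Atomic exchange: store val into *dst and return the value *dst held
 * before the swap (xchgq with a memory operand is implicitly locked on x86).
 */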
static inline uint64_t
rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val)
{
	asm volatile(
			MPLOCKED
			"xchgq %0, %1;"
			: "=r" (val), "=m" (*dst)
			: "0" (val),  "m" (*dst)
			: "memory");         /* clobber list */
	return val;
}

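/* Initialize the counter to 0 (plain store, not an atomic RMW). */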
static inline void
rte_atomic64_init(rte_atomic64_t *v)
{
	v->cnt = 0;
}

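/* Return the current counter value (plain aligned 64-bit load). */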
static inline int64_t
rte_atomic64_read(rte_atomic64_t *v)
{
	return v->cnt;
}

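/* Set the counter to new_value (plain aligned 64-bit store). */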
static inline void
rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
{
	v->cnt = new_value;
}

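/* Atomically add inc to the counter using a lock-prefixed addq. */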
static inline void
rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
{
	asm volatile(
			MPLOCKED
			"addq %[inc], %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: [inc] "ir" (inc),     /* input */
			  "m" (v->cnt)
			);
}

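/* Atomically subtract dec from the counter using a lock-prefixed subq. */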
static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
{
	asm volatile(
			MPLOCKED
			"subq %[dec], %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: [dec] "ir" (dec),     /* input */
			  "m" (v->cnt)
			);
}

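/* Atomically increment the counter by one (lock incq). */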
static inline void
rte_atomic64_inc(rte_atomic64_t *v)
{
	asm volatile(
			MPLOCKED
			"incq %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}

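/* Atomically decrement the counter by one (lock decq). */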
static inline void
rte_atomic64_dec(rte_atomic64_t *v)
{
	asm volatile(
			MPLOCKED
			"decq %[cnt]"
			: [cnt] "=m" (v->cnt)   /* output */
			: "m" (v->cnt)          /* input */
			);
}

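/*
 * Atomically add inc to the counter and return the new value. xaddq leaves
 * the old counter value in prev, so prev + inc is the value after the add.
 */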
static inline int64_t
rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
{
	int64_t prev = inc;

	asm volatile(
			MPLOCKED
			"xaddq %[prev], %[cnt]"
			: [prev] "+r" (prev),   /* output */
			  [cnt] "=m" (v->cnt)
			: "m" (v->cnt)          /* input */
			);
	return prev + inc;
}

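/* Atomically subtract dec from the counter and return the new value. */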
static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
{
	return rte_atomic64_add_return(v, -dec);
}

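/*
 * Atomically increment the counter and return non-zero (true) if the
 * result is 0; "sete" captures the ZF flag set by incq.
 */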
static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"incq %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt), /* output */
			  [ret] "=qm" (ret)
			);

	return ret != 0;
}

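/*
 * Atomically decrement the counter and return non-zero (true) if the
 * result is 0.
 */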
static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
	uint8_t ret;

	asm volatile(
			MPLOCKED
			"decq %[cnt] ; "
			"sete %[ret]"
			: [cnt] "+m" (v->cnt),  /* output */
			  [ret] "=qm" (ret)
			);
	return ret != 0;
}

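/*
 * Atomically set the counter to 1 if it is currently 0. Returns non-zero
 * if the flag was acquired, 0 if it was already set.
 */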
static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
{
	return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
}

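/* Reset the counter to 0 (plain store). */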
static inline void rte_atomic64_clear(rte_atomic64_t *v)
{
	v->cnt = 0;
}
#endif

/*------------------------ 128 bit atomic operations -------------------------*/

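/*
 * 128-bit compare-and-exchange built on cmpxchg16b. The expected value is
 * passed in rdx:rax and the desired value in rcx:rbx; on failure the value
 * actually found in *dst is written back into *exp. The weak/success/failure
 * arguments are accepted for API compatibility but ignored here, since
 * cmpxchg16b is always a strong, fully ordered operation on x86.
 */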
static inline int
rte_atomic128_cmp_exchange(rte_int128_t *dst,
			   rte_int128_t *exp,
			   const rte_int128_t *src,
			   unsigned int weak,
			   int success,
			   int failure)
{
	RTE_SET_USED(weak);
	RTE_SET_USED(success);
	RTE_SET_USED(failure);
	uint8_t res;

	asm volatile (
		      MPLOCKED
		      "cmpxchg16b %[dst];"
		      " sete %[res]"
		      : [dst] "=m" (dst->val[0]),
			"=a" (exp->val[0]),
			"=d" (exp->val[1]),
			[res] "=r" (res)
		      : "b" (src->val[0]),
			"c" (src->val[1]),
			"a" (exp->val[0]),
			"d" (exp->val[1]),
			"m" (dst->val[0])
		      : "memory");

	return res;
}

#endif /* _RTE_ATOMIC_X86_64_H_ */