/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_SPINLOCK_H_
#define _RTE_SPINLOCK_H_

/**
 * @file
 *
 * RTE Spinlocks
 *
 * This file defines an API for spinlocks, which are implemented in an
 * architecture-specific way. This kind of lock simply waits in a loop,
 * repeatedly checking until the lock becomes available.
 *
 * All locks must be initialised before use, and only initialised once.
 */

#include <rte_lcore.h>
#ifdef RTE_FORCE_INTRINSICS
#include <rte_common.h>
#endif
#include <rte_lock_annotations.h>
#include <rte_pause.h>
#include <rte_stdatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * The rte_spinlock_t type.
 */
typedef struct __rte_lockable {
	volatile RTE_ATOMIC(int) locked; /**< lock status: 0 = unlocked, 1 = locked */
} rte_spinlock_t;

/**
 * A static spinlock initializer.
 */
#define RTE_SPINLOCK_INITIALIZER { 0 }

/**
 * Initialize the spinlock to an unlocked state.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_init(rte_spinlock_t *sl)
{
	sl->locked = 0;
}
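
/*
 * Illustrative usage sketch (not part of this header's API; all names
 * below are hypothetical): a spinlock may be initialised statically
 * with the initializer macro, or at run time with rte_spinlock_init().
 *
 *   static rte_spinlock_t my_lock = RTE_SPINLOCK_INITIALIZER;
 *
 *   struct ctx { rte_spinlock_t lock; };
 *
 *   static void ctx_init(struct ctx *c)
 *   {
 *       rte_spinlock_init(&c->lock);
 *   }
 */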

/**
 * Take the spinlock.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
	__rte_exclusive_lock_function(sl);

#ifdef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	int exp = 0;

	while (!rte_atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,
				rte_memory_order_acquire, rte_memory_order_relaxed)) {
		rte_wait_until_equal_32((volatile uint32_t *)(uintptr_t)&sl->locked,
			       0, rte_memory_order_relaxed);
		exp = 0;
	}
}
#endif

/**
 * Release the spinlock.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
	__rte_unlock_function(sl);

#ifdef RTE_FORCE_INTRINSICS
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	rte_atomic_store_explicit(&sl->locked, 0, rte_memory_order_release);
}
#endif
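
/*
 * Illustrative usage sketch (names are hypothetical): a minimal
 * critical section guarding a shared counter. Waiters spin rather than
 * sleep, so the section between lock and unlock should be kept short.
 *
 *   static rte_spinlock_t cnt_lock = RTE_SPINLOCK_INITIALIZER;
 *   static uint64_t cnt;
 *
 *   static void cnt_increment(void)
 *   {
 *       rte_spinlock_lock(&cnt_lock);
 *       cnt++;
 *       rte_spinlock_unlock(&cnt_lock);
 *   }
 */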

/**
 * Try to take the lock.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
__rte_warn_unused_result
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
	__rte_exclusive_trylock_function(1, sl);

#ifdef RTE_FORCE_INTRINSICS
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	int exp = 0;
	return rte_atomic_compare_exchange_strong_explicit(&sl->locked, &exp, 1,
				rte_memory_order_acquire, rte_memory_order_relaxed);
}
#endif
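
/*
 * Illustrative usage sketch (names are hypothetical): trylock lets a
 * thread skip optional work instead of spinning when the lock is
 * contended.
 *
 *   if (rte_spinlock_trylock(&cache_lock)) {
 *       cache_refresh();
 *       rte_spinlock_unlock(&cache_lock);
 *   }
 *   (otherwise another thread holds the lock; skip the refresh)
 */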

/**
 * Test if the lock is taken.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the lock is currently taken; 0 otherwise.
 */
static inline int rte_spinlock_is_locked(rte_spinlock_t *sl)
{
	return rte_atomic_load_explicit(&sl->locked, rte_memory_order_acquire);
}
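
/*
 * Illustrative usage sketch: the result is only a snapshot of the lock
 * state, so this suits diagnostics and assertions rather than deciding
 * whether a later lock attempt will succeed. For example, inside a
 * function that must be called with the lock held (using RTE_ASSERT()
 * from rte_debug.h; "obj" is a hypothetical name):
 *
 *   RTE_ASSERT(rte_spinlock_is_locked(&obj->lock));
 */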

/**
 * Test if hardware transactional memory (lock elision) is supported.
 *
 * @return
 *   1 if hardware transactional memory is supported; 0 otherwise.
 */
static inline int rte_tm_supported(void);

/**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, take the spinlock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, it is not advisable
 * to use hardware transactional locks around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
	__rte_exclusive_lock_function(sl);

/**
 * Commit the hardware memory transaction, or release the spinlock if
 * the spinlock is used as a fall-back.
 *
 * @param sl
 *   A pointer to the spinlock.
 */
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
	__rte_unlock_function(sl);

/**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, try to take the lock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, it is not advisable
 * to use hardware transactional locks around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param sl
 *   A pointer to the spinlock.
 * @return
 *   1 if the hardware memory transaction is successfully started
 *   or the lock is successfully taken; 0 otherwise.
 */
__rte_warn_unused_result
static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
	__rte_exclusive_trylock_function(1, sl);
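
/*
 * Illustrative usage sketch (names are hypothetical): the _tm variants
 * pair up exactly like the plain lock/unlock calls; the elision and the
 * spinlock fall-back are handled internally. Per the notes above, avoid
 * HW I/O such as rte_eth_rx_burst()/rte_eth_tx_burst() inside the
 * section.
 *
 *   rte_spinlock_lock_tm(&tbl_lock);
 *   table_update(tbl);
 *   rte_spinlock_unlock_tm(&tbl_lock);
 */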

/**
 * The rte_spinlock_recursive_t type.
 */
typedef struct {
	rte_spinlock_t sl; /**< the actual spinlock */
	volatile int user; /**< thread id using the lock, -1 for unused */
	volatile int count; /**< count of times this lock has been called */
} rte_spinlock_recursive_t;

/**
 * A static recursive spinlock initializer.
 */
#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}

/**
 * Initialize the recursive spinlock to an unlocked state.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
{
	rte_spinlock_init(&slr->sl);
	slr->user = -1;
	slr->count = 0;
}

/**
 * Take the recursive spinlock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
	__rte_no_thread_safety_analysis
{
	int id = rte_gettid();

	if (slr->user != id) {
		rte_spinlock_lock(&slr->sl);
		slr->user = id;
	}
	slr->count++;
}

/**
 * Release the recursive spinlock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
	__rte_no_thread_safety_analysis
{
	if (--(slr->count) == 0) {
		slr->user = -1;
		rte_spinlock_unlock(&slr->sl);
	}
}
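
/*
 * Illustrative usage sketch (names are hypothetical): the same thread
 * may take a recursive spinlock again without deadlocking; each lock
 * call must be balanced by an unlock before the lock is finally
 * released.
 *
 *   static rte_spinlock_recursive_t rlock =
 *           RTE_SPINLOCK_RECURSIVE_INITIALIZER;
 *
 *   static void leaf(void)
 *   {
 *       rte_spinlock_recursive_lock(&rlock);    (count becomes 2)
 *       rte_spinlock_recursive_unlock(&rlock);  (count back to 1)
 *   }
 *
 *   static void outer(void)
 *   {
 *       rte_spinlock_recursive_lock(&rlock);    (count becomes 1)
 *       leaf();
 *       rte_spinlock_recursive_unlock(&rlock);  (lock released)
 *   }
 */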

/**
 * Try to take the recursive lock.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 * @return
 *   1 if the lock is successfully taken; 0 otherwise.
 */
__rte_warn_unused_result
static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
	__rte_no_thread_safety_analysis
{
	int id = rte_gettid();

	if (slr->user != id) {
		if (rte_spinlock_trylock(&slr->sl) == 0)
			return 0;
		slr->user = id;
	}
	slr->count++;
	return 1;
}

/**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, take the recursive spinlock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, it is not advisable
 * to use hardware transactional locks around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_lock_tm(
	rte_spinlock_recursive_t *slr);

/**
 * Commit the hardware memory transaction, or release the recursive
 * spinlock if the recursive spinlock is used as a fall-back.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 */
static inline void rte_spinlock_recursive_unlock_tm(
	rte_spinlock_recursive_t *slr);

/**
 * Try to execute the critical section in a hardware memory transaction;
 * if that fails or is not available, try to take the recursive lock.
 *
 * NOTE: An attempt to perform a HW I/O operation inside a hardware memory
 * transaction always aborts the transaction since the CPU is not able to
 * roll back should the transaction fail. Therefore, it is not advisable
 * to use hardware transactional locks around rte_eth_rx_burst() and
 * rte_eth_tx_burst() calls.
 *
 * @param slr
 *   A pointer to the recursive spinlock.
 * @return
 *   1 if the hardware memory transaction is successfully started
 *   or the lock is successfully taken; 0 otherwise.
 */
__rte_warn_unused_result
static inline int rte_spinlock_recursive_trylock_tm(
	rte_spinlock_recursive_t *slr);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_SPINLOCK_H_ */