/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_SPINLOCK_X86_64_H_
#define _RTE_SPINLOCK_X86_64_H_

#include "generic/rte_spinlock.h"
#include "rte_rtm.h"
#include "rte_cpuflags.h"
#include "rte_branch_prediction.h"
#include "rte_common.h"
#include "rte_pause.h"
#include "rte_cycles.h"

#ifdef __cplusplus
extern "C" {
#endif

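/* Tuning constants for the RTM lock-elision path below: the maximum number
 * of transaction attempts before giving up, and the abort status code used
 * to signal that the lock was already held.
 */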
#define RTE_RTM_MAX_RETRIES (20)
#define RTE_XABORT_LOCK_BUSY (0xff)

#ifndef RTE_FORCE_INTRINSICS
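/* Take the lock with an atomic xchg. While the lock is held by another
 * thread, spin on a read-only cmpl/pause loop so the cache line is not
 * bounced between cores on every iteration.
 */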
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	int lock_val = 1;
	asm volatile (
			"1:\n"
			"xchg %[locked], %[lv]\n"
			"test %[lv], %[lv]\n"
			"jz 3f\n"
			"2:\n"
			"pause\n"
			"cmpl $0, %[locked]\n"
			"jnz 2b\n"
			"jmp 1b\n"
			"3:\n"
			: [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
			: "[lv]" (lock_val)
			: "memory");
}

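/* Release the lock. An xchg with a memory operand is implicitly locked on
 * x86, so this store also acts as a full memory barrier.
 */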
static inline void
rte_spinlock_unlock(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	int unlock_val = 0;
	asm volatile (
			"xchg %[locked], %[ulv]\n"
			: [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
			: "[ulv]" (unlock_val)
			: "memory");
}

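/* Try to take the lock once: returns 1 if the xchg observed the lock free
 * (previous value zero), 0 if it was already held.
 */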
static inline int
rte_spinlock_trylock(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	int lockval = 1;

	asm volatile (
			"xchg %[locked], %[lockval]"
			: [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
			: "[lockval]" (lockval)
			: "memory");

	return lockval == 0;
}
#endif /* RTE_FORCE_INTRINSICS */
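
/*
 * Minimal usage sketch of the plain spinlock API (illustrative only;
 * RTE_SPINLOCK_INITIALIZER comes from generic/rte_spinlock.h):
 *
 *	static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
 *
 *	rte_spinlock_lock(&lock);
 *	... critical section ...
 *	rte_spinlock_unlock(&lock);
 */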

/* Set at startup when the CPU reports RTM support. */
extern uint8_t rte_rtm_supported;

static inline int rte_tm_supported(void)
{
	return rte_rtm_supported;
}

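/* Try to start the critical section as an RTM transaction: returns 1 if a
 * transaction was begun (the caller then runs lock-free under HTM), or 0
 * if elision failed and the caller must take the lock for real.
 */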
static inline int
rte_try_tm(volatile RTE_ATOMIC(int) *lock)
{
	int i, retries;

	if (!rte_rtm_supported)
		return 0;

	retries = RTE_RTM_MAX_RETRIES;

	while (likely(retries--)) {

		unsigned int status = rte_xbegin();

		if (likely(RTE_XBEGIN_STARTED == status)) {
			if (unlikely(*lock))
				rte_xabort(RTE_XABORT_LOCK_BUSY);
			else
				return 1;
		}
		while (*lock)
			rte_pause();

		if ((status & RTE_XABORT_CONFLICT) ||
		   ((status & RTE_XABORT_EXPLICIT) &&
		    (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
			/* Add a small delay before retrying, scaling the
			 * delay with the number of attempts so far to give
			 * exponential back-off behaviour. The pause count
			 * is randomized by taking low bits of the TSC.
			 */
			int try_count = RTE_RTM_MAX_RETRIES - retries;
			int pause_count = (rte_rdtsc() & 0x7) | 1;
			pause_count <<= try_count;
			for (i = 0; i < pause_count; i++)
				rte_pause();
			continue;
		}

		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
			break;
	}
	return 0;
}

static inline void
rte_spinlock_lock_tm(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	if (likely(rte_try_tm(&sl->locked)))
		return;

	rte_spinlock_lock(sl); /* fall-back */
}

static inline int
rte_spinlock_trylock_tm(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	if (likely(rte_try_tm(&sl->locked)))
		return 1;

	return rte_spinlock_trylock(sl);
}

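/* If the lock was really taken (elision failed and the fall-back path ran),
 * release it; otherwise we are still inside a transaction and must commit
 * it with rte_xend().
 */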
static inline void
rte_spinlock_unlock_tm(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	if (unlikely(sl->locked))
		rte_spinlock_unlock(sl);
	else
		rte_xend();
}

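/* The recursive variants below mirror the plain *_tm functions, operating
 * on the inner spinlock of the recursive lock.
 */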
static inline void
rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
	__rte_no_thread_safety_analysis
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return;

	rte_spinlock_recursive_lock(slr); /* fall-back */
}

static inline void
rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
	__rte_no_thread_safety_analysis
{
	if (unlikely(slr->sl.locked))
		rte_spinlock_recursive_unlock(slr);
	else
		rte_xend();
}

static inline int
rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
	__rte_no_thread_safety_analysis
{
	if (likely(rte_try_tm(&slr->sl.locked)))
		return 1;

	return rte_spinlock_recursive_trylock(slr);
}
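
/*
 * Minimal usage sketch of the lock-elision API (illustrative only): the
 * critical section is first attempted as an RTM transaction and falls
 * back to the plain lock when elision is not possible.
 *
 *	rte_spinlock_lock_tm(&lock);
 *	... critical section ...
 *	rte_spinlock_unlock_tm(&lock);
 */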

#ifdef __cplusplus
}
#endif

#endif /* _RTE_SPINLOCK_X86_64_H_ */