xref: /dpdk/lib/eal/x86/include/rte_spinlock.h (revision 719834a6849e1daf4a70ff7742bbcc3ae7e25607)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2010-2014 Intel Corporation
399a2dd95SBruce Richardson  */
499a2dd95SBruce Richardson 
599a2dd95SBruce Richardson #ifndef _RTE_SPINLOCK_X86_64_H_
699a2dd95SBruce Richardson #define _RTE_SPINLOCK_X86_64_H_
799a2dd95SBruce Richardson 
899a2dd95SBruce Richardson #include "generic/rte_spinlock.h"
999a2dd95SBruce Richardson #include "rte_rtm.h"
1099a2dd95SBruce Richardson #include "rte_cpuflags.h"
1199a2dd95SBruce Richardson #include "rte_branch_prediction.h"
1299a2dd95SBruce Richardson #include "rte_common.h"
1399a2dd95SBruce Richardson #include "rte_pause.h"
1499a2dd95SBruce Richardson #include "rte_cycles.h"
1599a2dd95SBruce Richardson 
16*719834a6SMattias Rönnblom #ifdef __cplusplus
17*719834a6SMattias Rönnblom extern "C" {
18*719834a6SMattias Rönnblom #endif
19*719834a6SMattias Rönnblom 
2099a2dd95SBruce Richardson #define RTE_RTM_MAX_RETRIES (20)
2199a2dd95SBruce Richardson #define RTE_XABORT_LOCK_BUSY (0xff)
2299a2dd95SBruce Richardson 
2399a2dd95SBruce Richardson #ifndef RTE_FORCE_INTRINSICS
/*
 * Take the spinlock, busy-waiting until it is acquired.
 *
 * Implemented as a classic test-and-set with a read-only inner wait loop:
 * the atomic xchg attempts acquisition, and while the lock is held we only
 * read it (with PAUSE) so the cache line is not bounced by repeated writes.
 */
static inline void
rte_spinlock_lock(rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	int lock_val = 1;
	asm volatile (
			"1:\n"
			"xchg %[locked], %[lv]\n"	/* atomically swap 1 into sl->locked; old value -> lv */
			"test %[lv], %[lv]\n"		/* old value 0 means the lock was free and is now ours */
			"jz 3f\n"
			"2:\n"
			"pause\n"			/* spin-wait hint: reduces power and SMT contention */
			"cmpl $0, %[locked]\n"		/* poll (read-only) until the lock looks free */
			"jnz 2b\n"
			"jmp 1b\n"			/* looks free: retry the atomic xchg */
			"3:\n"
			: [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
			: "[lv]" (lock_val)		/* lv is read-write: starts at 1, receives old value */
			: "memory");			/* compiler barrier: critical section stays inside */
}
4499a2dd95SBruce Richardson 
/*
 * Release the spinlock by atomically storing 0 into sl->locked.
 *
 * xchg on a memory operand is implicitly locked on x86, so this also acts
 * as a full barrier, ordering the critical section before the release.
 */
static inline void
rte_spinlock_unlock (rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	int unlock_val = 0;
	asm volatile (
			"xchg %[locked], %[ulv]\n"	/* atomic store of 0; previous value is discarded */
			: [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
			: "[ulv]" (unlock_val)
			: "memory");			/* compiler barrier: writes complete before unlock */
}
5699a2dd95SBruce Richardson 
/*
 * Try to take the spinlock once, without spinning.
 *
 * A single atomic xchg swaps 1 into sl->locked and returns the previous
 * value in lockval; if that was 0 the lock was free and is now held by us.
 * (If the lock was already held, writing 1 over 1 is a harmless no-op.)
 *
 * @return 1 if the lock was acquired, 0 if it was already held.
 */
static inline int
rte_spinlock_trylock (rte_spinlock_t *sl)
	__rte_no_thread_safety_analysis
{
	int lockval = 1;

	asm volatile (
			"xchg %[locked], %[lockval]"
			: [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
			: "[lockval]" (lockval)
			: "memory");

	return lockval == 0;
}
7199a2dd95SBruce Richardson #endif
7299a2dd95SBruce Richardson 
7399a2dd95SBruce Richardson extern uint8_t rte_rtm_supported;
7499a2dd95SBruce Richardson 
7599a2dd95SBruce Richardson static inline int rte_tm_supported(void)
7699a2dd95SBruce Richardson {
7799a2dd95SBruce Richardson 	return rte_rtm_supported;
7899a2dd95SBruce Richardson }
7999a2dd95SBruce Richardson 
/*
 * Try to enter the critical section as a hardware memory transaction.
 *
 * Makes up to RTE_RTM_MAX_RETRIES attempts.  On success, returns 1 with a
 * transaction left open (the caller commits it later via rte_xend()).
 * Returns 0 when RTM is unsupported or every attempt aborted, in which case
 * the caller must fall back to taking the lock conventionally.
 */
static inline int
rte_try_tm(volatile RTE_ATOMIC(int) *lock)
{
	int i, retries;

	/* No RTM on this CPU: caller must use the plain-lock path. */
	if (!rte_rtm_supported)
		return 0;

	retries = RTE_RTM_MAX_RETRIES;

	while (likely(retries--)) {

		unsigned int status = rte_xbegin();

		if (likely(RTE_XBEGIN_STARTED == status)) {
			/* Abort if the lock is currently held non-transactionally.
			 * Reading *lock inside the transaction means a later
			 * lock-taker will conflict with (and abort) us.
			 */
			if (unlikely(*lock))
				rte_xabort(RTE_XABORT_LOCK_BUSY);
			else
				return 1; /* transaction open; caller commits with rte_xend() */
		}
		/* Transaction aborted: wait for the lock to be released before
		 * retrying, otherwise the next attempt would abort immediately.
		 */
		while (*lock)
			rte_pause();

		if ((status & RTE_XABORT_CONFLICT) ||
		   ((status & RTE_XABORT_EXPLICIT) &&
		    (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
			/* add a small delay before retrying, basing the
			 * delay on the number of times we've already tried,
			 * to give a back-off type of behaviour. We
			 * randomize trycount by taking bits from the tsc count
			 */
			int try_count = RTE_RTM_MAX_RETRIES - retries;
			int pause_count = (rte_rdtsc() & 0x7) | 1;
			pause_count <<= try_count; /* exponential back-off */
			for (i = 0; i < pause_count; i++)
				rte_pause();
			continue;
		}

		/* Hardware says this abort cause is not worth retrying. */
		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
			break;
	}
	return 0;
}
12499a2dd95SBruce Richardson 
12599a2dd95SBruce Richardson static inline void
12699a2dd95SBruce Richardson rte_spinlock_lock_tm(rte_spinlock_t *sl)
127657a98f3SDavid Marchand 	__rte_no_thread_safety_analysis
12899a2dd95SBruce Richardson {
12999a2dd95SBruce Richardson 	if (likely(rte_try_tm(&sl->locked)))
13099a2dd95SBruce Richardson 		return;
13199a2dd95SBruce Richardson 
13299a2dd95SBruce Richardson 	rte_spinlock_lock(sl); /* fall-back */
13399a2dd95SBruce Richardson }
13499a2dd95SBruce Richardson 
13599a2dd95SBruce Richardson static inline int
13699a2dd95SBruce Richardson rte_spinlock_trylock_tm(rte_spinlock_t *sl)
137657a98f3SDavid Marchand 	__rte_no_thread_safety_analysis
13899a2dd95SBruce Richardson {
13999a2dd95SBruce Richardson 	if (likely(rte_try_tm(&sl->locked)))
14099a2dd95SBruce Richardson 		return 1;
14199a2dd95SBruce Richardson 
14299a2dd95SBruce Richardson 	return rte_spinlock_trylock(sl);
14399a2dd95SBruce Richardson }
14499a2dd95SBruce Richardson 
14599a2dd95SBruce Richardson static inline void
14699a2dd95SBruce Richardson rte_spinlock_unlock_tm(rte_spinlock_t *sl)
147657a98f3SDavid Marchand 	__rte_no_thread_safety_analysis
14899a2dd95SBruce Richardson {
14999a2dd95SBruce Richardson 	if (unlikely(sl->locked))
15099a2dd95SBruce Richardson 		rte_spinlock_unlock(sl);
15199a2dd95SBruce Richardson 	else
15299a2dd95SBruce Richardson 		rte_xend();
15399a2dd95SBruce Richardson }
15499a2dd95SBruce Richardson 
15599a2dd95SBruce Richardson static inline void
15699a2dd95SBruce Richardson rte_spinlock_recursive_lock_tm(rte_spinlock_recursive_t *slr)
157657a98f3SDavid Marchand 	__rte_no_thread_safety_analysis
15899a2dd95SBruce Richardson {
15999a2dd95SBruce Richardson 	if (likely(rte_try_tm(&slr->sl.locked)))
16099a2dd95SBruce Richardson 		return;
16199a2dd95SBruce Richardson 
16299a2dd95SBruce Richardson 	rte_spinlock_recursive_lock(slr); /* fall-back */
16399a2dd95SBruce Richardson }
16499a2dd95SBruce Richardson 
16599a2dd95SBruce Richardson static inline void
16699a2dd95SBruce Richardson rte_spinlock_recursive_unlock_tm(rte_spinlock_recursive_t *slr)
167657a98f3SDavid Marchand 	__rte_no_thread_safety_analysis
16899a2dd95SBruce Richardson {
16999a2dd95SBruce Richardson 	if (unlikely(slr->sl.locked))
17099a2dd95SBruce Richardson 		rte_spinlock_recursive_unlock(slr);
17199a2dd95SBruce Richardson 	else
17299a2dd95SBruce Richardson 		rte_xend();
17399a2dd95SBruce Richardson }
17499a2dd95SBruce Richardson 
17599a2dd95SBruce Richardson static inline int
17699a2dd95SBruce Richardson rte_spinlock_recursive_trylock_tm(rte_spinlock_recursive_t *slr)
177657a98f3SDavid Marchand 	__rte_no_thread_safety_analysis
17899a2dd95SBruce Richardson {
17999a2dd95SBruce Richardson 	if (likely(rte_try_tm(&slr->sl.locked)))
18099a2dd95SBruce Richardson 		return 1;
18199a2dd95SBruce Richardson 
18299a2dd95SBruce Richardson 	return rte_spinlock_recursive_trylock(slr);
18399a2dd95SBruce Richardson }
18499a2dd95SBruce Richardson 
18599a2dd95SBruce Richardson #ifdef __cplusplus
18699a2dd95SBruce Richardson }
18799a2dd95SBruce Richardson #endif
18899a2dd95SBruce Richardson 
18999a2dd95SBruce Richardson #endif /* _RTE_SPINLOCK_X86_64_H_ */
190