/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Cavium, Inc
 * Copyright(c) 2020 Arm Limited
 */

#ifndef _RTE_CYCLES_ARM64_H_
#define _RTE_CYCLES_ARM64_H_

#include "generic/rte_cycles.h"

#ifdef __cplusplus
extern "C" {
#endif

/** Read generic counter frequency */
static __rte_always_inline uint64_t
__rte_arm64_cntfrq(void)
{
	uint64_t freq;

	asm volatile("mrs %0, cntfrq_el0" : "=r" (freq));
	return freq;
}

/** Read generic counter */
static __rte_always_inline uint64_t
__rte_arm64_cntvct(void)
{
	uint64_t tsc;

	asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
	return tsc;
}

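/**
 * Read generic counter, with a preceding instruction barrier so the
 * counter read is not speculated ahead of earlier instructions.
 */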
static __rte_always_inline uint64_t
__rte_arm64_cntvct_precise(void)
{
	asm volatile("isb" : : : "memory");
	return __rte_arm64_cntvct();
}

/**
 * Read the time base register.
 *
 * @return
 *   The time base for this lcore.
 */
#ifndef RTE_ARM_EAL_RDTSC_USE_PMU
/**
 * This call is portable to any ARMv8 architecture; however, cntvct_el0
 * typically runs at 100 MHz or less, so it may be too coarse for some
 * tasks. A sketch for converting its ticks to wall-clock time follows
 * the function below.
 */
static __rte_always_inline uint64_t
rte_rdtsc(void)
{
	return __rte_arm64_cntvct();
}
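
/*
 * Usage sketch (illustrative only, not part of this header): converting
 * generic-counter ticks to nanoseconds with the frequency reported by
 * cntfrq_el0. NS_PER_S comes from generic/rte_cycles.h; do_work() is a
 * placeholder for the code being timed. Note that the multiplication
 * can overflow for very long intervals.
 *
 * uint64_t start = rte_rdtsc();
 * do_work();
 * uint64_t ticks = rte_rdtsc() - start;
 * uint64_t ns = ticks * NS_PER_S / __rte_arm64_cntfrq();
 */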
#else
/**
 * This is an alternative method to enable rte_rdtsc() with the
 * high-resolution PMU cycle counter. The cycle counter runs at CPU
 * frequency, and this scheme uses the ARMv8 PMU subsystem to read it
 * from user space. However, user-space access to the PMU cycle counter
 * is not enabled by default in the arm64 Linux kernel.
 * It is possible to enable user-space access to the cycle counter by
 * configuring the PMU from privileged mode (kernel space):
 *
 * asm volatile("msr pmintenset_el1, %0" : : "r" ((u64)(0 << 31)));
 * asm volatile("msr pmcntenset_el0, %0" :: "r" BIT(31));
 * asm volatile("msr pmuserenr_el0, %0" : : "r"(BIT(0) | BIT(2)));
 * asm volatile("mrs %0, pmcr_el0" : "=r" (val));
 * val |= (BIT(0) | BIT(2));
 * isb();
 * asm volatile("msr pmcr_el0, %0" : : "r" (val));
 */

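/*
 * A minimal sketch of wrapping the enable sequence above in a Linux
 * kernel module (illustrative and unverified; enable_pmu_user_access()
 * and pmu_user_init() are hypothetical names). on_each_cpu() from
 * <linux/smp.h> runs the enable function on every online CPU:
 *
 * static void enable_pmu_user_access(void *info)
 * {
 *	u64 val;
 *
 *	// Enable the cycle counter (bit 31 of PMCNTENSET_EL0)
 *	asm volatile("msr pmcntenset_el0, %0" : : "r" (BIT(31)));
 *	// Allow EL0 access: EN (bit 0) and CR (bit 2) of PMUSERENR_EL0
 *	asm volatile("msr pmuserenr_el0, %0" : : "r" (BIT(0) | BIT(2)));
 *	// Set E (enable, bit 0) and C (cycle counter reset, bit 2) in PMCR_EL0
 *	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
 *	val |= (BIT(0) | BIT(2));
 *	isb();
 *	asm volatile("msr pmcr_el0, %0" : : "r" (val));
 * }
 *
 * static int __init pmu_user_init(void)
 * {
 *	on_each_cpu(enable_pmu_user_access, NULL, 1);
 *	return 0;
 * }
 */
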
/** Read PMU cycle counter */
static __rte_always_inline uint64_t
__rte_arm64_pmccntr(void)
{
	uint64_t tsc;

	asm volatile("mrs %0, pmccntr_el0" : "=r"(tsc));
	return tsc;
}

static __rte_always_inline uint64_t
rte_rdtsc(void)
{
	return __rte_arm64_pmccntr();
}
#endif

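/**
 * As rte_rdtsc(), but with a preceding instruction barrier (ISB) so the
 * counter read cannot be hoisted above earlier instructions; prefer it
 * when timing short code sequences.
 */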
static __rte_always_inline uint64_t
rte_rdtsc_precise(void)
{
	asm volatile("isb" : : : "memory");
	return rte_rdtsc();
}

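/** On arm64, rte_get_tsc_cycles() reads the same counter as rte_rdtsc(). */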
static __rte_always_inline uint64_t
rte_get_tsc_cycles(void)
{
	return rte_rdtsc();
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_CYCLES_ARM64_H_ */