/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015 Cavium, Inc
 * Copyright(c) 2022 StarFive
 * Copyright(c) 2022 SiFive
 * Copyright(c) 2022 Semihalf
 */

#ifndef RTE_CYCLES_RISCV_H
#define RTE_CYCLES_RISCV_H

#include "generic/rte_cycles.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifndef RTE_RISCV_RDTSC_USE_HPM
#define RTE_RISCV_RDTSC_USE_HPM 0
#endif

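/*
 * Note on selecting the counter: because of the #ifndef guard above, defining
 * RTE_RISCV_RDTSC_USE_HPM as 1 before this header is seen switches rte_rdtsc()
 * to the HPM CYCLE counter. A minimal sketch of doing so from the compiler
 * command line (the exact build integration shown is an assumption, not a
 * documented DPDK option):
 *
 *   cc -DRTE_RISCV_RDTSC_USE_HPM=1 ... app.c
 */
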
/** Read wall time counter */
static __rte_always_inline uint64_t
__rte_riscv_rdtime(void)
{
	uint64_t tsc;
	asm volatile("csrr %0, time" : "=r" (tsc) : : "memory");
	return tsc;
}

/** Read wall time counter ensuring no re-ordering */
static __rte_always_inline uint64_t
__rte_riscv_rdtime_precise(void)
{
	asm volatile("fence" : : : "memory");
	return __rte_riscv_rdtime();
}

/** Read hart cycle counter */
static __rte_always_inline uint64_t
__rte_riscv_rdcycle(void)
{
	uint64_t tsc;
	asm volatile("csrr %0, cycle" : "=r" (tsc) : : "memory");
	return tsc;
}

/** Read hart cycle counter ensuring no re-ordering */
static __rte_always_inline uint64_t
__rte_riscv_rdcycle_precise(void)
{
	asm volatile("fence" : : : "memory");
	return __rte_riscv_rdcycle();
}

/**
 * Read the time base register.
 *
 * @return
 *   The time base for this lcore.
 */
static __rte_always_inline uint64_t
rte_rdtsc(void)
{
	/**
	 * By default the TIME userspace counter is used. It is stable and
	 * shared across cores, though its frequency may not be sufficient
	 * for all applications.
	 */
	if (!RTE_RISCV_RDTSC_USE_HPM)
		return __rte_riscv_rdtime();
	/**
	 * Alternatively, the HPM's CYCLE counter may be used. However, the
	 * ISA does not guarantee that this counter has a stable frequency or
	 * that it is always enabled for userspace access (it may trap to the
	 * kernel or firmware, though as of Linux kernel 5.13 it doesn't).
	 * It is also highly probable that values of this counter are not
	 * synchronized across cores. Therefore, if it is to be used as a
	 * timer, it can only be used in the scope of a single core.
	 */
	return __rte_riscv_rdcycle();
}

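/*
 * Usage sketch (illustrative only, not part of this header): rte_rdtsc()
 * pairs with rte_get_tsc_hz() from generic/rte_cycles.h to express durations
 * in counter ticks, e.g. a simple busy-wait deadline:
 *
 *   uint64_t hz = rte_get_tsc_hz();
 *   uint64_t deadline = rte_rdtsc() + hz / 10;   // roughly 100 ms from now
 *   while (rte_rdtsc() < deadline)
 *           ;                                    // spin until the deadline
 */
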
static inline uint64_t
rte_rdtsc_precise(void)
{
	if (!RTE_RISCV_RDTSC_USE_HPM)
		return __rte_riscv_rdtime_precise();
	return __rte_riscv_rdcycle_precise();
}

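/*
 * Timing sketch (illustrative only): the fenced variant is the one to bracket
 * a measured region with, so the counter reads are not reordered around it.
 * do_work() is a hypothetical placeholder.
 *
 *   uint64_t t0 = rte_rdtsc_precise();
 *   do_work();
 *   uint64_t cycles = rte_rdtsc_precise() - t0;
 *   double seconds = (double)cycles / rte_get_tsc_hz();
 */
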
static __rte_always_inline uint64_t
rte_get_tsc_cycles(void)
{
	return rte_rdtsc();
}

#ifdef __cplusplus
}
#endif

#endif /* RTE_CYCLES_RISCV_H */