// ----------------------------------------------------------------------
// CycleClock
//    A CycleClock tells you the current time in Cycles.  The "time"
//    is actually time since power-on.  This is like time() but doesn't
//    involve a system call and is much more precise.
//
// NOTE: Not all cpu/platform/kernel combinations guarantee that this
// clock increments at a constant rate or is synchronized across all logical
// cpus in a system.
//
// If you need the above guarantees, please consider using a different
// API. There are efforts to provide an interface that offers millisecond
// granularity and is implemented as a memory read. A memory read is
// generally cheaper than the CycleClock on many architectures.
//
// Also, in some out-of-order CPU implementations, the CycleClock is not
// serializing.  So if you're trying to count at cycle granularity, your
// data might be inaccurate due to out-of-order instruction execution.
// ----------------------------------------------------------------------
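//
// A minimal usage sketch (illustrative only): differences of two Now()
// readings give elapsed ticks; converting ticks to seconds requires a
// calibration value such as the hypothetical cycles_per_second below,
// which this header does not provide.
//
//   int64_t start = benchmark::cycleclock::Now();
//   DoWork();
//   int64_t elapsed_ticks = benchmark::cycleclock::Now() - start;
//   double seconds = static_cast<double>(elapsed_ticks) / cycles_per_second;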

#ifndef BENCHMARK_CYCLECLOCK_H_
#define BENCHMARK_CYCLECLOCK_H_

#include <cstdint>

#include "benchmark/benchmark.h"
#include "internal_macros.h"

#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_time.h>
#endif
// For MSVC, we want to use '_asm rdtsc' when possible (since it works
// with even ancient MSVC compilers), and when not possible the
// __rdtsc intrinsic, declared in <intrin.h>.  Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
#if defined(COMPILER_MSVC) && !defined(_M_IX86)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif

#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif

#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif

namespace benchmark {
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
//    http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b.  See also
//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
// This should return the number of cycles since power-on.  Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#if defined(BENCHMARK_OS_MACOSX)
  // this goes at the top because we need ALL Macs, regardless of
  // architecture, to return the number of "mach time units" that
  // have passed since startup.  See sysinfo.cc where
  // InitializeSystemInfo() sets the supposed cpu clock frequency of
  // macs to the number of mach time units per second, not actual
  // CPU clock frequency (which can change in the face of CPU
  // frequency scaling).  Also note that when the Mac sleeps, this
  // counter pauses; it does not continue counting, nor does it
  // reset to zero.
  return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
  // this goes above x86-specific code because old versions of Emscripten
  // define __x86_64__, although they have nothing to do with it.
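  // emscripten_get_now() returns a timestamp in milliseconds as a double;
  // multiplying by 1e+6 converts it to integer nanosecond ticks.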
  return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
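  // The "=A" constraint binds the 64-bit result to the edx:eax register pair,
  // which is exactly where rdtsc leaves the timestamp on 32-bit x86.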
  int64_t ret;
  __asm__ volatile("rdtsc" : "=A"(ret));
  return ret;
#elif defined(__x86_64__) || defined(__amd64__)
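  // On x86-64, rdtsc splits the 64-bit counter across edx (high half) and
  // eax (low half); read both and recombine them below.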
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
  return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
  // This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__)
  int64_t tb;
  asm volatile("mfspr %0, 268" : "=r"(tb));
  return tb;
#else
  uint32_t tbl, tbu0, tbu1;
  asm volatile(
      "mftbu %0\n"
      "mftb %1\n"
      "mftbu %2"
      : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
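  // If the upper half ticked over between the two mftbu reads, tbu0 != tbu1
  // and the expression below yields an all-zero mask that clears tbl, so the
  // low word stays consistent with the second upper read used in the result.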
  tbl &= -static_cast<int32_t>(tbu0 == tbu1);
  // high 32 bits in tbu1; low 32 bits in tbl  (tbu0 is no longer needed)
  return (static_cast<uint64_t>(tbu1) << 32) | tbl;
#endif
#elif defined(__sparc__)
  int64_t tick;
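  // The .byte sequence 0x83 0x41 0x00 0x00 encodes "rd %tick, %g1" (emitted
  // as raw bytes so it assembles even where the mnemonic is unavailable);
  // the value is then copied out of %g1 into tick.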
  asm(".byte 0x83, 0x41, 0x00, 0x00");
  asm("mov   %%g1, %0" : "=r"(tick));
  return tick;
#elif defined(__ia64__)
  int64_t itc;
  asm("mov %0 = ar.itc" : "=r"(itc));
  return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
  // Older MSVC compilers (like 7.x) don't seem to support the
  // __rdtsc intrinsic properly, so I prefer to use _asm instead
  // when I know it will work.  Otherwise, I'll use __rdtsc and hope
  // the code is being compiled with a non-ancient compiler.
  _asm rdtsc
#elif defined(COMPILER_MSVC)
  return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
  // Native Client validator on x86/x86-64 allows RDTSC instructions,
  // and this case is handled above. Native Client validator on ARM
  // rejects MRC instructions (used in the ARM-specific sequence below),
  // so we handle it here. Portable Native Client compiles to
  // architecture-agnostic bytecode, which doesn't provide any
  // cycle counter access mnemonics.

  // Native Client does not provide any API to access the cycle counter.
  // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
  // because it provides nanosecond resolution (which is noticeable at
  // least for PNaCl modules running on x86 Mac & Linux).
  // Initialize to always return 0 if clock_gettime fails.
  struct timespec ts = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
  // The ARMv8 system timer runs at a different frequency than the CPU's.
  // That frequency is fixed, typically in the range 1-50 MHz.  It can be
  // read from the CNTFRQ special register.  We assume the OS has set up
  // the virtual timer properly.
  int64_t virtual_timer_value;
  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
  return virtual_timer_value;
#elif defined(__ARM_ARCH)
  // ARMv6 is the earliest architecture with a standard cycle counter.
  // The Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
  uint32_t pmccntr;
  uint32_t pmuseren;
  uint32_t pmcntenset;
  // Read the user mode perf monitor counter access permissions.
  asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
  if (pmuseren & 1) {  // Allows reading perfmon counters for user mode code.
    asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
    if (pmcntenset & 0x80000000ul) {  // Is it counting?
      asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
      // The counter is set up to count every 64th cycle
      return static_cast<int64_t>(pmccntr) * 64;  // Should optimize to << 6
    }
  }
#endif
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__) || defined(__m68k__)
  // MIPS apparently only allows reading the cycle counter from privileged
  // (superuser) mode, so we fall back to gettimeofday.  It's possible
  // clock_gettime would be better.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__)  // Covers both s390 and s390x.
  // Return the TOD clock, read with the STCK (store clock) instruction.
  uint64_t tsc;
  asm("stck %0" : "=Q"(tsc) : : "cc");
  return tsc;
#elif defined(__riscv)  // RISC-V
  // Use RDCYCLE (and RDCYCLEH on riscv32)
#if __riscv_xlen == 32
  uint32_t cycles_lo, cycles_hi0, cycles_hi1;
  // This asm also includes the PowerPC overflow handling strategy, as above.
  // Implemented in assembly because Clang insisted on branching.
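  // The sub/seqz/sub sequence builds an all-ones mask when the two rdcycleh
  // reads agree (no rollover in between) and an all-zero mask otherwise; the
  // final "and" applies that mask to the low word, as in the PowerPC case.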
  asm volatile(
      "rdcycleh %0\n"
      "rdcycle %1\n"
      "rdcycleh %2\n"
      "sub %0, %0, %2\n"
      "seqz %0, %0\n"
      "sub %0, zero, %0\n"
      "and %1, %1, %0\n"
      : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
  return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
#else
  uint64_t cycles;
  asm volatile("rdcycle %0" : "=r"(cycles));
  return cycles;
#endif
#else
// The soft failover to a generic implementation is automatic only for ARM.
// For other platforms the developer is expected to make an attempt to create
// a fast implementation and to fall back to the generic version only if
// nothing better is available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
}  // end namespace cycleclock
}  // end namespace benchmark

#endif  // BENCHMARK_CYCLECLOCK_H_