xref: /netbsd-src/external/apache2/llvm/dist/llvm/utils/benchmark/src/cycleclock.h (revision 82d56013d7b633d116a93943de88e08335357a7c)
17330f729Sjoerg // ----------------------------------------------------------------------
27330f729Sjoerg // CycleClock
37330f729Sjoerg //    A CycleClock tells you the current time in Cycles.  The "time"
47330f729Sjoerg //    is actually time since power-on.  This is like time() but doesn't
57330f729Sjoerg //    involve a system call and is much more precise.
67330f729Sjoerg //
77330f729Sjoerg // NOTE: Not all cpu/platform/kernel combinations guarantee that this
87330f729Sjoerg // clock increments at a constant rate or is synchronized across all logical
97330f729Sjoerg // cpus in a system.
107330f729Sjoerg //
117330f729Sjoerg // If you need the above guarantees, please consider using a different
127330f729Sjoerg // API. There are efforts to provide an interface which provides a millisecond
137330f729Sjoerg // granularity and implemented as a memory read. A memory read is generally
147330f729Sjoerg // cheaper than the CycleClock for many architectures.
157330f729Sjoerg //
167330f729Sjoerg // Also, in some out of order CPU implementations, the CycleClock is not
177330f729Sjoerg // serializing. So if you're trying to count at cycles granularity, your
187330f729Sjoerg // data might be inaccurate due to out of order instruction execution.
197330f729Sjoerg // ----------------------------------------------------------------------
207330f729Sjoerg 
217330f729Sjoerg #ifndef BENCHMARK_CYCLECLOCK_H_
227330f729Sjoerg #define BENCHMARK_CYCLECLOCK_H_
237330f729Sjoerg 
247330f729Sjoerg #include <cstdint>
257330f729Sjoerg 
267330f729Sjoerg #include "benchmark/benchmark.h"
277330f729Sjoerg #include "internal_macros.h"
287330f729Sjoerg 
297330f729Sjoerg #if defined(BENCHMARK_OS_MACOSX)
307330f729Sjoerg #include <mach/mach_time.h>
317330f729Sjoerg #endif
327330f729Sjoerg // For MSVC, we want to use '_asm rdtsc' when possible (since it works
337330f729Sjoerg // with even ancient MSVC compilers), and when not possible the
347330f729Sjoerg // __rdtsc intrinsic, declared in <intrin.h>.  Unfortunately, in some
357330f729Sjoerg // environments, <windows.h> and <intrin.h> have conflicting
367330f729Sjoerg // declarations of some other intrinsics, breaking compilation.
377330f729Sjoerg // Therefore, we simply declare __rdtsc ourselves. See also
387330f729Sjoerg // http://connect.microsoft.com/VisualStudio/feedback/details/262047
397330f729Sjoerg #if defined(COMPILER_MSVC) && !defined(_M_IX86)
407330f729Sjoerg extern "C" uint64_t __rdtsc();
417330f729Sjoerg #pragma intrinsic(__rdtsc)
427330f729Sjoerg #endif
437330f729Sjoerg 
447330f729Sjoerg #if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
457330f729Sjoerg #include <sys/time.h>
467330f729Sjoerg #include <time.h>
477330f729Sjoerg #endif
487330f729Sjoerg 
497330f729Sjoerg #ifdef BENCHMARK_OS_EMSCRIPTEN
507330f729Sjoerg #include <emscripten.h>
517330f729Sjoerg #endif
527330f729Sjoerg 
537330f729Sjoerg namespace benchmark {
547330f729Sjoerg // NOTE: only i386 and x86_64 have been well tested.
557330f729Sjoerg // PPC, sparc, alpha, and ia64 are based on
567330f729Sjoerg //    http://peter.kuscsik.com/wordpress/?p=14
577330f729Sjoerg // with modifications by m3b.  See also
587330f729Sjoerg //    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
597330f729Sjoerg namespace cycleclock {
607330f729Sjoerg // This should return the number of cycles since power-on.  Thread-safe.
// Returns a raw monotonic tick count whose unit is platform-dependent:
// TSC cycles (x86), time-base ticks (PPC), mach time units (macOS),
// or nanoseconds/microseconds on the clock_gettime/gettimeofday fallbacks.
// Callers must scale by the platform tick frequency to convert to seconds.
Now()617330f729Sjoerg inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
627330f729Sjoerg #if defined(BENCHMARK_OS_MACOSX)
637330f729Sjoerg   // this goes at the top because we need ALL Macs, regardless of
647330f729Sjoerg   // architecture, to return the number of "mach time units" that
657330f729Sjoerg   // have passed since startup.  See sysinfo.cc where
667330f729Sjoerg   // InitializeSystemInfo() sets the supposed cpu clock frequency of
677330f729Sjoerg   // macs to the number of mach time units per second, not actual
687330f729Sjoerg   // CPU clock frequency (which can change in the face of CPU
697330f729Sjoerg   // frequency scaling).  Also note that when the Mac sleeps, this
707330f729Sjoerg   // counter pauses; it does not continue counting, nor does it
717330f729Sjoerg   // reset to zero.
727330f729Sjoerg   return mach_absolute_time();
737330f729Sjoerg #elif defined(BENCHMARK_OS_EMSCRIPTEN)
747330f729Sjoerg   // this goes above x86-specific code because old versions of Emscripten
757330f729Sjoerg   // define __x86_64__, although they have nothing to do with it.
   // emscripten_get_now() returns milliseconds as a double; * 1e+6 converts
   // to nanoseconds before truncating to int64_t.
767330f729Sjoerg   return static_cast<int64_t>(emscripten_get_now() * 1e+6);
777330f729Sjoerg #elif defined(__i386__)
   // On 32-bit x86 the "=A" constraint binds the EDX:EAX pair, which is
   // exactly where RDTSC deposits the 64-bit timestamp counter.
787330f729Sjoerg   int64_t ret;
797330f729Sjoerg   __asm__ volatile("rdtsc" : "=A"(ret));
807330f729Sjoerg   return ret;
817330f729Sjoerg #elif defined(__x86_64__) || defined(__amd64__)
   // On x86-64, RDTSC still splits the counter across EAX (low 32 bits)
   // and EDX (high 32 bits); recombine the halves below.
827330f729Sjoerg   uint64_t low, high;
837330f729Sjoerg   __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
847330f729Sjoerg   return (high << 32) | low;
857330f729Sjoerg #elif defined(__powerpc__) || defined(__ppc__)
867330f729Sjoerg   // This returns a time-base, which is not always precisely a cycle-count.
87*82d56013Sjoerg #if defined(__powerpc64__) || defined(__ppc64__)
   // 64-bit PPC: SPR 268 is the full 64-bit time base, read in one shot.
88*82d56013Sjoerg   int64_t tb;
89*82d56013Sjoerg   asm volatile("mfspr %0, 268" : "=r"(tb));
90*82d56013Sjoerg   return tb;
91*82d56013Sjoerg #else
   // 32-bit PPC: the 64-bit time base must be read as two halves, so the
   // upper word is read twice (before and after the lower word) to detect
   // a carry into the upper word between the reads.
92*82d56013Sjoerg   uint32_t tbl, tbu0, tbu1;
93*82d56013Sjoerg   asm volatile(
94*82d56013Sjoerg       "mftbu %0\n"
95*82d56013Sjoerg       "mftb %1\n"
96*82d56013Sjoerg       "mftbu %2"
97*82d56013Sjoerg       : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
   // If the upper word changed between the two mftbu reads (tbu0 != tbu1),
   // the mask below is all-zero and the stale low word is replaced with 0,
   // so it pairs consistently with the freshly-read upper word tbu1.
98*82d56013Sjoerg   tbl &= -static_cast<int32_t>(tbu0 == tbu1);
99*82d56013Sjoerg   // high 32 bits in tbu1; low 32 bits in tbl  (tbu0 is no longer needed)
100*82d56013Sjoerg   return (static_cast<uint64_t>(tbu1) << 32) | tbl;
101*82d56013Sjoerg #endif
1027330f729Sjoerg #elif defined(__sparc__)
   // The .byte sequence encodes "rd %tick, %g1" for assemblers that do not
   // recognize the mnemonic; the result is then moved out of %g1.
1037330f729Sjoerg   int64_t tick;
1047330f729Sjoerg   asm(".byte 0x83, 0x41, 0x00, 0x00");
1057330f729Sjoerg   asm("mov   %%g1, %0" : "=r"(tick));
1067330f729Sjoerg   return tick;
1077330f729Sjoerg #elif defined(__ia64__)
   // ar.itc is the Itanium interval time counter.
1087330f729Sjoerg   int64_t itc;
1097330f729Sjoerg   asm("mov %0 = ar.itc" : "=r"(itc));
1107330f729Sjoerg   return itc;
1117330f729Sjoerg #elif defined(COMPILER_MSVC) && defined(_M_IX86)
1127330f729Sjoerg   // Older MSVC compilers (like 7.x) don't seem to support the
1137330f729Sjoerg   // __rdtsc intrinsic properly, so I prefer to use _asm instead
1147330f729Sjoerg   // when I know it will work.  Otherwise, I'll use __rdtsc and hope
1157330f729Sjoerg   // the code is being compiled with a non-ancient compiler.
   // No explicit return statement here: on x86, MSVC takes a 64-bit return
   // value from the EDX:EAX register pair, which RDTSC sets directly.
1167330f729Sjoerg   _asm rdtsc
1177330f729Sjoerg #elif defined(COMPILER_MSVC)
   // Declared above (or via <intrin.h>); see the comment near the top of
   // this header about why __rdtsc is declared manually.
1187330f729Sjoerg   return __rdtsc();
1197330f729Sjoerg #elif defined(BENCHMARK_OS_NACL)
1207330f729Sjoerg   // Native Client validator on x86/x86-64 allows RDTSC instructions,
1217330f729Sjoerg   // and this case is handled above. Native Client validator on ARM
1227330f729Sjoerg   // rejects MRC instructions (used in the ARM-specific sequence below),
1237330f729Sjoerg   // so we handle it here. Portable Native Client compiles to
1247330f729Sjoerg   // architecture-agnostic bytecode, which doesn't provide any
1257330f729Sjoerg   // cycle counter access mnemonics.
1267330f729Sjoerg 
1277330f729Sjoerg   // Native Client does not provide any API to access cycle counter.
1287330f729Sjoerg   // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
1297330f729Sjoerg   // because it provides nanosecond resolution (which is noticeable at
1307330f729Sjoerg   // least for PNaCl modules running on x86 Mac & Linux).
1317330f729Sjoerg   // Initialize to always return 0 if clock_gettime fails.
1327330f729Sjoerg   struct timespec ts = { 0, 0 };
1337330f729Sjoerg   clock_gettime(CLOCK_MONOTONIC, &ts);
1347330f729Sjoerg   return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
1357330f729Sjoerg #elif defined(__aarch64__)
1367330f729Sjoerg   // System timer of ARMv8 runs at a different frequency than the CPU's.
1377330f729Sjoerg   // The frequency is fixed, typically in the range 1-50MHz.  It can be
1387330f729Sjoerg   // read at CNTFRQ special register.  We assume the OS has set up
1397330f729Sjoerg   // the virtual timer properly.
1407330f729Sjoerg   int64_t virtual_timer_value;
1417330f729Sjoerg   asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
1427330f729Sjoerg   return virtual_timer_value;
1437330f729Sjoerg #elif defined(__ARM_ARCH)
1447330f729Sjoerg   // V6 is the earliest arch that has a standard cyclecount
1457330f729Sjoerg   // Native Client validator doesn't allow MRC instructions.
1467330f729Sjoerg #if (__ARM_ARCH >= 6)
1477330f729Sjoerg   uint32_t pmccntr;
1487330f729Sjoerg   uint32_t pmuseren;
1497330f729Sjoerg   uint32_t pmcntenset;
1507330f729Sjoerg   // Read the user mode perf monitor counter access permissions.
1517330f729Sjoerg   asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
1527330f729Sjoerg   if (pmuseren & 1) {  // Allows reading perfmon counters for user mode code.
1537330f729Sjoerg     asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
1547330f729Sjoerg     if (pmcntenset & 0x80000000ul) {  // Is it counting?
1557330f729Sjoerg       asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
1567330f729Sjoerg       // The counter is set up to count every 64th cycle
1577330f729Sjoerg       return static_cast<int64_t>(pmccntr) * 64;  // Should optimize to << 6
1587330f729Sjoerg     }
1597330f729Sjoerg   }
1607330f729Sjoerg #endif
   // Soft fallback when the cycle counter is unavailable or disabled:
   // microsecond-resolution wall time instead of cycles.
1617330f729Sjoerg   struct timeval tv;
1627330f729Sjoerg   gettimeofday(&tv, nullptr);
1637330f729Sjoerg   return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
164*82d56013Sjoerg #elif defined(__mips__) || defined(__m68k__)
1657330f729Sjoerg   // mips apparently only allows rdtsc for superusers, so we fall
1667330f729Sjoerg   // back to gettimeofday.  It's possible clock_gettime would be better.
1677330f729Sjoerg   struct timeval tv;
1687330f729Sjoerg   gettimeofday(&tv, nullptr);
1697330f729Sjoerg   return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
1707330f729Sjoerg #elif defined(__s390__) // Covers both s390 and s390x.
1717330f729Sjoerg   // Return the CPU clock.
   // NOTE(review): STCK stores the z/Architecture TOD (time-of-day) clock,
   // a constant-rate wall clock rather than a CPU cycle counter — adequate
   // here since callers only need a monotonic tick source; confirm against
   // how sysinfo.cc derives the tick frequency for s390.
1727330f729Sjoerg   uint64_t tsc;
1737330f729Sjoerg   asm("stck %0" : "=Q" (tsc) : : "cc");
1747330f729Sjoerg   return tsc;
1757330f729Sjoerg #elif defined(__riscv) // RISC-V
1767330f729Sjoerg   // Use RDCYCLE (and RDCYCLEH on riscv32)
1777330f729Sjoerg #if __riscv_xlen == 32
178*82d56013Sjoerg   uint32_t cycles_lo, cycles_hi0, cycles_hi1;
179*82d56013Sjoerg   // This asm also includes the PowerPC overflow handling strategy, as above.
180*82d56013Sjoerg   // Implemented in assembly because Clang insisted on branching.
   // sub/seqz/sub builds an all-ones mask when the two rdcycleh reads agree
   // (all-zeros otherwise); the final "and" zeroes the low word on rollover,
   // mirroring the tbl masking in the PPC32 path.
181*82d56013Sjoerg   asm volatile(
182*82d56013Sjoerg       "rdcycleh %0\n"
183*82d56013Sjoerg       "rdcycle %1\n"
184*82d56013Sjoerg       "rdcycleh %2\n"
185*82d56013Sjoerg       "sub %0, %0, %2\n"
186*82d56013Sjoerg       "seqz %0, %0\n"
187*82d56013Sjoerg       "sub %0, zero, %0\n"
188*82d56013Sjoerg       "and %1, %1, %0\n"
189*82d56013Sjoerg       : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
190*82d56013Sjoerg   return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
1917330f729Sjoerg #else
   // riscv64: a single rdcycle reads the full 64-bit counter.
1927330f729Sjoerg   uint64_t cycles;
193*82d56013Sjoerg   asm volatile("rdcycle %0" : "=r"(cycles));
1947330f729Sjoerg   return cycles;
1957330f729Sjoerg #endif
1967330f729Sjoerg #else
1977330f729Sjoerg // The soft failover to a generic implementation is automatic only for ARM.
1987330f729Sjoerg // For other platforms the developer is expected to make an attempt to create
1997330f729Sjoerg // a fast implementation and use generic version if nothing better is available.
2007330f729Sjoerg #error You need to define CycleTimer for your OS and CPU
2017330f729Sjoerg #endif
2027330f729Sjoerg }
2037330f729Sjoerg }  // end namespace cycleclock
2047330f729Sjoerg }  // end namespace benchmark
2057330f729Sjoerg 
2067330f729Sjoerg #endif  // BENCHMARK_CYCLECLOCK_H_
207