Lines Matching full:tsc
In GetInterpolatedTime():
 61    // This is the last TSC range, so we have to extrapolate. In this case,
 62    // we assume that each instruction took one TSC, which is what an
 65    return interpolate(tsc_conversion.ToNanos(tsc + items_count));
 67  if (items_count < (next_range->tsc - tsc)) {
 68    // If the number of items in this range is less than the total TSC duration
 69    // of this range, i.e., each instruction took longer than 1 TSC, then we
 72    // also assume that each instruction took 1 TSC. A proper way to improve
 79        std::min(tsc_conversion.ToNanos(tsc + items_count), next_range->nanos));
 82    // In this case, each item took less than 1 TSC, so some parallelism was
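Lines 61 to 82 implement a simple linear interpolation: the items of a TSC range are spread evenly over the range's duration in nanoseconds, extrapolating at 1 TSC per item when there is no next range, and clamping to the next range's start when items are sparser than TSC ticks. Below is a minimal, self-contained sketch of that scheme; TscRange, TscToNanos, and their fields are hypothetical stand-ins rather than the actual LLDB types, and the flat ns_per_tsc ratio is a simplification of the perf-style time_shift/time_mult conversion.

// interpolation_sketch.cpp - hypothetical, simplified illustration.
#include <algorithm>
#include <cstdint>

// Simplified TSC-to-nanoseconds conversion (a real perf-based conversion
// applies a time_shift/time_mult formula instead of a flat ratio).
struct TscToNanos {
  double ns_per_tsc;
  uint64_t ToNanos(uint64_t tsc) const {
    return static_cast<uint64_t>(tsc * ns_per_tsc);
  }
};

struct TscRange {
  uint64_t tsc;               // TSC value at the start of this range.
  uint64_t nanos;             // Wall-clock nanoseconds at the start.
  uint64_t items_count;       // Number of trace items inside this range.
  uint64_t first_item_index;  // Index of the first item in this range.
  const TscRange *next_range; // nullptr if this is the last range.
};

// Approximate the timestamp of the item at item_index by spreading the
// range's items evenly over its duration in nanoseconds.
uint64_t GetInterpolatedTime(const TscRange &range, uint64_t item_index,
                             const TscToNanos &conv) {
  if (range.items_count == 0)
    return range.nanos; // Nothing to spread; avoid dividing by zero.
  uint64_t items_since_start = item_index - range.first_item_index;

  auto interpolate = [&](uint64_t next_range_start_ns) {
    if (next_range_start_ns == range.nanos)
      next_range_start_ns++; // Guard against a zero-length range.
    double per_item = static_cast<double>(next_range_start_ns - range.nanos) /
                      static_cast<double>(range.items_count);
    return range.nanos + static_cast<uint64_t>(items_since_start * per_item);
  };

  if (!range.next_range) {
    // Last range: extrapolate, assuming each item took exactly 1 TSC.
    return interpolate(conv.ToNanos(range.tsc + range.items_count));
  }
  if (range.items_count < range.next_range->tsc - range.tsc) {
    // Fewer items than elapsed TSCs: assume 1 TSC per item, but never
    // interpolate past the start of the next range.
    return interpolate(std::min(conv.ToNanos(range.tsc + range.items_count),
                                range.next_range->nanos));
  }
  // More items than elapsed TSCs: some parallelism was achieved, so spread
  // the items across the full span up to the next range.
  return interpolate(range.next_range->nanos);
}

Keeping a pointer to the following range lets the last range be detected with a plain null check, which is where the extrapolation branch in the excerpt comes from.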
In NotifyTsc():
120  void DecodedThread::NotifyTsc(TSC tsc) {
121    if (m_last_tsc && (*m_last_tsc)->second.tsc == tsc)
124    assert(tsc >= (*m_last_tsc)->second.tsc &&
128        m_tscs.emplace(GetItemsCount(), TSCRange{tsc, 0, GetItemsCount()}).first;
131      uint64_t nanos = m_tsc_conversion->ToNanos(tsc);
135          .emplace(GetItemsCount(), NanosecondsRange{nanos, tsc, nullptr, 0,
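Lines 120 to 135 show the bookkeeping: a notification with an unchanged TSC is ignored, timestamps are asserted to be monotonic, and a new range keyed by the current item count is emplaced into a map, with a parallel nanoseconds map when a conversion is available. A minimal sketch of that pattern follows; DecodedItems, last_range_, and the other names are illustrative rather than the actual LLDB members, and the nanoseconds map from line 135 is omitted for brevity.

// notify_tsc_sketch.cpp - hypothetical, simplified illustration.
#include <cassert>
#include <cstdint>
#include <map>
#include <optional>

class DecodedItems {
public:
  struct TscRange {
    uint64_t tsc;              // TSC at the start of the range.
    uint64_t items_count;      // Items seen while this range was current.
    uint64_t first_item_index; // Index of the first item in the range.
  };

  // Record a timestamp for the next item; duplicates are collapsed so one
  // range covers every consecutive item that shares the same TSC.
  void NotifyTsc(uint64_t tsc) {
    if (last_range_ && (*last_range_)->second.tsc == tsc)
      return; // Same TSC as before: keep extending the current range.
    assert((!last_range_ || tsc >= (*last_range_)->second.tsc) &&
           "TSCs must not decrease within a thread");
    last_range_ =
        tscs_.emplace(items_seen_, TscRange{tsc, 0, items_seen_}).first;
  }

  // Count a decoded trace item against the current range, if any.
  void NotifyItem() {
    if (last_range_)
      (*last_range_)->second.items_count++;
    ++items_seen_;
  }

private:
  std::map<uint64_t, TscRange> tscs_; // first item index -> range
  std::optional<std::map<uint64_t, TscRange>::iterator> last_range_;
  uint64_t items_seen_ = 0;
};

Keying the map by the first item index makes lookups by item position a simple ordered-map search, which matches the emplace(GetItemsCount(), ...) calls in the excerpt.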
In CalculateApproximateMemoryUsage():
259         (sizeof(uint64_t) + sizeof(TSC)) * m_tscs.size() +
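Line 259 estimates the footprint of m_tscs as entry count times the size of its key (a uint64_t item index) plus its value (a TSC), ignoring node and allocator overhead. A generic form of that estimate, for illustration only:

// map_usage_sketch.cpp - hypothetical, simplified illustration.
#include <cstddef>
#include <map>

// Rough std::map footprint: per-entry payload only, ignoring the
// red-black tree node overhead and allocator slack.
template <typename K, typename V>
std::size_t ApproximateMapUsage(const std::map<K, V> &m) {
  return (sizeof(K) + sizeof(V)) * m.size();
}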