1ad8b1aafSjsg // SPDX-License-Identifier: MIT
2ad8b1aafSjsg /*
3ad8b1aafSjsg * Copyright © 2020 Intel Corporation
4ad8b1aafSjsg */
5ad8b1aafSjsg
6ad8b1aafSjsg #include "i915_drv.h"
71bb76ff1Sjsg #include "i915_reg.h"
8ad8b1aafSjsg #include "intel_gt.h"
9ad8b1aafSjsg #include "intel_gt_clock_utils.h"
10*f005ef32Sjsg #include "intel_gt_print.h"
111bb76ff1Sjsg #include "intel_gt_regs.h"
12ad8b1aafSjsg
read_reference_ts_freq(struct intel_uncore * uncore)135ca02815Sjsg static u32 read_reference_ts_freq(struct intel_uncore *uncore)
14ad8b1aafSjsg {
155ca02815Sjsg u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE);
165ca02815Sjsg u32 base_freq, frac_freq;
17ad8b1aafSjsg
185ca02815Sjsg base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >>
195ca02815Sjsg GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1;
205ca02815Sjsg base_freq *= 1000000;
21ad8b1aafSjsg
225ca02815Sjsg frac_freq = ((ts_override &
235ca02815Sjsg GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >>
245ca02815Sjsg GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT);
255ca02815Sjsg frac_freq = 1000000 / (frac_freq + 1);
265ca02815Sjsg
275ca02815Sjsg return base_freq + frac_freq;
285ca02815Sjsg }
295ca02815Sjsg
/*
 * Translate the crystal-clock selector field of RPM_CONFIG0 into a
 * frequency in Hz. Unknown selector values are flagged via MISSING_CASE
 * and reported as 0.
 */
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
					u32 rpm_config_reg)
{
	u32 sel = (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
		  GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;

	switch (sel) {
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
		return 19200000;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
		return 24000000;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ:
		return 25000000;
	case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ:
		return 38400000;
	default:
		MISSING_CASE(sel);
		return 0;
	}
}
555ca02815Sjsg
gen11_read_clock_frequency(struct intel_uncore * uncore)561bb76ff1Sjsg static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
575ca02815Sjsg {
585ca02815Sjsg u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
595ca02815Sjsg u32 freq = 0;
605ca02815Sjsg
615ca02815Sjsg /*
621bb76ff1Sjsg * Note that on gen11+, the clock frequency may be reconfigured.
631bb76ff1Sjsg * We do not, and we assume nobody else does.
641bb76ff1Sjsg *
655ca02815Sjsg * First figure out the reference frequency. There are 2 ways
665ca02815Sjsg * we can compute the frequency, either through the
675ca02815Sjsg * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
685ca02815Sjsg * tells us which one we should use.
695ca02815Sjsg */
705ca02815Sjsg if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
715ca02815Sjsg freq = read_reference_ts_freq(uncore);
725ca02815Sjsg } else {
735ca02815Sjsg u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
745ca02815Sjsg
755ca02815Sjsg freq = gen11_get_crystal_clock_freq(uncore, c0);
765ca02815Sjsg
775ca02815Sjsg /*
785ca02815Sjsg * Now figure out how the command stream's timestamp
795ca02815Sjsg * register increments from this frequency (it might
805ca02815Sjsg * increment only every few clock cycle).
815ca02815Sjsg */
825ca02815Sjsg freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
835ca02815Sjsg GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
845ca02815Sjsg }
855ca02815Sjsg
865ca02815Sjsg return freq;
875ca02815Sjsg }
885ca02815Sjsg
gen9_read_clock_frequency(struct intel_uncore * uncore)891bb76ff1Sjsg static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
901bb76ff1Sjsg {
911bb76ff1Sjsg u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
921bb76ff1Sjsg u32 freq = 0;
931bb76ff1Sjsg
941bb76ff1Sjsg if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
951bb76ff1Sjsg freq = read_reference_ts_freq(uncore);
961bb76ff1Sjsg } else {
971bb76ff1Sjsg freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
981bb76ff1Sjsg
991bb76ff1Sjsg /*
1001bb76ff1Sjsg * Now figure out how the command stream's timestamp
1011bb76ff1Sjsg * register increments from this frequency (it might
1021bb76ff1Sjsg * increment only every few clock cycle).
1031bb76ff1Sjsg */
1041bb76ff1Sjsg freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
1051bb76ff1Sjsg CTC_SHIFT_PARAMETER_SHIFT);
1061bb76ff1Sjsg }
1071bb76ff1Sjsg
1081bb76ff1Sjsg return freq;
1091bb76ff1Sjsg }
1101bb76ff1Sjsg
gen6_read_clock_frequency(struct intel_uncore * uncore)111*f005ef32Sjsg static u32 gen6_read_clock_frequency(struct intel_uncore *uncore)
1121bb76ff1Sjsg {
1131bb76ff1Sjsg /*
1141bb76ff1Sjsg * PRMs say:
1151bb76ff1Sjsg *
1161bb76ff1Sjsg * "The PCU TSC counts 10ns increments; this timestamp
1171bb76ff1Sjsg * reflects bits 38:3 of the TSC (i.e. 80ns granularity,
1181bb76ff1Sjsg * rolling over every 1.5 hours).
1191bb76ff1Sjsg */
1201bb76ff1Sjsg return 12500000;
1211bb76ff1Sjsg }
1221bb76ff1Sjsg
gen5_read_clock_frequency(struct intel_uncore * uncore)123*f005ef32Sjsg static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
124*f005ef32Sjsg {
125*f005ef32Sjsg /*
126*f005ef32Sjsg * 63:32 increments every 1000 ns
127*f005ef32Sjsg * 31:0 mbz
128*f005ef32Sjsg */
129*f005ef32Sjsg return 1000000000 / 1000;
130*f005ef32Sjsg }
131*f005ef32Sjsg
g4x_read_clock_frequency(struct intel_uncore * uncore)132*f005ef32Sjsg static u32 g4x_read_clock_frequency(struct intel_uncore *uncore)
133*f005ef32Sjsg {
134*f005ef32Sjsg /*
135*f005ef32Sjsg * 63:20 increments every 1/4 ns
136*f005ef32Sjsg * 19:0 mbz
137*f005ef32Sjsg *
138*f005ef32Sjsg * -> 63:32 increments every 1024 ns
139*f005ef32Sjsg */
140*f005ef32Sjsg return 1000000000 / 1024;
141*f005ef32Sjsg }
142*f005ef32Sjsg
gen4_read_clock_frequency(struct intel_uncore * uncore)143*f005ef32Sjsg static u32 gen4_read_clock_frequency(struct intel_uncore *uncore)
1441bb76ff1Sjsg {
1451bb76ff1Sjsg /*
1461bb76ff1Sjsg * PRMs say:
1471bb76ff1Sjsg *
1481bb76ff1Sjsg * "The value in this register increments once every 16
1491bb76ff1Sjsg * hclks." (through the “Clocking Configuration”
1501bb76ff1Sjsg * (“CLKCFG”) MCHBAR register)
151*f005ef32Sjsg *
152*f005ef32Sjsg * Testing on actual hardware has shown there is no /16.
1531bb76ff1Sjsg */
154*f005ef32Sjsg return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000;
1551bb76ff1Sjsg }
1561bb76ff1Sjsg
read_clock_frequency(struct intel_uncore * uncore)1571bb76ff1Sjsg static u32 read_clock_frequency(struct intel_uncore *uncore)
1581bb76ff1Sjsg {
1591bb76ff1Sjsg if (GRAPHICS_VER(uncore->i915) >= 11)
1601bb76ff1Sjsg return gen11_read_clock_frequency(uncore);
1611bb76ff1Sjsg else if (GRAPHICS_VER(uncore->i915) >= 9)
1621bb76ff1Sjsg return gen9_read_clock_frequency(uncore);
163*f005ef32Sjsg else if (GRAPHICS_VER(uncore->i915) >= 6)
164*f005ef32Sjsg return gen6_read_clock_frequency(uncore);
165*f005ef32Sjsg else if (GRAPHICS_VER(uncore->i915) == 5)
1661bb76ff1Sjsg return gen5_read_clock_frequency(uncore);
167*f005ef32Sjsg else if (IS_G4X(uncore->i915))
168*f005ef32Sjsg return g4x_read_clock_frequency(uncore);
169*f005ef32Sjsg else if (GRAPHICS_VER(uncore->i915) == 4)
170*f005ef32Sjsg return gen4_read_clock_frequency(uncore);
1711bb76ff1Sjsg else
172*f005ef32Sjsg return 0;
173ad8b1aafSjsg }
174ad8b1aafSjsg
intel_gt_init_clock_frequency(struct intel_gt * gt)175ad8b1aafSjsg void intel_gt_init_clock_frequency(struct intel_gt *gt)
176ad8b1aafSjsg {
1775ca02815Sjsg gt->clock_frequency = read_clock_frequency(gt->uncore);
1781bb76ff1Sjsg
1791bb76ff1Sjsg /* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
1801bb76ff1Sjsg if (GRAPHICS_VER(gt->i915) == 11)
1811bb76ff1Sjsg gt->clock_period_ns = NSEC_PER_SEC / 13750000;
1821bb76ff1Sjsg else if (gt->clock_frequency)
1835ca02815Sjsg gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
1845ca02815Sjsg
185ad8b1aafSjsg GT_TRACE(gt,
1865ca02815Sjsg "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
1875ca02815Sjsg gt->clock_frequency / 1000,
1885ca02815Sjsg gt->clock_period_ns,
1895ca02815Sjsg div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX),
1905ca02815Sjsg USEC_PER_SEC));
191ad8b1aafSjsg }
192ad8b1aafSjsg
193ad8b1aafSjsg #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
intel_gt_check_clock_frequency(const struct intel_gt * gt)194ad8b1aafSjsg void intel_gt_check_clock_frequency(const struct intel_gt *gt)
195ad8b1aafSjsg {
1965ca02815Sjsg if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
197*f005ef32Sjsg gt_err(gt, "GT clock frequency changed, was %uHz, now %uHz!\n",
198ad8b1aafSjsg gt->clock_frequency,
1995ca02815Sjsg read_clock_frequency(gt->uncore));
200ad8b1aafSjsg }
201ad8b1aafSjsg }
202ad8b1aafSjsg #endif
203ad8b1aafSjsg
/* 64-by-32 division, rounding the quotient up instead of truncating. */
static u64 div_u64_roundup(u64 numerator, u32 denominator)
{
	return div_u64(numerator + (denominator - 1), denominator);
}
208ad8b1aafSjsg
/* Convert a number of GT clock ticks to nanoseconds (rounded up). */
u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
	u64 ns = count * NSEC_PER_SEC;

	return div_u64_roundup(ns, gt->clock_frequency);
}
213ad8b1aafSjsg
/*
 * Convert a PM interval count to nanoseconds. The PM counter ticks at
 * 1/16th of the GT clock, hence the multiply by 16.
 */
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
{
	return intel_gt_clock_interval_to_ns(gt, count * 16);
}
218ad8b1aafSjsg
/* Convert nanoseconds to a number of GT clock ticks (rounded up). */
u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
	u64 ticks = gt->clock_frequency * ns;

	return div_u64_roundup(ticks, NSEC_PER_SEC);
}
223ad8b1aafSjsg
/*
 * Convert nanoseconds to PM interval counts (which tick at 1/16th of
 * the GT clock, hence the divide by 16).
 */
u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
{
	u64 interval = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16);

	/*
	 * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
	 * 8300) freezing up around GPU hangs. Looks as if even
	 * scheduling/timer interrupts start misbehaving if the RPS
	 * EI/thresholds are "bad", leading to a very sluggish or even
	 * frozen machine.
	 */
	if (GRAPHICS_VER(gt->i915) == 6)
		interval = div_u64_roundup(interval, 25) * 25;

	return interval;
}
241