xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/i915/gt/debugfs_gt_pm.c (revision 41ec02673d281bbb3d38e6c78504ce6e30c228c1)
1 /*	$NetBSD: debugfs_gt_pm.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/
2 
3 // SPDX-License-Identifier: MIT
4 
5 /*
6  * Copyright © 2019 Intel Corporation
7  */
8 
9 #include <sys/cdefs.h>
10 __KERNEL_RCSID(0, "$NetBSD: debugfs_gt_pm.c,v 1.2 2021/12/18 23:45:30 riastradh Exp $");
11 
12 #include <linux/seq_file.h>
13 
14 #include "debugfs_gt.h"
15 #include "debugfs_gt_pm.h"
16 #include "i915_drv.h"
17 #include "intel_gt.h"
18 #include "intel_llc.h"
19 #include "intel_rc6.h"
20 #include "intel_rps.h"
21 #include "intel_runtime_pm.h"
22 #include "intel_sideband.h"
23 #include "intel_uncore.h"
24 
fw_domains_show(struct seq_file * m,void * data)25 static int fw_domains_show(struct seq_file *m, void *data)
26 {
27 	struct intel_gt *gt = m->private;
28 	struct intel_uncore *uncore = gt->uncore;
29 	struct intel_uncore_forcewake_domain *fw_domain;
30 	unsigned int tmp;
31 
32 	seq_printf(m, "user.bypass_count = %u\n",
33 		   uncore->user_forcewake_count);
34 
35 	for_each_fw_domain(fw_domain, uncore, tmp)
36 		seq_printf(m, "%s.wake_count = %u\n",
37 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
38 			   READ_ONCE(fw_domain->wake_count));
39 
40 	return 0;
41 }
42 DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains);
43 
print_rc6_res(struct seq_file * m,const char * title,const i915_reg_t reg)44 static void print_rc6_res(struct seq_file *m,
45 			  const char *title,
46 			  const i915_reg_t reg)
47 {
48 	struct intel_gt *gt = m->private;
49 	intel_wakeref_t wakeref;
50 
51 	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
52 		seq_printf(m, "%s %u (%llu us)\n", title,
53 			   intel_uncore_read(gt->uncore, reg),
54 			   intel_rc6_residency_us(&gt->rc6, reg));
55 }
56 
vlv_drpc(struct seq_file * m)57 static int vlv_drpc(struct seq_file *m)
58 {
59 	struct intel_gt *gt = m->private;
60 	struct intel_uncore *uncore = gt->uncore;
61 	u32 rcctl1, pw_status;
62 
63 	pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
64 	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
65 
66 	seq_printf(m, "RC6 Enabled: %s\n",
67 		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
68 					GEN6_RC_CTL_EI_MODE(1))));
69 	seq_printf(m, "Render Power Well: %s\n",
70 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
71 	seq_printf(m, "Media Power Well: %s\n",
72 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
73 
74 	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
75 	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
76 
77 	return fw_domains_show(m, NULL);
78 }
79 
/*
 * Dump gen6+ RC6 state: RC control enables, gen9+ render/media power
 * gating, the current RC state decoded from GT_CORE_STATUS, the RC6
 * residency counters and, on gen6/7, the RC6 voltage IDs read from
 * pcode.  Ends with the forcewake domain summary.
 */
static int gen6_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* _fw read: sample the status without taking a forcewake ref. */
	gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);

	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
	if (INTEL_GEN(i915) >= 9) {
		gen9_powergate_enable =
			intel_uncore_read(uncore, GEN9_PG_ENABLE);
		gen9_powergate_status =
			intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 voltage IDs are only exposed via pcode on gen6/7. */
	if (INTEL_GEN(i915) <= 7)
		sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(i915) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			   yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the current RC state from GT_CORE_STATUS. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(i915) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			   (gen9_powergate_status &
			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(i915) <= 7) {
		/* One VID per byte of the pcode rc6vids reply. */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	/* Append the forcewake domain summary. */
	return fw_domains_show(m, NULL);
}
167 
/*
 * Dump Ironlake render-standby state: the MEMMODECTL boost/control
 * knobs, the RS1/RS2 voltage IDs from CRSTANDVID, and the current RSx
 * state decoded from RSTDBYCTL.
 */
static int ilk_drpc(struct seq_file *m)
{
	struct intel_gt *gt = m->private;
	struct intel_uncore *uncore = gt->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	/* CRSTANDVID: RS1 VID in bits 0-5, RS2 VID in bits 8-13. */
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	/* Decode the current render-standby state from RSTDBYCTL. */
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
225 
drpc_show(struct seq_file * m,void * unused)226 static int drpc_show(struct seq_file *m, void *unused)
227 {
228 	struct intel_gt *gt = m->private;
229 	struct drm_i915_private *i915 = gt->i915;
230 	intel_wakeref_t wakeref;
231 	int err = -ENODEV;
232 
233 	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
234 		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
235 			err = vlv_drpc(m);
236 		else if (INTEL_GEN(i915) >= 6)
237 			err = gen6_drpc(m);
238 		else
239 			err = ilk_drpc(m);
240 	}
241 
242 	return err;
243 }
244 DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc);
245 
/*
 * debugfs "frequency" file: dump the RPS/turbo frequency state.
 * Three hardware paths: Ironlake (gen5) via MEMSWCTL/MEMSTAT, VLV/CHV
 * via the punit, and gen6+ via the RPS register block; anything older
 * gets a stub line.  CD/pixel clock limits are printed for all.
 */
static int frequency_show(struct seq_file *m, void *unused)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_rps *rps = &gt->rps;
	intel_wakeref_t wakeref;

	/* Keep the device awake across all the register reads below. */
	wakeref = intel_runtime_pm_get(uncore->rpm);

	if (IS_GEN(i915, 5)) {
		/* Ironlake: requested/current P-state and VID. */
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		/* The GPU frequency status lives behind the punit. */
		vlv_punit_get(i915);
		freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(i915);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(i915) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
		/* Broxton/Geminilake use dedicated RP-state registers. */
		if (IS_GEN9_LP(i915)) {
			rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
			gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
			gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

		/* The requested-frequency field shifted position per gen. */
		reqf = intel_uncore_read(uncore, GEN6_RPNSWREQ);
		if (INTEL_GEN(i915) >= 9) {
			reqf >>= 23;
		} else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(i915) || IS_BROADWELL(i915))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
		rpinclimit = intel_uncore_read(uncore, GEN6_RP_UP_THRESHOLD);
		rpdeclimit = intel_uncore_read(uncore, GEN6_RP_DOWN_THRESHOLD);

		rpstat = intel_uncore_read(uncore, GEN6_RPSTAT1);
		rpupei = intel_uncore_read(uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = intel_uncore_read(uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = intel_uncore_read(uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = intel_uncore_read(uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = intel_uncore_read(uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

		/* The PM interrupt registers moved across generations. */
		if (INTEL_GEN(i915) >= 11) {
			pm_ier = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = intel_uncore_read(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(i915) >= 8) {
			pm_ier = intel_uncore_read(uncore, GEN8_GT_IER(2));
			pm_imr = intel_uncore_read(uncore, GEN8_GT_IMR(2));
			pm_isr = intel_uncore_read(uncore, GEN8_GT_ISR(2));
			pm_iir = intel_uncore_read(uncore, GEN8_GT_IIR(2));
		} else {
			pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
			pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
			pm_isr = intel_uncore_read(uncore, GEN6_PMISR);
			pm_iir = intel_uncore_read(uncore, GEN6_PMIIR);
		}
		pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(i915) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		/* gen9+ widened the p-state ratio field to 9 bits. */
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(i915, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(i915, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(i915, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(i915, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(i915, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(i915, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/*
		 * RP_STATE_CAP byte layout is swapped on GEN9_LP: RPN is
		 * in the low byte there and in bits 16-23 elsewhere.
		 */
		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(i915) ||
			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(i915) ||
			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(i915) ||
			     INTEL_GEN(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);

	intel_runtime_pm_put(uncore->rpm, wakeref);

	return 0;
}
DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency);
469 
/*
 * debugfs "llc" file: report LLC and eDRAM/eLLC presence and size,
 * then print, for every GPU frequency step, the effective CPU and
 * ring frequencies reported by pcode's GEN6_PCODE_READ_MIN_FREQ_TABLE.
 */
static int llc_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	/* gen9+ carries eDRAM; older parts label the memory eLLC. */
	const bool edram = INTEL_GEN(i915) > 8;
	struct intel_rps *rps = &gt->rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(i915)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   i915->edram_size_mb);

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	/* Hold a wakeref across the whole pcode mailbox loop. */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		/*
		 * On return ia_freq carries the CPU frequency in bits 0-7
		 * and the ring frequency in bits 8-15, both in units of
		 * 100 MHz (hence the * 100 when printing).
		 */
		sandybridge_pcode_read(i915,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(i915) ||
					    INTEL_GEN(i915) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);

	return 0;
}
513 
llc_eval(const struct intel_gt * gt)514 static bool llc_eval(const struct intel_gt *gt)
515 {
516 	return HAS_LLC(gt->i915);
517 }
518 
519 DEFINE_GT_DEBUGFS_ATTRIBUTE(llc);
520 
rps_power_to_str(unsigned int power)521 static const char *rps_power_to_str(unsigned int power)
522 {
523 	static const char * const strings[] = {
524 		[LOW_POWER] = "low power",
525 		[BETWEEN] = "mixed",
526 		[HIGH_POWER] = "high power",
527 	};
528 
529 	if (power >= ARRAY_SIZE(strings) || !strings[power])
530 		return "unknown";
531 
532 	return strings[power];
533 }
534 
/*
 * debugfs "rps_boost" file: dump RPS enablement, the frequency limits
 * and waitboost counters, and — when RPS is active on gen6+ and the GT
 * is awake — the autotuning up/down evaluation-interval percentages.
 */
static int rps_boost_show(struct seq_file *m, void *data)
{
	struct intel_gt *gt = m->private;
	struct drm_i915_private *i915 = gt->i915;
	struct intel_rps *rps = &gt->rps;

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s\n", yesno(gt->awake));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(rps, rps->cur_freq),
		   intel_rps_read_actual_frequency(rps));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(rps, rps->min_freq),
		   intel_gpu_freq(rps, rps->min_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq_softlimit),
		   intel_gpu_freq(rps, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(rps, rps->idle_freq),
		   intel_gpu_freq(rps, rps->efficient_freq),
		   intel_gpu_freq(rps, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(i915) >= 6 && rps->enabled && gt->awake) {
		struct intel_uncore *uncore = gt->uncore;
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* _fw reads are safe here: forcewake is held explicitly. */
		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
		rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		/* The && guards avoid dividing by a zero EI counter. */
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
587 
rps_eval(const struct intel_gt * gt)588 static bool rps_eval(const struct intel_gt *gt)
589 {
590 	return HAS_RPS(gt->i915);
591 }
592 
593 DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost);
594 
debugfs_gt_pm_register(struct intel_gt * gt,struct dentry * root)595 void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
596 {
597 	static const struct debugfs_gt_file files[] = {
598 		{ "drpc", &drpc_fops, NULL },
599 		{ "frequency", &frequency_fops, NULL },
600 		{ "forcewake", &fw_domains_fops, NULL },
601 		{ "llc", &llc_fops, llc_eval },
602 		{ "rps_boost", &rps_boost_fops, rps_eval },
603 	};
604 
605 	debugfs_gt_register_files(gt, root, files, ARRAY_SIZE(files));
606 }
607