xref: /openbsd-src/sys/dev/pci/drm/i915/gt/intel_gt_pm_debugfs.c (revision f005ef32267c16bdb134f0e9fa4477dbe07c263a)
1 // SPDX-License-Identifier: MIT
2 
3 /*
4  * Copyright © 2019 Intel Corporation
5  */
6 
7 #include <linux/seq_file.h>
8 #include <linux/string_helpers.h>
9 
10 #include "i915_drv.h"
11 #include "i915_reg.h"
12 #include "intel_gt.h"
13 #include "intel_gt_clock_utils.h"
14 #include "intel_gt_debugfs.h"
15 #include "intel_gt_pm.h"
16 #include "intel_gt_pm_debugfs.h"
17 #include "intel_gt_regs.h"
18 #include "intel_llc.h"
19 #include "intel_mchbar_regs.h"
20 #include "intel_pcode.h"
21 #include "intel_rc6.h"
22 #include "intel_rps.h"
23 #include "intel_runtime_pm.h"
24 #include "intel_uncore.h"
25 #include "vlv_sideband.h"
26 
27 #ifdef notyet
28 
intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt * gt)29 void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
30 {
31 	atomic_inc(&gt->user_wakeref);
32 	intel_gt_pm_get(gt);
33 	if (GRAPHICS_VER(gt->i915) >= 6)
34 		intel_uncore_forcewake_user_get(gt->uncore);
35 }
36 
intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt * gt)37 void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
38 {
39 	if (GRAPHICS_VER(gt->i915) >= 6)
40 		intel_uncore_forcewake_user_put(gt->uncore);
41 	intel_gt_pm_put(gt);
42 	atomic_dec(&gt->user_wakeref);
43 }
44 
forcewake_user_open(struct inode * inode,struct file * file)45 static int forcewake_user_open(struct inode *inode, struct file *file)
46 {
47 	struct intel_gt *gt = inode->i_private;
48 
49 	intel_gt_pm_debugfs_forcewake_user_open(gt);
50 
51 	return 0;
52 }
53 
forcewake_user_release(struct inode * inode,struct file * file)54 static int forcewake_user_release(struct inode *inode, struct file *file)
55 {
56 	struct intel_gt *gt = inode->i_private;
57 
58 	intel_gt_pm_debugfs_forcewake_user_release(gt);
59 
60 	return 0;
61 }
62 
63 static const struct file_operations forcewake_user_fops = {
64 	.owner = THIS_MODULE,
65 	.open = forcewake_user_open,
66 	.release = forcewake_user_release,
67 };
68 
fw_domains_show(struct seq_file * m,void * data)69 static int fw_domains_show(struct seq_file *m, void *data)
70 {
71 	struct intel_gt *gt = m->private;
72 	struct intel_uncore *uncore = gt->uncore;
73 	struct intel_uncore_forcewake_domain *fw_domain;
74 	unsigned int tmp;
75 
76 	seq_printf(m, "user.bypass_count = %u\n",
77 		   uncore->user_forcewake_count);
78 
79 	for_each_fw_domain(fw_domain, uncore, tmp)
80 		seq_printf(m, "%s.wake_count = %u\n",
81 			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
82 			   READ_ONCE(fw_domain->wake_count));
83 
84 	return 0;
85 }
86 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);
87 
vlv_drpc(struct seq_file * m)88 static int vlv_drpc(struct seq_file *m)
89 {
90 	struct intel_gt *gt = m->private;
91 	struct intel_uncore *uncore = gt->uncore;
92 	u32 rcctl1, pw_status, mt_fwake_req;
93 
94 	mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
95 	pw_status = intel_uncore_read(uncore, VLV_GTLC_PW_STATUS);
96 	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
97 
98 	seq_printf(m, "RC6 Enabled: %s\n",
99 		   str_yes_no(rcctl1 & (GEN7_RC_CTL_TO_MODE |
100 					GEN6_RC_CTL_EI_MODE(1))));
101 	seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
102 	seq_printf(m, "Render Power Well: %s\n",
103 		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
104 	seq_printf(m, "Media Power Well: %s\n",
105 		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
106 
107 	intel_rc6_print_residency(m, "Render RC6 residency since boot:", INTEL_RC6_RES_RC6);
108 	intel_rc6_print_residency(m, "Media RC6 residency since boot:", INTEL_RC6_RES_VLV_MEDIA);
109 
110 	return fw_domains_show(m, NULL);
111 }
112 
gen6_drpc(struct seq_file * m)113 static int gen6_drpc(struct seq_file *m)
114 {
115 	struct intel_gt *gt = m->private;
116 	struct drm_i915_private *i915 = gt->i915;
117 	struct intel_uncore *uncore = gt->uncore;
118 	u32 gt_core_status, mt_fwake_req, rcctl1, rc6vids = 0;
119 	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
120 
121 	mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
122 	gt_core_status = intel_uncore_read_fw(uncore, GEN6_GT_CORE_STATUS);
123 
124 	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
125 	if (GRAPHICS_VER(i915) >= 9) {
126 		gen9_powergate_enable =
127 			intel_uncore_read(uncore, GEN9_PG_ENABLE);
128 		gen9_powergate_status =
129 			intel_uncore_read(uncore, GEN9_PWRGT_DOMAIN_STATUS);
130 	}
131 
132 	if (GRAPHICS_VER(i915) <= 7)
133 		snb_pcode_read(gt->uncore, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL);
134 
135 	seq_printf(m, "RC1e Enabled: %s\n",
136 		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
137 	seq_printf(m, "RC6 Enabled: %s\n",
138 		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
139 	if (GRAPHICS_VER(i915) >= 9) {
140 		seq_printf(m, "Render Well Gating Enabled: %s\n",
141 			   str_yes_no(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
142 		seq_printf(m, "Media Well Gating Enabled: %s\n",
143 			   str_yes_no(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
144 	}
145 	seq_printf(m, "Deep RC6 Enabled: %s\n",
146 		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
147 	seq_printf(m, "Deepest RC6 Enabled: %s\n",
148 		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
149 	seq_puts(m, "Current RC state: ");
150 	switch (gt_core_status & GEN6_RCn_MASK) {
151 	case GEN6_RC0:
152 		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
153 			seq_puts(m, "Core Power Down\n");
154 		else
155 			seq_puts(m, "on\n");
156 		break;
157 	case GEN6_RC3:
158 		seq_puts(m, "RC3\n");
159 		break;
160 	case GEN6_RC6:
161 		seq_puts(m, "RC6\n");
162 		break;
163 	case GEN6_RC7:
164 		seq_puts(m, "RC7\n");
165 		break;
166 	default:
167 		seq_puts(m, "Unknown\n");
168 		break;
169 	}
170 
171 	seq_printf(m, "Core Power Down: %s\n",
172 		   str_yes_no(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
173 	seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
174 	if (GRAPHICS_VER(i915) >= 9) {
175 		seq_printf(m, "Render Power Well: %s\n",
176 			   (gen9_powergate_status &
177 			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
178 		seq_printf(m, "Media Power Well: %s\n",
179 			   (gen9_powergate_status &
180 			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
181 	}
182 
183 	/* Not exactly sure what this is */
184 	intel_rc6_print_residency(m, "RC6 \"Locked to RPn\" residency since boot:",
185 				  INTEL_RC6_RES_RC6_LOCKED);
186 	intel_rc6_print_residency(m, "RC6 residency since boot:", INTEL_RC6_RES_RC6);
187 	intel_rc6_print_residency(m, "RC6+ residency since boot:", INTEL_RC6_RES_RC6p);
188 	intel_rc6_print_residency(m, "RC6++ residency since boot:", INTEL_RC6_RES_RC6pp);
189 
190 	if (GRAPHICS_VER(i915) <= 7) {
191 		seq_printf(m, "RC6   voltage: %dmV\n",
192 			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
193 		seq_printf(m, "RC6+  voltage: %dmV\n",
194 			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
195 		seq_printf(m, "RC6++ voltage: %dmV\n",
196 			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
197 	}
198 
199 	return fw_domains_show(m, NULL);
200 }
201 
ilk_drpc(struct seq_file * m)202 static int ilk_drpc(struct seq_file *m)
203 {
204 	struct intel_gt *gt = m->private;
205 	struct intel_uncore *uncore = gt->uncore;
206 	u32 rgvmodectl, rstdbyctl;
207 	u16 crstandvid;
208 
209 	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
210 	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
211 	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
212 
213 	seq_printf(m, "HD boost: %s\n",
214 		   str_yes_no(rgvmodectl & MEMMODE_BOOST_EN));
215 	seq_printf(m, "Boost freq: %d\n",
216 		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
217 		   MEMMODE_BOOST_FREQ_SHIFT);
218 	seq_printf(m, "HW control enabled: %s\n",
219 		   str_yes_no(rgvmodectl & MEMMODE_HWIDLE_EN));
220 	seq_printf(m, "SW control enabled: %s\n",
221 		   str_yes_no(rgvmodectl & MEMMODE_SWMODE_EN));
222 	seq_printf(m, "Gated voltage change: %s\n",
223 		   str_yes_no(rgvmodectl & MEMMODE_RCLK_GATE));
224 	seq_printf(m, "Starting frequency: P%d\n",
225 		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
226 	seq_printf(m, "Max P-state: P%d\n",
227 		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
228 	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
229 	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
230 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
231 	seq_printf(m, "Render standby enabled: %s\n",
232 		   str_yes_no(!(rstdbyctl & RCX_SW_EXIT)));
233 	seq_puts(m, "Current RS state: ");
234 	switch (rstdbyctl & RSX_STATUS_MASK) {
235 	case RSX_STATUS_ON:
236 		seq_puts(m, "on\n");
237 		break;
238 	case RSX_STATUS_RC1:
239 		seq_puts(m, "RC1\n");
240 		break;
241 	case RSX_STATUS_RC1E:
242 		seq_puts(m, "RC1E\n");
243 		break;
244 	case RSX_STATUS_RS1:
245 		seq_puts(m, "RS1\n");
246 		break;
247 	case RSX_STATUS_RS2:
248 		seq_puts(m, "RS2 (RC6)\n");
249 		break;
250 	case RSX_STATUS_RS3:
251 		seq_puts(m, "RC3 (RC6+)\n");
252 		break;
253 	default:
254 		seq_puts(m, "unknown\n");
255 		break;
256 	}
257 
258 	return 0;
259 }
260 
mtl_drpc(struct seq_file * m)261 static int mtl_drpc(struct seq_file *m)
262 {
263 	struct intel_gt *gt = m->private;
264 	struct intel_uncore *uncore = gt->uncore;
265 	u32 gt_core_status, rcctl1, mt_fwake_req;
266 	u32 mtl_powergate_enable = 0, mtl_powergate_status = 0;
267 
268 	mt_fwake_req = intel_uncore_read_fw(uncore, FORCEWAKE_MT);
269 	gt_core_status = intel_uncore_read(uncore, MTL_MIRROR_TARGET_WP1);
270 
271 	rcctl1 = intel_uncore_read(uncore, GEN6_RC_CONTROL);
272 	mtl_powergate_enable = intel_uncore_read(uncore, GEN9_PG_ENABLE);
273 	mtl_powergate_status = intel_uncore_read(uncore,
274 						 GEN9_PWRGT_DOMAIN_STATUS);
275 
276 	seq_printf(m, "RC6 Enabled: %s\n",
277 		   str_yes_no(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
278 	if (gt->type == GT_MEDIA) {
279 		seq_printf(m, "Media Well Gating Enabled: %s\n",
280 			   str_yes_no(mtl_powergate_enable & GEN9_MEDIA_PG_ENABLE));
281 	} else {
282 		seq_printf(m, "Render Well Gating Enabled: %s\n",
283 			   str_yes_no(mtl_powergate_enable & GEN9_RENDER_PG_ENABLE));
284 	}
285 
286 	seq_puts(m, "Current RC state: ");
287 	switch (REG_FIELD_GET(MTL_CC_MASK, gt_core_status)) {
288 	case MTL_CC0:
289 		seq_puts(m, "RC0\n");
290 		break;
291 	case MTL_CC6:
292 		seq_puts(m, "RC6\n");
293 		break;
294 	default:
295 		MISSING_CASE(REG_FIELD_GET(MTL_CC_MASK, gt_core_status));
296 		seq_puts(m, "Unknown\n");
297 		break;
298 	}
299 
300 	seq_printf(m, "Multi-threaded Forcewake Request: 0x%x\n", mt_fwake_req);
301 	if (gt->type == GT_MEDIA)
302 		seq_printf(m, "Media Power Well: %s\n",
303 			   (mtl_powergate_status &
304 			    GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
305 	else
306 		seq_printf(m, "Render Power Well: %s\n",
307 			   (mtl_powergate_status &
308 			    GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
309 
310 	/* Works for both render and media gt's */
311 	intel_rc6_print_residency(m, "RC6 residency since boot:", INTEL_RC6_RES_RC6);
312 
313 	return fw_domains_show(m, NULL);
314 }
315 
drpc_show(struct seq_file * m,void * unused)316 static int drpc_show(struct seq_file *m, void *unused)
317 {
318 	struct intel_gt *gt = m->private;
319 	struct drm_i915_private *i915 = gt->i915;
320 	intel_wakeref_t wakeref;
321 	int err = -ENODEV;
322 
323 	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
324 		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
325 			err = mtl_drpc(m);
326 		else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
327 			err = vlv_drpc(m);
328 		else if (GRAPHICS_VER(i915) >= 6)
329 			err = gen6_drpc(m);
330 		else
331 			err = ilk_drpc(m);
332 	}
333 
334 	return err;
335 }
336 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc);
337 
intel_gt_pm_frequency_dump(struct intel_gt * gt,struct drm_printer * p)338 void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
339 {
340 	struct drm_i915_private *i915 = gt->i915;
341 	struct intel_uncore *uncore = gt->uncore;
342 	struct intel_rps *rps = &gt->rps;
343 	intel_wakeref_t wakeref;
344 
345 	wakeref = intel_runtime_pm_get(uncore->rpm);
346 
347 	if (GRAPHICS_VER(i915) == 5) {
348 		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
349 		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
350 
351 		drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
352 		drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f);
353 		drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
354 			   MEMSTAT_VID_SHIFT);
355 		drm_printf(p, "Current P-state: %d\n",
356 			   REG_FIELD_GET(MEMSTAT_PSTATE_MASK, rgvstat));
357 	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
358 		u32 rpmodectl, freq_sts;
359 
360 		rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
361 		drm_printf(p, "Video Turbo Mode: %s\n",
362 			   str_yes_no(rpmodectl & GEN6_RP_MEDIA_TURBO));
363 		drm_printf(p, "HW control enabled: %s\n",
364 			   str_yes_no(rpmodectl & GEN6_RP_ENABLE));
365 		drm_printf(p, "SW control enabled: %s\n",
366 			   str_yes_no((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) == GEN6_RP_MEDIA_SW_MODE));
367 
368 		vlv_punit_get(i915);
369 		freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
370 		vlv_punit_put(i915);
371 
372 		drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
373 		drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq);
374 
375 		drm_printf(p, "actual GPU freq: %d MHz\n",
376 			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
377 
378 		drm_printf(p, "current GPU freq: %d MHz\n",
379 			   intel_gpu_freq(rps, rps->cur_freq));
380 
381 		drm_printf(p, "max GPU freq: %d MHz\n",
382 			   intel_gpu_freq(rps, rps->max_freq));
383 
384 		drm_printf(p, "min GPU freq: %d MHz\n",
385 			   intel_gpu_freq(rps, rps->min_freq));
386 
387 		drm_printf(p, "idle GPU freq: %d MHz\n",
388 			   intel_gpu_freq(rps, rps->idle_freq));
389 
390 		drm_printf(p, "efficient (RPe) frequency: %d MHz\n",
391 			   intel_gpu_freq(rps, rps->efficient_freq));
392 	} else if (GRAPHICS_VER(i915) >= 6) {
393 		gen6_rps_frequency_dump(rps, p);
394 	} else {
395 		drm_puts(p, "no P-state info available\n");
396 	}
397 
398 	drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->display.cdclk.hw.cdclk);
399 	drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->display.cdclk.max_cdclk_freq);
400 	drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
401 
402 	intel_runtime_pm_put(uncore->rpm, wakeref);
403 }
404 
frequency_show(struct seq_file * m,void * unused)405 static int frequency_show(struct seq_file *m, void *unused)
406 {
407 	struct intel_gt *gt = m->private;
408 	struct drm_printer p = drm_seq_file_printer(m);
409 
410 	intel_gt_pm_frequency_dump(gt, &p);
411 
412 	return 0;
413 }
414 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency);
415 
llc_show(struct seq_file * m,void * data)416 static int llc_show(struct seq_file *m, void *data)
417 {
418 	struct intel_gt *gt = m->private;
419 	struct drm_i915_private *i915 = gt->i915;
420 	const bool edram = GRAPHICS_VER(i915) > 8;
421 	struct intel_rps *rps = &gt->rps;
422 	unsigned int max_gpu_freq, min_gpu_freq;
423 	intel_wakeref_t wakeref;
424 	int gpu_freq, ia_freq;
425 
426 	seq_printf(m, "LLC: %s\n", str_yes_no(HAS_LLC(i915)));
427 	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
428 		   i915->edram_size_mb);
429 
430 	min_gpu_freq = rps->min_freq;
431 	max_gpu_freq = rps->max_freq;
432 	if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
433 		/* Convert GT frequency to 50 HZ units */
434 		min_gpu_freq /= GEN9_FREQ_SCALER;
435 		max_gpu_freq /= GEN9_FREQ_SCALER;
436 	}
437 
438 	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
439 
440 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
441 	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
442 		ia_freq = gpu_freq;
443 		snb_pcode_read(gt->uncore, GEN6_PCODE_READ_MIN_FREQ_TABLE,
444 			       &ia_freq, NULL);
445 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
446 			   intel_gpu_freq(rps,
447 					  (gpu_freq *
448 					   (IS_GEN9_BC(i915) ||
449 					    GRAPHICS_VER(i915) >= 11 ?
450 					    GEN9_FREQ_SCALER : 1))),
451 			   ((ia_freq >> 0) & 0xff) * 100,
452 			   ((ia_freq >> 8) & 0xff) * 100);
453 	}
454 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
455 
456 	return 0;
457 }
458 
llc_eval(void * data)459 static bool llc_eval(void *data)
460 {
461 	struct intel_gt *gt = data;
462 
463 	return HAS_LLC(gt->i915);
464 }
465 
466 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc);
467 
rps_power_to_str(unsigned int power)468 static const char *rps_power_to_str(unsigned int power)
469 {
470 	static const char * const strings[] = {
471 		[LOW_POWER] = "low power",
472 		[BETWEEN] = "mixed",
473 		[HIGH_POWER] = "high power",
474 	};
475 
476 	if (power >= ARRAY_SIZE(strings) || !strings[power])
477 		return "unknown";
478 
479 	return strings[power];
480 }
481 
rps_boost_show(struct seq_file * m,void * data)482 static int rps_boost_show(struct seq_file *m, void *data)
483 {
484 	struct intel_gt *gt = m->private;
485 	struct drm_i915_private *i915 = gt->i915;
486 	struct intel_rps *rps = &gt->rps;
487 
488 	seq_printf(m, "RPS enabled? %s\n",
489 		   str_yes_no(intel_rps_is_enabled(rps)));
490 	seq_printf(m, "RPS active? %s\n",
491 		   str_yes_no(intel_rps_is_active(rps)));
492 	seq_printf(m, "GPU busy? %s, %llums\n",
493 		   str_yes_no(gt->awake),
494 		   ktime_to_ms(intel_gt_get_awake_time(gt)));
495 	seq_printf(m, "Boosts outstanding? %d\n",
496 		   atomic_read(&rps->num_waiters));
497 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
498 	seq_printf(m, "Frequency requested %d, actual %d\n",
499 		   intel_gpu_freq(rps, rps->cur_freq),
500 		   intel_rps_read_actual_frequency(rps));
501 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
502 		   intel_gpu_freq(rps, rps->min_freq),
503 		   intel_gpu_freq(rps, rps->min_freq_softlimit),
504 		   intel_gpu_freq(rps, rps->max_freq_softlimit),
505 		   intel_gpu_freq(rps, rps->max_freq));
506 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
507 		   intel_gpu_freq(rps, rps->idle_freq),
508 		   intel_gpu_freq(rps, rps->efficient_freq),
509 		   intel_gpu_freq(rps, rps->boost_freq));
510 
511 	seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts));
512 
513 	if (GRAPHICS_VER(i915) >= 6 && intel_rps_is_active(rps)) {
514 		struct intel_uncore *uncore = gt->uncore;
515 		u32 rpup, rpupei;
516 		u32 rpdown, rpdownei;
517 
518 		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
519 		rpup = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
520 		rpupei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
521 		rpdown = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
522 		rpdownei = intel_uncore_read_fw(uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
523 		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
524 
525 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
526 			   rps_power_to_str(rps->power.mode));
527 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
528 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
529 			   rps->power.up_threshold);
530 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
531 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
532 			   rps->power.down_threshold);
533 	} else {
534 		seq_puts(m, "\nRPS Autotuning inactive\n");
535 	}
536 
537 	return 0;
538 }
539 
rps_eval(void * data)540 static bool rps_eval(void *data)
541 {
542 	struct intel_gt *gt = data;
543 
544 	if (intel_guc_slpc_is_used(&gt->uc.guc))
545 		return false;
546 	else
547 		return HAS_RPS(gt->i915);
548 }
549 
550 DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);
551 
/* Read the raw perf-limit-reasons register under a runtime-PM wakeref. */
static int perf_limit_reasons_get(void *data, u64 *val)
{
	struct intel_gt *gt = data;
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(uncore->rpm, wakeref)
		*val = intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt));

	return 0;
}
562 
/* Clear the sticky "log" half of the perf-limit-reasons register. */
static int perf_limit_reasons_clear(void *data, u64 val)
{
	struct intel_gt *gt = data;
	intel_wakeref_t wakeref;

	/*
	 * Clear the upper 16 "log" bits, the lower 16 "status" bits are
	 * read-only. The upper 16 "log" bits are identical to the lower 16
	 * "status" bits except that the "log" bits remain set until cleared.
	 */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		intel_uncore_rmw(gt->uncore, intel_gt_perf_limit_reasons_reg(gt),
				 GT0_PERF_LIMIT_REASONS_LOG_MASK, 0);

	return 0;
}
579 
perf_limit_reasons_eval(void * data)580 static bool perf_limit_reasons_eval(void *data)
581 {
582 	struct intel_gt *gt = data;
583 
584 	return i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt));
585 }
586 
587 DEFINE_SIMPLE_ATTRIBUTE(perf_limit_reasons_fops, perf_limit_reasons_get,
588 			perf_limit_reasons_clear, "0x%llx\n");
589 
590 #endif /* notyet */
591 
/*
 * Register all GT power-management debugfs files under @root.
 * Currently a no-op on OpenBSD: the entries live inside "#ifdef notyet".
 */
void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
#ifdef notyet
	/* Each entry may carry an eval callback gating its creation. */
	static const struct intel_gt_debugfs_file files[] = {
		{ "drpc", &drpc_fops, NULL },
		{ "frequency", &frequency_fops, NULL },
		{ "forcewake", &fw_domains_fops, NULL },
		{ "forcewake_user", &forcewake_user_fops, NULL },
		{ "llc", &llc_fops, llc_eval },
		{ "rps_boost", &rps_boost_fops, rps_eval },
		{ "perf_limit_reasons", &perf_limit_reasons_fops, perf_limit_reasons_eval },
	};

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
#endif
}
608