xref: /openbsd-src/sys/dev/pci/drm/i915/display/intel_display_debugfs.c (revision c1a45aed656e7d5627c30c92421893a76f370ccb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_debugfs.h>
7 #include <drm/drm_fourcc.h>
8 
9 #include "i915_debugfs.h"
10 #include "intel_display_debugfs.h"
11 #include "intel_display_power.h"
12 #include "intel_de.h"
13 #include "intel_display_types.h"
14 #include "intel_dmc.h"
15 #include "intel_dp.h"
16 #include "intel_drrs.h"
17 #include "intel_fbc.h"
18 #include "intel_hdcp.h"
19 #include "intel_hdmi.h"
20 #include "intel_pm.h"
21 #include "intel_psr.h"
22 #include "intel_sideband.h"
23 #include "intel_sprite.h"
24 
25 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
26 {
27 	return to_i915(node->minor->dev);
28 }
29 
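/*
 * Dump the frontbuffer tracking state: the "busy" and "flip" bitmasks
 * kept in dev_priv->fb_tracking.
 */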
30 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
31 {
32 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
33 
34 	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
35 		   dev_priv->fb_tracking.busy_bits);
36 
37 	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
38 		   dev_priv->fb_tracking.flip_bits);
39 
40 	return 0;
41 }
42 
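/*
 * Report whether FBC is currently active (or why it is disabled) and,
 * when active, sample the platform-specific status register to show
 * whether the hardware is actually compressing.
 */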
43 static int i915_fbc_status(struct seq_file *m, void *unused)
44 {
45 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
46 	struct intel_fbc *fbc = &dev_priv->fbc;
47 	intel_wakeref_t wakeref;
48 
49 	if (!HAS_FBC(dev_priv))
50 		return -ENODEV;
51 
52 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
53 	mutex_lock(&fbc->lock);
54 
55 	if (intel_fbc_is_active(dev_priv))
56 		seq_puts(m, "FBC enabled\n");
57 	else
58 		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
59 
60 	if (intel_fbc_is_active(dev_priv)) {
61 		u32 mask;
62 
63 		if (DISPLAY_VER(dev_priv) >= 8)
64 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
65 		else if (DISPLAY_VER(dev_priv) >= 7)
66 			mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
67 		else if (DISPLAY_VER(dev_priv) >= 5)
68 			mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
69 		else if (IS_G4X(dev_priv))
70 			mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
71 		else
72 			mask = intel_de_read(dev_priv, FBC_STATUS) &
73 				(FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
74 
75 		seq_printf(m, "Compressing: %s\n", yesno(mask));
76 	}
77 
78 	mutex_unlock(&fbc->lock);
79 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
80 
81 	return 0;
82 }
83 
84 static int i915_fbc_false_color_get(void *data, u64 *val)
85 {
86 	struct drm_i915_private *dev_priv = data;
87 
88 	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
89 		return -ENODEV;
90 
91 	*val = dev_priv->fbc.false_color;
92 
93 	return 0;
94 }
95 
96 static int i915_fbc_false_color_set(void *data, u64 val)
97 {
98 	struct drm_i915_private *dev_priv = data;
99 	u32 reg;
100 
101 	if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv))
102 		return -ENODEV;
103 
104 	mutex_lock(&dev_priv->fbc.lock);
105 
106 	reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
107 	dev_priv->fbc.false_color = val;
108 
109 	intel_de_write(dev_priv, ILK_DPFC_CONTROL,
110 		       val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
111 
112 	mutex_unlock(&dev_priv->fbc.lock);
113 	return 0;
114 }
115 
116 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
117 			i915_fbc_false_color_get, i915_fbc_false_color_set,
118 			"%llu\n");
119 
120 static int i915_ips_status(struct seq_file *m, void *unused)
121 {
122 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
123 	intel_wakeref_t wakeref;
124 
125 	if (!HAS_IPS(dev_priv))
126 		return -ENODEV;
127 
128 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
129 
130 	seq_printf(m, "Enabled by kernel parameter: %s\n",
131 		   yesno(dev_priv->params.enable_ips));
132 
133 	if (DISPLAY_VER(dev_priv) >= 8) {
134 		seq_puts(m, "Currently: unknown\n");
135 	} else {
136 		if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
137 			seq_puts(m, "Currently: enabled\n");
138 		else
139 			seq_puts(m, "Currently: disabled\n");
140 	}
141 
142 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
143 
144 	return 0;
145 }
146 
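/*
 * Report whether display self-refresh (SR) is enabled by reading the
 * platform-specific enable bit; gen9+ has no single global SR status,
 * so nothing is decoded there.
 */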
147 static int i915_sr_status(struct seq_file *m, void *unused)
148 {
149 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
150 	intel_wakeref_t wakeref;
151 	bool sr_enabled = false;
152 
153 	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
154 
155 	if (DISPLAY_VER(dev_priv) >= 9)
156 		/* no global SR status; inspect per-plane WM */;
157 	else if (HAS_PCH_SPLIT(dev_priv))
158 		sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
159 	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
160 		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
161 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
162 	else if (IS_I915GM(dev_priv))
163 		sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
164 	else if (IS_PINEVIEW(dev_priv))
165 		sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
166 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
167 		sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
168 
169 	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
170 
171 	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
172 
173 	return 0;
174 }
175 
176 static int i915_opregion(struct seq_file *m, void *unused)
177 {
178 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
179 
180 	if (opregion->header)
181 		seq_write(m, opregion->header, OPREGION_SIZE);
182 
183 	return 0;
184 }
185 
186 static int i915_vbt(struct seq_file *m, void *unused)
187 {
188 	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
189 
190 	if (opregion->vbt)
191 		seq_write(m, opregion->vbt, opregion->vbt_size);
192 
193 	return 0;
194 }
195 
196 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
197 {
198 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
199 	struct drm_device *dev = &dev_priv->drm;
200 	struct intel_framebuffer *fbdev_fb = NULL;
201 	struct drm_framebuffer *drm_fb;
202 
203 #ifdef CONFIG_DRM_FBDEV_EMULATION
204 	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
205 		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
206 
207 		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
208 			   fbdev_fb->base.width,
209 			   fbdev_fb->base.height,
210 			   fbdev_fb->base.format->depth,
211 			   fbdev_fb->base.format->cpp[0] * 8,
212 			   fbdev_fb->base.modifier,
213 			   drm_framebuffer_read_refcount(&fbdev_fb->base));
214 		i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
215 		seq_putc(m, '\n');
216 	}
217 #endif
218 
219 	mutex_lock(&dev->mode_config.fb_lock);
220 	drm_for_each_fb(drm_fb, dev) {
221 		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
222 		if (fb == fbdev_fb)
223 			continue;
224 
225 		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
226 			   fb->base.width,
227 			   fb->base.height,
228 			   fb->base.format->depth,
229 			   fb->base.format->cpp[0] * 8,
230 			   fb->base.modifier,
231 			   drm_framebuffer_read_refcount(&fb->base));
232 		i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
233 		seq_putc(m, '\n');
234 	}
235 	mutex_unlock(&dev->mode_config.fb_lock);
236 
237 	return 0;
238 }
239 
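/*
 * Query the sink-side PSR state over DPCD (DP_PSR_STATUS) and decode it
 * with the sink_status[] table below.
 */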
240 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
241 {
242 	u8 val;
243 	static const char * const sink_status[] = {
244 		"inactive",
245 		"transition to active, capture and display",
246 		"active, display from RFB",
247 		"active, capture and display on sink device timings",
248 		"transition to inactive, capture and display, timing re-sync",
249 		"reserved",
250 		"reserved",
251 		"sink internal error",
252 	};
253 	struct drm_connector *connector = m->private;
254 	struct intel_dp *intel_dp =
255 		intel_attached_dp(to_intel_connector(connector));
256 	int ret;
257 
258 	if (!CAN_PSR(intel_dp)) {
259 		seq_puts(m, "PSR Unsupported\n");
260 		return -ENODEV;
261 	}
262 
263 	if (connector->status != connector_status_connected)
264 		return -ENODEV;
265 
266 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
267 
268 	if (ret == 1) {
269 		const char *str = "unknown";
270 
271 		val &= DP_PSR_SINK_STATE_MASK;
272 		if (val < ARRAY_SIZE(sink_status))
273 			str = sink_status[val];
274 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
275 	} else {
276 		return ret;
277 	}
278 
279 	return 0;
280 }
281 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
282 
283 static void
284 psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
285 {
286 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
287 	const char *status = "unknown";
288 	u32 val, status_val;
289 
290 	if (intel_dp->psr.psr2_enabled) {
291 		static const char * const live_status[] = {
292 			"IDLE",
293 			"CAPTURE",
294 			"CAPTURE_FS",
295 			"SLEEP",
296 			"BUFON_FW",
297 			"ML_UP",
298 			"SU_STANDBY",
299 			"FAST_SLEEP",
300 			"DEEP_SLEEP",
301 			"BUF_ON",
302 			"TG_ON"
303 		};
304 		val = intel_de_read(dev_priv,
305 				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
306 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
307 			      EDP_PSR2_STATUS_STATE_SHIFT;
308 		if (status_val < ARRAY_SIZE(live_status))
309 			status = live_status[status_val];
310 	} else {
311 		static const char * const live_status[] = {
312 			"IDLE",
313 			"SRDONACK",
314 			"SRDENT",
315 			"BUFOFF",
316 			"BUFON",
317 			"AUXACK",
318 			"SRDOFFACK",
319 			"SRDENT_ON",
320 		};
321 		val = intel_de_read(dev_priv,
322 				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
323 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
324 			      EDP_PSR_STATUS_STATE_SHIFT;
325 		if (status_val < ARRAY_SIZE(live_status))
326 			status = live_status[status_val];
327 	}
328 
329 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
330 }
331 
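/*
 * Print the source-side PSR state for one intel_dp: sink support, the
 * PSR1/PSR2 mode in use, control/status registers, busy frontbuffer
 * bits and, where available, the performance and selective-update
 * counters.
 */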
332 static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
333 {
334 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
335 	struct intel_psr *psr = &intel_dp->psr;
336 	intel_wakeref_t wakeref;
337 	const char *status;
338 	bool enabled;
339 	u32 val;
340 
341 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
342 	if (psr->sink_support)
343 		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
344 	seq_puts(m, "\n");
345 
346 	if (!psr->sink_support)
347 		return 0;
348 
349 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
350 	mutex_lock(&psr->lock);
351 
352 	if (psr->enabled)
353 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
354 	else
355 		status = "disabled";
356 	seq_printf(m, "PSR mode: %s\n", status);
357 
358 	if (!psr->enabled) {
359 		seq_printf(m, "PSR sink not reliable: %s\n",
360 			   yesno(psr->sink_not_reliable));
361 
362 		goto unlock;
363 	}
364 
365 	if (psr->psr2_enabled) {
366 		val = intel_de_read(dev_priv,
367 				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
368 		enabled = val & EDP_PSR2_ENABLE;
369 	} else {
370 		val = intel_de_read(dev_priv,
371 				    EDP_PSR_CTL(intel_dp->psr.transcoder));
372 		enabled = val & EDP_PSR_ENABLE;
373 	}
374 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
375 		   enableddisabled(enabled), val);
376 	psr_source_status(intel_dp, m);
377 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
378 		   psr->busy_frontbuffer_bits);
379 
380 	/*
 381 	 * SKL+ Perf counter is reset to 0 every time DC state is entered
382 	 */
383 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
384 		val = intel_de_read(dev_priv,
385 				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
386 		val &= EDP_PSR_PERF_CNT_MASK;
387 		seq_printf(m, "Performance counter: %u\n", val);
388 	}
389 
390 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
391 		seq_printf(m, "Last attempted entry at: %lld\n",
392 			   psr->last_entry_attempt);
393 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
394 	}
395 
396 	if (psr->psr2_enabled) {
397 		u32 su_frames_val[3];
398 		int frame;
399 
400 		/*
 401 		 * Reading all 3 registers beforehand to minimize crossing a
402 		 * frame boundary between register reads
403 		 */
404 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
405 			val = intel_de_read(dev_priv,
406 					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
407 			su_frames_val[frame / 3] = val;
408 		}
409 
410 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
411 
412 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
413 			u32 su_blocks;
414 
415 			su_blocks = su_frames_val[frame / 3] &
416 				    PSR2_SU_STATUS_MASK(frame);
417 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
418 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
419 		}
420 
421 		seq_printf(m, "PSR2 selective fetch: %s\n",
422 			   enableddisabled(psr->psr2_sel_fetch_enabled));
423 	}
424 
425 unlock:
426 	mutex_unlock(&psr->lock);
427 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
428 
429 	return 0;
430 }
431 
432 static int i915_edp_psr_status(struct seq_file *m, void *data)
433 {
434 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
435 	struct intel_dp *intel_dp = NULL;
436 	struct intel_encoder *encoder;
437 
438 	if (!HAS_PSR(dev_priv))
439 		return -ENODEV;
440 
 441 	/* Find the first eDP which supports PSR */
442 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
443 		intel_dp = enc_to_intel_dp(encoder);
444 		break;
445 	}
446 
447 	if (!intel_dp)
448 		return -ENODEV;
449 
450 	return intel_psr_status(m, intel_dp);
451 }
452 
453 static int
454 i915_edp_psr_debug_set(void *data, u64 val)
455 {
456 	struct drm_i915_private *dev_priv = data;
457 	struct intel_encoder *encoder;
458 	intel_wakeref_t wakeref;
459 	int ret = -ENODEV;
460 
461 	if (!HAS_PSR(dev_priv))
462 		return ret;
463 
464 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
465 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
466 
467 		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
468 
469 		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
470 
471 		// TODO: split to each transcoder's PSR debug state
472 		ret = intel_psr_debug_set(intel_dp, val);
473 
474 		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
475 	}
476 
477 	return ret;
478 }
479 
480 static int
481 i915_edp_psr_debug_get(void *data, u64 *val)
482 {
483 	struct drm_i915_private *dev_priv = data;
484 	struct intel_encoder *encoder;
485 
486 	if (!HAS_PSR(dev_priv))
487 		return -ENODEV;
488 
489 	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
490 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
491 
492 		// TODO: split to each transcoder's PSR debug state
493 		*val = READ_ONCE(intel_dp->psr.debug);
494 		return 0;
495 	}
496 
497 	return -ENODEV;
498 }
499 
500 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
501 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
502 			"%llu\n");
503 
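/*
 * List every power well with its reference count, followed by the use
 * count of each display power domain mapped onto that well.
 */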
504 static int i915_power_domain_info(struct seq_file *m, void *unused)
505 {
506 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
507 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
508 	int i;
509 
510 	mutex_lock(&power_domains->lock);
511 
512 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
513 	for (i = 0; i < power_domains->power_well_count; i++) {
514 		struct i915_power_well *power_well;
515 		enum intel_display_power_domain power_domain;
516 
517 		power_well = &power_domains->power_wells[i];
518 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
519 			   power_well->count);
520 
521 		for_each_power_domain(power_domain, power_well->desc->domains)
522 			seq_printf(m, "  %-23s %d\n",
523 				 intel_display_power_domain_str(power_domain),
524 				 power_domains->domain_use_count[power_domain]);
525 	}
526 
527 	mutex_unlock(&power_domains->lock);
528 
529 	return 0;
530 }
531 
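/*
 * Report DMC firmware load state and version, plus the DC-state entry
 * counters; which counter registers exist depends on the platform and
 * is selected below.
 */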
532 static int i915_dmc_info(struct seq_file *m, void *unused)
533 {
534 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
535 	intel_wakeref_t wakeref;
536 	struct intel_dmc *dmc;
537 	i915_reg_t dc5_reg, dc6_reg = {};
538 
539 	if (!HAS_DMC(dev_priv))
540 		return -ENODEV;
541 
542 	dmc = &dev_priv->dmc;
543 
544 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
545 
546 	seq_printf(m, "fw loaded: %s\n", yesno(intel_dmc_has_payload(dev_priv)));
547 	seq_printf(m, "path: %s\n", dmc->fw_path);
548 	seq_printf(m, "Pipe A fw support: %s\n",
549 		   yesno(GRAPHICS_VER(dev_priv) >= 12));
550 	seq_printf(m, "Pipe A fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEA].payload));
551 	seq_printf(m, "Pipe B fw support: %s\n", yesno(IS_ALDERLAKE_P(dev_priv)));
552 	seq_printf(m, "Pipe B fw loaded: %s\n", yesno(dmc->dmc_info[DMC_FW_PIPEB].payload));
553 
554 	if (!intel_dmc_has_payload(dev_priv))
555 		goto out;
556 
557 	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
558 		   DMC_VERSION_MINOR(dmc->version));
559 
560 	if (DISPLAY_VER(dev_priv) >= 12) {
561 		if (IS_DGFX(dev_priv)) {
562 			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
563 		} else {
564 			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
565 			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
566 		}
567 
568 		/*
569 		 * NOTE: DMC_DEBUG3 is a general purpose reg.
570 		 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
571 		 * reg for DC3CO debugging and validation,
 572 		 * but TGL DMC f/w uses the DMC_DEBUG3 reg as the DC3CO counter.
573 		 */
574 		seq_printf(m, "DC3CO count: %d\n",
575 			   intel_de_read(dev_priv, DMC_DEBUG3));
576 	} else {
577 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_DMC_DC3_DC5_COUNT :
578 						 SKL_DMC_DC3_DC5_COUNT;
579 		if (!IS_GEMINILAKE(dev_priv) && !IS_BROXTON(dev_priv))
580 			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
581 	}
582 
583 	seq_printf(m, "DC3 -> DC5 count: %d\n",
584 		   intel_de_read(dev_priv, dc5_reg));
585 	if (dc6_reg.reg)
586 		seq_printf(m, "DC5 -> DC6 count: %d\n",
587 			   intel_de_read(dev_priv, dc6_reg));
588 
589 out:
590 	seq_printf(m, "program base: 0x%08x\n",
591 		   intel_de_read(dev_priv, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
592 	seq_printf(m, "ssp base: 0x%08x\n",
593 		   intel_de_read(dev_priv, DMC_SSP_BASE));
594 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, DMC_HTP_SKL));
595 
596 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
597 
598 	return 0;
599 }
600 
601 static void intel_seq_print_mode(struct seq_file *m, int tabs,
602 				 const struct drm_display_mode *mode)
603 {
604 	int i;
605 
606 	for (i = 0; i < tabs; i++)
607 		seq_putc(m, '\t');
608 
609 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
610 }
611 
612 static void intel_encoder_info(struct seq_file *m,
613 			       struct intel_crtc *crtc,
614 			       struct intel_encoder *encoder)
615 {
616 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
617 	struct drm_connector_list_iter conn_iter;
618 	struct drm_connector *connector;
619 
620 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
621 		   encoder->base.base.id, encoder->base.name);
622 
623 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
624 	drm_for_each_connector_iter(connector, &conn_iter) {
625 		const struct drm_connector_state *conn_state =
626 			connector->state;
627 
628 		if (conn_state->best_encoder != &encoder->base)
629 			continue;
630 
631 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
632 			   connector->base.id, connector->name);
633 	}
634 	drm_connector_list_iter_end(&conn_iter);
635 }
636 
637 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
638 {
639 	const struct drm_display_mode *mode = panel->fixed_mode;
640 
641 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
642 }
643 
644 static void intel_hdcp_info(struct seq_file *m,
645 			    struct intel_connector *intel_connector)
646 {
647 	bool hdcp_cap, hdcp2_cap;
648 
649 	if (!intel_connector->hdcp.shim) {
650 		seq_puts(m, "No Connector Support");
651 		goto out;
652 	}
653 
654 	hdcp_cap = intel_hdcp_capable(intel_connector);
655 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
656 
657 	if (hdcp_cap)
658 		seq_puts(m, "HDCP1.4 ");
659 	if (hdcp2_cap)
660 		seq_puts(m, "HDCP2.2 ");
661 
662 	if (!hdcp_cap && !hdcp2_cap)
663 		seq_puts(m, "None");
664 
665 out:
666 	seq_puts(m, "\n");
667 }
668 
669 static void intel_dp_info(struct seq_file *m,
670 			  struct intel_connector *intel_connector)
671 {
672 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
673 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
674 	const struct drm_property_blob *edid = intel_connector->base.edid_blob_ptr;
675 
676 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
677 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
678 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
679 		intel_panel_info(m, &intel_connector->panel);
680 
681 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
682 				edid ? edid->data : NULL, &intel_dp->aux);
683 }
684 
685 static void intel_dp_mst_info(struct seq_file *m,
686 			      struct intel_connector *intel_connector)
687 {
688 	bool has_audio = intel_connector->port->has_audio;
689 
690 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
691 }
692 
693 static void intel_hdmi_info(struct seq_file *m,
694 			    struct intel_connector *intel_connector)
695 {
696 	struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
697 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
698 
699 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
700 }
701 
702 static void intel_lvds_info(struct seq_file *m,
703 			    struct intel_connector *intel_connector)
704 {
705 	intel_panel_info(m, &intel_connector->panel);
706 }
707 
708 static void intel_connector_info(struct seq_file *m,
709 				 struct drm_connector *connector)
710 {
711 	struct intel_connector *intel_connector = to_intel_connector(connector);
712 	const struct drm_connector_state *conn_state = connector->state;
713 	struct intel_encoder *encoder =
714 		to_intel_encoder(conn_state->best_encoder);
715 	const struct drm_display_mode *mode;
716 
717 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
718 		   connector->base.id, connector->name,
719 		   drm_get_connector_status_name(connector->status));
720 
721 	if (connector->status == connector_status_disconnected)
722 		return;
723 
724 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
725 		   connector->display_info.width_mm,
726 		   connector->display_info.height_mm);
727 	seq_printf(m, "\tsubpixel order: %s\n",
728 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
729 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
730 
731 	if (!encoder)
732 		return;
733 
734 	switch (connector->connector_type) {
735 	case DRM_MODE_CONNECTOR_DisplayPort:
736 	case DRM_MODE_CONNECTOR_eDP:
737 		if (encoder->type == INTEL_OUTPUT_DP_MST)
738 			intel_dp_mst_info(m, intel_connector);
739 		else
740 			intel_dp_info(m, intel_connector);
741 		break;
742 	case DRM_MODE_CONNECTOR_LVDS:
743 		if (encoder->type == INTEL_OUTPUT_LVDS)
744 			intel_lvds_info(m, intel_connector);
745 		break;
746 	case DRM_MODE_CONNECTOR_HDMIA:
747 		if (encoder->type == INTEL_OUTPUT_HDMI ||
748 		    encoder->type == INTEL_OUTPUT_DDI)
749 			intel_hdmi_info(m, intel_connector);
750 		break;
751 	default:
752 		break;
753 	}
754 
755 	seq_puts(m, "\tHDCP version: ");
756 	intel_hdcp_info(m, intel_connector);
757 
758 	seq_printf(m, "\tmodes:\n");
759 	list_for_each_entry(mode, &connector->modes, head)
760 		intel_seq_print_mode(m, 2, mode);
761 }
762 
763 static const char *plane_type(enum drm_plane_type type)
764 {
765 	switch (type) {
766 	case DRM_PLANE_TYPE_OVERLAY:
767 		return "OVL";
768 	case DRM_PLANE_TYPE_PRIMARY:
769 		return "PRI";
770 	case DRM_PLANE_TYPE_CURSOR:
771 		return "CUR";
772 	/*
773 	 * Deliberately omitting default: to generate compiler warnings
774 	 * when a new drm_plane_type gets added.
775 	 */
776 	}
777 
778 	return "unknown";
779 }
780 
781 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
782 {
783 	/*
 784 	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
 785 	 * but print them all so that misused values are easy to spot
786 	 */
787 	snprintf(buf, bufsize,
788 		 "%s%s%s%s%s%s(0x%08x)",
789 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
790 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
791 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
792 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
793 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
794 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
795 		 rotation);
796 }
797 
798 static const char *plane_visibility(const struct intel_plane_state *plane_state)
799 {
800 	if (plane_state->uapi.visible)
801 		return "visible";
802 
803 	if (plane_state->planar_slave)
804 		return "planar-slave";
805 
806 	return "hidden";
807 }
808 
809 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
810 {
811 	const struct intel_plane_state *plane_state =
812 		to_intel_plane_state(plane->base.state);
813 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
814 	struct drm_rect src, dst;
815 	char rot_str[48];
816 
817 	src = drm_plane_state_src(&plane_state->uapi);
818 	dst = drm_plane_state_dest(&plane_state->uapi);
819 
820 	plane_rotation(rot_str, sizeof(rot_str),
821 		       plane_state->uapi.rotation);
822 
823 	seq_puts(m, "\t\tuapi: [FB:");
824 	if (fb)
825 		seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
826 			   &fb->format->format, fb->modifier, fb->width,
827 			   fb->height);
828 	else
829 		seq_puts(m, "0] n/a,0x0,0x0,");
830 	seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
831 		   ", rotation=%s\n", plane_visibility(plane_state),
832 		   DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
833 
834 	if (plane_state->planar_linked_plane)
835 		seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
836 			   plane_state->planar_linked_plane->base.base.id, plane_state->planar_linked_plane->base.name,
837 			   plane_state->planar_slave ? "slave" : "master");
838 }
839 
840 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
841 {
842 	const struct intel_plane_state *plane_state =
843 		to_intel_plane_state(plane->base.state);
844 	const struct drm_framebuffer *fb = plane_state->hw.fb;
845 	char rot_str[48];
846 
847 	if (!fb)
848 		return;
849 
850 	plane_rotation(rot_str, sizeof(rot_str),
851 		       plane_state->hw.rotation);
852 
853 	seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
854 		   DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
855 		   fb->base.id, &fb->format->format,
856 		   fb->modifier, fb->width, fb->height,
857 		   yesno(plane_state->uapi.visible),
858 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
859 		   DRM_RECT_ARG(&plane_state->uapi.dst),
860 		   rot_str);
861 }
862 
863 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
864 {
865 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
866 	struct intel_plane *plane;
867 
868 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
869 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
870 			   plane->base.base.id, plane->base.name,
871 			   plane_type(plane->base.type));
872 		intel_plane_uapi_info(m, plane);
873 		intel_plane_hw_info(m, plane);
874 	}
875 }
876 
877 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
878 {
879 	const struct intel_crtc_state *crtc_state =
880 		to_intel_crtc_state(crtc->base.state);
881 	int num_scalers = crtc->num_scalers;
882 	int i;
883 
 884 	/* Not all platforms have a scaler */
885 	if (num_scalers) {
886 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
887 			   num_scalers,
888 			   crtc_state->scaler_state.scaler_users,
889 			   crtc_state->scaler_state.scaler_id);
890 
891 		for (i = 0; i < num_scalers; i++) {
892 			const struct intel_scaler *sc =
893 				&crtc_state->scaler_state.scalers[i];
894 
895 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
896 				   i, yesno(sc->in_use), sc->mode);
897 		}
898 		seq_puts(m, "\n");
899 	} else {
900 		seq_puts(m, "\tNo scalers available on this platform\n");
901 	}
902 }
903 
904 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
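/*
 * Render a histogram of vblank-evasion update times for a CRTC: one row
 * per power-of-two time bucket, with a bar whose length is log2 of the
 * bucket's count, followed by min/max/average update time and the number
 * of updates that overran the evasion window.
 */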
905 static void crtc_updates_info(struct seq_file *m,
906 			      struct intel_crtc *crtc,
907 			      const char *hdr)
908 {
909 	u64 count;
910 	int row;
911 
912 	count = 0;
913 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++)
914 		count += crtc->debug.vbl.times[row];
915 	seq_printf(m, "%sUpdates: %llu\n", hdr, count);
916 	if (!count)
917 		return;
918 
919 	for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) {
920 		char columns[80] = "       |";
921 		unsigned int x;
922 
923 		if (row & 1) {
924 			const char *units;
925 
926 			if (row > 10) {
927 				x = 1000000;
928 				units = "ms";
929 			} else {
930 				x = 1000;
931 				units = "us";
932 			}
933 
934 			snprintf(columns, sizeof(columns), "%4ld%s |",
935 				 DIV_ROUND_CLOSEST(BIT(row + 9), x), units);
936 		}
937 
938 		if (crtc->debug.vbl.times[row]) {
939 			x = ilog2(crtc->debug.vbl.times[row]);
940 			memset(columns + 8, '*', x);
941 			columns[8 + x] = '\0';
942 		}
943 
944 		seq_printf(m, "%s%s\n", hdr, columns);
945 	}
946 
947 	seq_printf(m, "%sMin update: %lluns\n",
948 		   hdr, crtc->debug.vbl.min);
949 	seq_printf(m, "%sMax update: %lluns\n",
950 		   hdr, crtc->debug.vbl.max);
951 	seq_printf(m, "%sAverage update: %lluns\n",
952 		   hdr, div64_u64(crtc->debug.vbl.sum,  count));
953 	seq_printf(m, "%sOverruns > %uus: %u\n",
954 		   hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over);
955 }
956 
957 static int crtc_updates_show(struct seq_file *m, void *data)
958 {
959 	crtc_updates_info(m, m->private, "");
960 	return 0;
961 }
962 
963 static int crtc_updates_open(struct inode *inode, struct file *file)
964 {
965 	return single_open(file, crtc_updates_show, inode->i_private);
966 }
967 
968 static ssize_t crtc_updates_write(struct file *file,
969 				  const char __user *ubuf,
970 				  size_t len, loff_t *offp)
971 {
972 	struct seq_file *m = file->private_data;
973 	struct intel_crtc *crtc = m->private;
974 
975 	/* May race with an update. Meh. */
976 	memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl));
977 
978 	return len;
979 }
980 
981 static const struct file_operations crtc_updates_fops = {
982 	.owner = THIS_MODULE,
983 	.open = crtc_updates_open,
984 	.read = seq_read,
985 	.llseek = seq_lseek,
986 	.release = single_release,
987 	.write = crtc_updates_write
988 };
989 
990 static void crtc_updates_add(struct drm_crtc *crtc)
991 {
992 	debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry,
993 			    to_intel_crtc(crtc), &crtc_updates_fops);
994 }
995 
996 #else
997 static void crtc_updates_info(struct seq_file *m,
998 			      struct intel_crtc *crtc,
999 			      const char *hdr)
1000 {
1001 }
1002 
1003 static void crtc_updates_add(struct drm_crtc *crtc)
1004 {
1005 }
1006 #endif
1007 
1008 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
1009 {
1010 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1011 	const struct intel_crtc_state *crtc_state =
1012 		to_intel_crtc_state(crtc->base.state);
1013 	struct intel_encoder *encoder;
1014 
1015 	seq_printf(m, "[CRTC:%d:%s]:\n",
1016 		   crtc->base.base.id, crtc->base.name);
1017 
1018 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
1019 		   yesno(crtc_state->uapi.enable),
1020 		   yesno(crtc_state->uapi.active),
1021 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
1022 
1023 	if (crtc_state->hw.enable) {
1024 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
1025 			   yesno(crtc_state->hw.active),
1026 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
1027 
1028 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
1029 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
1030 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
1031 
1032 		intel_scaler_info(m, crtc);
1033 	}
1034 
1035 	if (crtc_state->bigjoiner)
1036 		seq_printf(m, "\tLinked to [CRTC:%d:%s] as a %s\n",
1037 			   crtc_state->bigjoiner_linked_crtc->base.base.id,
1038 			   crtc_state->bigjoiner_linked_crtc->base.name,
1039 			   crtc_state->bigjoiner_slave ? "slave" : "master");
1040 
1041 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
1042 				    crtc_state->uapi.encoder_mask)
1043 		intel_encoder_info(m, crtc, encoder);
1044 
1045 	intel_plane_info(m, crtc);
1046 
1047 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
1048 		   yesno(!crtc->cpu_fifo_underrun_disabled),
1049 		   yesno(!crtc->pch_fifo_underrun_disabled));
1050 
1051 	crtc_updates_info(m, crtc, "\t");
1052 }
1053 
1054 static int i915_display_info(struct seq_file *m, void *unused)
1055 {
1056 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1057 	struct drm_device *dev = &dev_priv->drm;
1058 	struct intel_crtc *crtc;
1059 	struct drm_connector *connector;
1060 	struct drm_connector_list_iter conn_iter;
1061 	intel_wakeref_t wakeref;
1062 
1063 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1064 
1065 	drm_modeset_lock_all(dev);
1066 
1067 	seq_printf(m, "CRTC info\n");
1068 	seq_printf(m, "---------\n");
1069 	for_each_intel_crtc(dev, crtc)
1070 		intel_crtc_info(m, crtc);
1071 
1072 	seq_printf(m, "\n");
1073 	seq_printf(m, "Connector info\n");
1074 	seq_printf(m, "--------------\n");
1075 	drm_connector_list_iter_begin(dev, &conn_iter);
1076 	drm_for_each_connector_iter(connector, &conn_iter)
1077 		intel_connector_info(m, connector);
1078 	drm_connector_list_iter_end(&conn_iter);
1079 
1080 	drm_modeset_unlock_all(dev);
1081 
1082 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1083 
1084 	return 0;
1085 }
1086 
1087 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
1088 {
1089 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1090 	struct drm_device *dev = &dev_priv->drm;
1091 	int i;
1092 
1093 	drm_modeset_lock_all(dev);
1094 
1095 	seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
1096 		   dev_priv->dpll.ref_clks.nssc,
1097 		   dev_priv->dpll.ref_clks.ssc);
1098 
1099 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
1100 		struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
1101 
1102 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
1103 			   pll->info->id);
1104 		seq_printf(m, " pipe_mask: 0x%x, active: 0x%x, on: %s\n",
1105 			   pll->state.pipe_mask, pll->active_mask, yesno(pll->on));
1106 		seq_printf(m, " tracked hardware state:\n");
1107 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
1108 		seq_printf(m, " dpll_md: 0x%08x\n",
1109 			   pll->state.hw_state.dpll_md);
1110 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
1111 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
1112 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
1113 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
1114 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
1115 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
1116 			   pll->state.hw_state.mg_refclkin_ctl);
1117 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
1118 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
1119 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
1120 			   pll->state.hw_state.mg_clktop2_hsclkctl);
1121 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
1122 			   pll->state.hw_state.mg_pll_div0);
1123 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
1124 			   pll->state.hw_state.mg_pll_div1);
1125 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
1126 			   pll->state.hw_state.mg_pll_lf);
1127 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
1128 			   pll->state.hw_state.mg_pll_frac_lock);
1129 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
1130 			   pll->state.hw_state.mg_pll_ssc);
1131 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
1132 			   pll->state.hw_state.mg_pll_bias);
1133 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
1134 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
1135 	}
1136 	drm_modeset_unlock_all(dev);
1137 
1138 	return 0;
1139 }
1140 
1141 static int i915_ipc_status_show(struct seq_file *m, void *data)
1142 {
1143 	struct drm_i915_private *dev_priv = m->private;
1144 
1145 	seq_printf(m, "Isochronous Priority Control: %s\n",
1146 			yesno(dev_priv->ipc_enabled));
1147 	return 0;
1148 }
1149 
1150 static int i915_ipc_status_open(struct inode *inode, struct file *file)
1151 {
1152 	struct drm_i915_private *dev_priv = inode->i_private;
1153 
1154 	if (!HAS_IPC(dev_priv))
1155 		return -ENODEV;
1156 
1157 	return single_open(file, i915_ipc_status_show, dev_priv);
1158 }
1159 
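/*
 * Accept a boolean from userspace to toggle Isochronous Priority Control;
 * as the drm_info() below notes, watermarks are only fully correct after
 * the next commit when IPC is enabled this way.
 */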
1160 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
1161 				     size_t len, loff_t *offp)
1162 {
1163 	struct seq_file *m = file->private_data;
1164 	struct drm_i915_private *dev_priv = m->private;
1165 	intel_wakeref_t wakeref;
1166 	bool enable;
1167 	int ret;
1168 
1169 	ret = kstrtobool_from_user(ubuf, len, &enable);
1170 	if (ret < 0)
1171 		return ret;
1172 
1173 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1174 		if (!dev_priv->ipc_enabled && enable)
1175 			drm_info(&dev_priv->drm,
1176 				 "Enabling IPC: WM will be proper only after next commit\n");
1177 		dev_priv->ipc_enabled = enable;
1178 		intel_enable_ipc(dev_priv);
1179 	}
1180 
1181 	return len;
1182 }
1183 
1184 static const struct file_operations i915_ipc_status_fops = {
1185 	.owner = THIS_MODULE,
1186 	.open = i915_ipc_status_open,
1187 	.read = seq_read,
1188 	.llseek = seq_lseek,
1189 	.release = single_release,
1190 	.write = i915_ipc_status_write
1191 };
1192 
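/*
 * Dump the display data buffer (DDB) allocation on gen9+: start/end
 * block and size for every plane and the cursor, per pipe.
 */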
1193 static int i915_ddb_info(struct seq_file *m, void *unused)
1194 {
1195 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1196 	struct drm_device *dev = &dev_priv->drm;
1197 	struct skl_ddb_entry *entry;
1198 	struct intel_crtc *crtc;
1199 
1200 	if (DISPLAY_VER(dev_priv) < 9)
1201 		return -ENODEV;
1202 
1203 	drm_modeset_lock_all(dev);
1204 
1205 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
1206 
1207 	for_each_intel_crtc(&dev_priv->drm, crtc) {
1208 		struct intel_crtc_state *crtc_state =
1209 			to_intel_crtc_state(crtc->base.state);
1210 		enum pipe pipe = crtc->pipe;
1211 		enum plane_id plane_id;
1212 
1213 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
1214 
1215 		for_each_plane_id_on_crtc(crtc, plane_id) {
1216 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
1217 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
1218 				   entry->start, entry->end,
1219 				   skl_ddb_entry_size(entry));
1220 		}
1221 
1222 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
1223 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
1224 			   entry->end, skl_ddb_entry_size(entry));
1225 	}
1226 
1227 	drm_modeset_unlock_all(dev);
1228 
1229 	return 0;
1230 }
1231 
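/*
 * Per-CRTC DRRS report: whether the attached connector supports seamless
 * DRRS and, when DRRS is enabled, the current refresh-rate state
 * (high/low RR) and the resulting vrefresh.
 */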
1232 static void drrs_status_per_crtc(struct seq_file *m,
1233 				 struct drm_device *dev,
1234 				 struct intel_crtc *crtc)
1235 {
1236 	struct drm_i915_private *dev_priv = to_i915(dev);
1237 	struct i915_drrs *drrs = &dev_priv->drrs;
1238 	int vrefresh = 0;
1239 	struct drm_connector *connector;
1240 	struct drm_connector_list_iter conn_iter;
1241 
1242 	drm_connector_list_iter_begin(dev, &conn_iter);
1243 	drm_for_each_connector_iter(connector, &conn_iter) {
1244 		bool supported = false;
1245 
1246 		if (connector->state->crtc != &crtc->base)
1247 			continue;
1248 
1249 		seq_printf(m, "%s:\n", connector->name);
1250 
1251 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
1252 		    drrs->type == SEAMLESS_DRRS_SUPPORT)
1253 			supported = true;
1254 
1255 		seq_printf(m, "\tDRRS Supported: %s\n", yesno(supported));
1256 	}
1257 	drm_connector_list_iter_end(&conn_iter);
1258 
1259 	seq_puts(m, "\n");
1260 
1261 	if (to_intel_crtc_state(crtc->base.state)->has_drrs) {
1262 		struct intel_panel *panel;
1263 
1264 		mutex_lock(&drrs->mutex);
1265 		/* DRRS Supported */
1266 		seq_puts(m, "\tDRRS Enabled: Yes\n");
1267 
1268 		/* disable_drrs() will make drrs->dp NULL */
1269 		if (!drrs->dp) {
1270 			seq_puts(m, "Idleness DRRS: Disabled\n");
1271 			mutex_unlock(&drrs->mutex);
1272 			return;
1273 		}
1274 
1275 		panel = &drrs->dp->attached_connector->panel;
1276 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
1277 					drrs->busy_frontbuffer_bits);
1278 
1279 		seq_puts(m, "\n\t\t");
1280 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
1281 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
1282 			vrefresh = drm_mode_vrefresh(panel->fixed_mode);
1283 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
1284 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
1285 			vrefresh = drm_mode_vrefresh(panel->downclock_mode);
1286 		} else {
1287 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
1288 						drrs->refresh_rate_type);
1289 			mutex_unlock(&drrs->mutex);
1290 			return;
1291 		}
1292 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
1293 
1294 		seq_puts(m, "\n\t\t");
1295 		mutex_unlock(&drrs->mutex);
1296 	} else {
1297 		/* DRRS not supported. Print the VBT parameter */
1298 		seq_puts(m, "\tDRRS Enabled : No");
1299 	}
1300 	seq_puts(m, "\n");
1301 }
1302 
1303 static int i915_drrs_status(struct seq_file *m, void *unused)
1304 {
1305 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1306 	struct drm_device *dev = &dev_priv->drm;
1307 	struct intel_crtc *crtc;
1308 	int active_crtc_cnt = 0;
1309 
1310 	drm_modeset_lock_all(dev);
1311 	for_each_intel_crtc(dev, crtc) {
1312 		if (crtc->base.state->active) {
1313 			active_crtc_cnt++;
1314 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
1315 
1316 			drrs_status_per_crtc(m, dev, crtc);
1317 		}
1318 	}
1319 	drm_modeset_unlock_all(dev);
1320 
1321 	if (!active_crtc_cnt)
1322 		seq_puts(m, "No active crtc found\n");
1323 
1324 	return 0;
1325 }
1326 
1327 #define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
1328 				seq_puts(m, "LPSP: disabled\n"))
1329 
1330 static bool
1331 intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
1332 			      enum i915_power_well_id power_well_id)
1333 {
1334 	intel_wakeref_t wakeref;
1335 	bool is_enabled;
1336 
1337 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1338 	is_enabled = intel_display_power_well_is_enabled(i915,
1339 							 power_well_id);
1340 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
1341 
1342 	return is_enabled;
1343 }
1344 
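/*
 * LPSP (low power single pipe) is reported as enabled when the
 * platform's wider display power well is powered down, hence the
 * negated power-well checks below.
 */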
1345 static int i915_lpsp_status(struct seq_file *m, void *unused)
1346 {
1347 	struct drm_i915_private *i915 = node_to_i915(m->private);
1348 
1349 	if (DISPLAY_VER(i915) >= 13) {
1350 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915,
1351 							   SKL_DISP_PW_2));
1352 		return 0;
1353 	}
1354 
1355 	switch (DISPLAY_VER(i915)) {
1356 	case 12:
1357 	case 11:
1358 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
1359 		break;
1360 	case 10:
1361 	case 9:
1362 		LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
1363 		break;
1364 	default:
1365 		/*
1366 		 * Apart from HASWELL/BROADWELL, other legacy platforms don't
1367 		 * support LPSP.
1368 		 */
1369 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
1370 			LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
1371 		else
1372 			seq_puts(m, "LPSP: not supported\n");
1373 	}
1374 
1375 	return 0;
1376 }
1377 
1378 static int i915_dp_mst_info(struct seq_file *m, void *unused)
1379 {
1380 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1381 	struct drm_device *dev = &dev_priv->drm;
1382 	struct intel_encoder *intel_encoder;
1383 	struct intel_digital_port *dig_port;
1384 	struct drm_connector *connector;
1385 	struct drm_connector_list_iter conn_iter;
1386 
1387 	drm_connector_list_iter_begin(dev, &conn_iter);
1388 	drm_for_each_connector_iter(connector, &conn_iter) {
1389 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
1390 			continue;
1391 
1392 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
1393 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
1394 			continue;
1395 
1396 		dig_port = enc_to_dig_port(intel_encoder);
1397 		if (!dig_port->dp.can_mst)
1398 			continue;
1399 
1400 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
1401 			   dig_port->base.base.base.id,
1402 			   dig_port->base.base.name);
1403 		drm_dp_mst_dump_topology(m, &dig_port->dp.mst_mgr);
1404 	}
1405 	drm_connector_list_iter_end(&conn_iter);
1406 
1407 	return 0;
1408 }
1409 
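/*
 * DP compliance helper: writing "1" marks the compliance test as active
 * for the connected DP connector(s); any other value clears the flag.
 */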
1410 static ssize_t i915_displayport_test_active_write(struct file *file,
1411 						  const char __user *ubuf,
1412 						  size_t len, loff_t *offp)
1413 {
1414 	char *input_buffer;
1415 	int status = 0;
1416 	struct drm_device *dev;
1417 	struct drm_connector *connector;
1418 	struct drm_connector_list_iter conn_iter;
1419 	struct intel_dp *intel_dp;
1420 	int val = 0;
1421 
1422 	dev = ((struct seq_file *)file->private_data)->private;
1423 
1424 	if (len == 0)
1425 		return 0;
1426 
1427 	input_buffer = memdup_user_nul(ubuf, len);
1428 	if (IS_ERR(input_buffer))
1429 		return PTR_ERR(input_buffer);
1430 
1431 	drm_dbg(&to_i915(dev)->drm,
1432 		"Copied %d bytes from user\n", (unsigned int)len);
1433 
1434 	drm_connector_list_iter_begin(dev, &conn_iter);
1435 	drm_for_each_connector_iter(connector, &conn_iter) {
1436 		struct intel_encoder *encoder;
1437 
1438 		if (connector->connector_type !=
1439 		    DRM_MODE_CONNECTOR_DisplayPort)
1440 			continue;
1441 
1442 		encoder = to_intel_encoder(connector->encoder);
1443 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1444 			continue;
1445 
1446 		if (encoder && connector->status == connector_status_connected) {
1447 			intel_dp = enc_to_intel_dp(encoder);
1448 			status = kstrtoint(input_buffer, 10, &val);
1449 			if (status < 0)
1450 				break;
1451 			drm_dbg(&to_i915(dev)->drm,
1452 				"Got %d for test active\n", val);
1453 			/* To prevent erroneous activation of the compliance
1454 			 * testing code, only accept an actual value of 1 here
1455 			 */
1456 			if (val == 1)
1457 				intel_dp->compliance.test_active = true;
1458 			else
1459 				intel_dp->compliance.test_active = false;
1460 		}
1461 	}
1462 	drm_connector_list_iter_end(&conn_iter);
1463 	kfree(input_buffer);
1464 	if (status < 0)
1465 		return status;
1466 
1467 	*offp += len;
1468 	return len;
1469 }
1470 
1471 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
1472 {
1473 	struct drm_i915_private *dev_priv = m->private;
1474 	struct drm_device *dev = &dev_priv->drm;
1475 	struct drm_connector *connector;
1476 	struct drm_connector_list_iter conn_iter;
1477 	struct intel_dp *intel_dp;
1478 
1479 	drm_connector_list_iter_begin(dev, &conn_iter);
1480 	drm_for_each_connector_iter(connector, &conn_iter) {
1481 		struct intel_encoder *encoder;
1482 
1483 		if (connector->connector_type !=
1484 		    DRM_MODE_CONNECTOR_DisplayPort)
1485 			continue;
1486 
1487 		encoder = to_intel_encoder(connector->encoder);
1488 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1489 			continue;
1490 
1491 		if (encoder && connector->status == connector_status_connected) {
1492 			intel_dp = enc_to_intel_dp(encoder);
1493 			if (intel_dp->compliance.test_active)
1494 				seq_puts(m, "1");
1495 			else
1496 				seq_puts(m, "0");
1497 		} else
1498 			seq_puts(m, "0");
1499 	}
1500 	drm_connector_list_iter_end(&conn_iter);
1501 
1502 	return 0;
1503 }
1504 
1505 static int i915_displayport_test_active_open(struct inode *inode,
1506 					     struct file *file)
1507 {
1508 	return single_open(file, i915_displayport_test_active_show,
1509 			   inode->i_private);
1510 }
1511 
1512 static const struct file_operations i915_displayport_test_active_fops = {
1513 	.owner = THIS_MODULE,
1514 	.open = i915_displayport_test_active_open,
1515 	.read = seq_read,
1516 	.llseek = seq_lseek,
1517 	.release = single_release,
1518 	.write = i915_displayport_test_active_write
1519 };
1520 
1521 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
1522 {
1523 	struct drm_i915_private *dev_priv = m->private;
1524 	struct drm_device *dev = &dev_priv->drm;
1525 	struct drm_connector *connector;
1526 	struct drm_connector_list_iter conn_iter;
1527 	struct intel_dp *intel_dp;
1528 
1529 	drm_connector_list_iter_begin(dev, &conn_iter);
1530 	drm_for_each_connector_iter(connector, &conn_iter) {
1531 		struct intel_encoder *encoder;
1532 
1533 		if (connector->connector_type !=
1534 		    DRM_MODE_CONNECTOR_DisplayPort)
1535 			continue;
1536 
1537 		encoder = to_intel_encoder(connector->encoder);
1538 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1539 			continue;
1540 
1541 		if (encoder && connector->status == connector_status_connected) {
1542 			intel_dp = enc_to_intel_dp(encoder);
1543 			if (intel_dp->compliance.test_type ==
1544 			    DP_TEST_LINK_EDID_READ)
1545 				seq_printf(m, "%lx",
1546 					   intel_dp->compliance.test_data.edid);
1547 			else if (intel_dp->compliance.test_type ==
1548 				 DP_TEST_LINK_VIDEO_PATTERN) {
1549 				seq_printf(m, "hdisplay: %d\n",
1550 					   intel_dp->compliance.test_data.hdisplay);
1551 				seq_printf(m, "vdisplay: %d\n",
1552 					   intel_dp->compliance.test_data.vdisplay);
1553 				seq_printf(m, "bpc: %u\n",
1554 					   intel_dp->compliance.test_data.bpc);
1555 			} else if (intel_dp->compliance.test_type ==
1556 				   DP_TEST_LINK_PHY_TEST_PATTERN) {
1557 				seq_printf(m, "pattern: %d\n",
1558 					   intel_dp->compliance.test_data.phytest.phy_pattern);
1559 				seq_printf(m, "Number of lanes: %d\n",
1560 					   intel_dp->compliance.test_data.phytest.num_lanes);
1561 				seq_printf(m, "Link Rate: %d\n",
1562 					   intel_dp->compliance.test_data.phytest.link_rate);
1563 				seq_printf(m, "level: %02x\n",
1564 					   intel_dp->train_set[0]);
1565 			}
1566 		} else
1567 			seq_puts(m, "0");
1568 	}
1569 	drm_connector_list_iter_end(&conn_iter);
1570 
1571 	return 0;
1572 }
1573 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
1574 
1575 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
1576 {
1577 	struct drm_i915_private *dev_priv = m->private;
1578 	struct drm_device *dev = &dev_priv->drm;
1579 	struct drm_connector *connector;
1580 	struct drm_connector_list_iter conn_iter;
1581 	struct intel_dp *intel_dp;
1582 
1583 	drm_connector_list_iter_begin(dev, &conn_iter);
1584 	drm_for_each_connector_iter(connector, &conn_iter) {
1585 		struct intel_encoder *encoder;
1586 
1587 		if (connector->connector_type !=
1588 		    DRM_MODE_CONNECTOR_DisplayPort)
1589 			continue;
1590 
1591 		encoder = to_intel_encoder(connector->encoder);
1592 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
1593 			continue;
1594 
1595 		if (encoder && connector->status == connector_status_connected) {
1596 			intel_dp = enc_to_intel_dp(encoder);
1597 			seq_printf(m, "%02lx\n", intel_dp->compliance.test_type);
1598 		} else
1599 			seq_puts(m, "0");
1600 	}
1601 	drm_connector_list_iter_end(&conn_iter);
1602 
1603 	return 0;
1604 }
1605 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
1606 
1607 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
1608 {
1609 	struct drm_i915_private *dev_priv = m->private;
1610 	struct drm_device *dev = &dev_priv->drm;
1611 	int level;
1612 	int num_levels;
1613 
1614 	if (IS_CHERRYVIEW(dev_priv))
1615 		num_levels = 3;
1616 	else if (IS_VALLEYVIEW(dev_priv))
1617 		num_levels = 1;
1618 	else if (IS_G4X(dev_priv))
1619 		num_levels = 3;
1620 	else
1621 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1622 
1623 	drm_modeset_lock_all(dev);
1624 
1625 	for (level = 0; level < num_levels; level++) {
1626 		unsigned int latency = wm[level];
1627 
1628 		/*
1629 		 * - WM1+ latency values in 0.5us units
1630 		 * - latencies are in us on gen9/vlv/chv
1631 		 */
1632 		if (DISPLAY_VER(dev_priv) >= 9 ||
1633 		    IS_VALLEYVIEW(dev_priv) ||
1634 		    IS_CHERRYVIEW(dev_priv) ||
1635 		    IS_G4X(dev_priv))
1636 			latency *= 10;
1637 		else if (level > 0)
1638 			latency *= 5;
1639 
1640 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
1641 			   level, wm[level], latency / 10, latency % 10);
1642 	}
1643 
1644 	drm_modeset_unlock_all(dev);
1645 }
1646 
1647 static int pri_wm_latency_show(struct seq_file *m, void *data)
1648 {
1649 	struct drm_i915_private *dev_priv = m->private;
1650 	const u16 *latencies;
1651 
1652 	if (DISPLAY_VER(dev_priv) >= 9)
1653 		latencies = dev_priv->wm.skl_latency;
1654 	else
1655 		latencies = dev_priv->wm.pri_latency;
1656 
1657 	wm_latency_show(m, latencies);
1658 
1659 	return 0;
1660 }
1661 
1662 static int spr_wm_latency_show(struct seq_file *m, void *data)
1663 {
1664 	struct drm_i915_private *dev_priv = m->private;
1665 	const u16 *latencies;
1666 
1667 	if (DISPLAY_VER(dev_priv) >= 9)
1668 		latencies = dev_priv->wm.skl_latency;
1669 	else
1670 		latencies = dev_priv->wm.spr_latency;
1671 
1672 	wm_latency_show(m, latencies);
1673 
1674 	return 0;
1675 }
1676 
1677 static int cur_wm_latency_show(struct seq_file *m, void *data)
1678 {
1679 	struct drm_i915_private *dev_priv = m->private;
1680 	const u16 *latencies;
1681 
1682 	if (DISPLAY_VER(dev_priv) >= 9)
1683 		latencies = dev_priv->wm.skl_latency;
1684 	else
1685 		latencies = dev_priv->wm.cur_latency;
1686 
1687 	wm_latency_show(m, latencies);
1688 
1689 	return 0;
1690 }
1691 
1692 static int pri_wm_latency_open(struct inode *inode, struct file *file)
1693 {
1694 	struct drm_i915_private *dev_priv = inode->i_private;
1695 
1696 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
1697 		return -ENODEV;
1698 
1699 	return single_open(file, pri_wm_latency_show, dev_priv);
1700 }
1701 
1702 static int spr_wm_latency_open(struct inode *inode, struct file *file)
1703 {
1704 	struct drm_i915_private *dev_priv = inode->i_private;
1705 
1706 	if (HAS_GMCH(dev_priv))
1707 		return -ENODEV;
1708 
1709 	return single_open(file, spr_wm_latency_show, dev_priv);
1710 }
1711 
1712 static int cur_wm_latency_open(struct inode *inode, struct file *file)
1713 {
1714 	struct drm_i915_private *dev_priv = inode->i_private;
1715 
1716 	if (HAS_GMCH(dev_priv))
1717 		return -ENODEV;
1718 
1719 	return single_open(file, cur_wm_latency_show, dev_priv);
1720 }
1721 
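/*
 * Common write helper for the watermark latency files: parse exactly as
 * many space-separated values as the platform has watermark levels and
 * overwrite the given latency table under the modeset locks.
 */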
1722 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
1723 				size_t len, loff_t *offp, u16 wm[8])
1724 {
1725 	struct seq_file *m = file->private_data;
1726 	struct drm_i915_private *dev_priv = m->private;
1727 	struct drm_device *dev = &dev_priv->drm;
1728 	u16 new[8] = { 0 };
1729 	int num_levels;
1730 	int level;
1731 	int ret;
1732 	char tmp[32];
1733 
1734 	if (IS_CHERRYVIEW(dev_priv))
1735 		num_levels = 3;
1736 	else if (IS_VALLEYVIEW(dev_priv))
1737 		num_levels = 1;
1738 	else if (IS_G4X(dev_priv))
1739 		num_levels = 3;
1740 	else
1741 		num_levels = ilk_wm_max_level(dev_priv) + 1;
1742 
1743 	if (len >= sizeof(tmp))
1744 		return -EINVAL;
1745 
1746 	if (copy_from_user(tmp, ubuf, len))
1747 		return -EFAULT;
1748 
1749 	tmp[len] = '\0';
1750 
1751 	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
1752 		     &new[0], &new[1], &new[2], &new[3],
1753 		     &new[4], &new[5], &new[6], &new[7]);
1754 	if (ret != num_levels)
1755 		return -EINVAL;
1756 
1757 	drm_modeset_lock_all(dev);
1758 
1759 	for (level = 0; level < num_levels; level++)
1760 		wm[level] = new[level];
1761 
1762 	drm_modeset_unlock_all(dev);
1763 
1764 	return len;
1765 }
1766 
1767 
1768 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
1769 				    size_t len, loff_t *offp)
1770 {
1771 	struct seq_file *m = file->private_data;
1772 	struct drm_i915_private *dev_priv = m->private;
1773 	u16 *latencies;
1774 
1775 	if (DISPLAY_VER(dev_priv) >= 9)
1776 		latencies = dev_priv->wm.skl_latency;
1777 	else
1778 		latencies = dev_priv->wm.pri_latency;
1779 
1780 	return wm_latency_write(file, ubuf, len, offp, latencies);
1781 }
1782 
1783 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
1784 				    size_t len, loff_t *offp)
1785 {
1786 	struct seq_file *m = file->private_data;
1787 	struct drm_i915_private *dev_priv = m->private;
1788 	u16 *latencies;
1789 
1790 	if (DISPLAY_VER(dev_priv) >= 9)
1791 		latencies = dev_priv->wm.skl_latency;
1792 	else
1793 		latencies = dev_priv->wm.spr_latency;
1794 
1795 	return wm_latency_write(file, ubuf, len, offp, latencies);
1796 }
1797 
1798 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
1799 				    size_t len, loff_t *offp)
1800 {
1801 	struct seq_file *m = file->private_data;
1802 	struct drm_i915_private *dev_priv = m->private;
1803 	u16 *latencies;
1804 
1805 	if (DISPLAY_VER(dev_priv) >= 9)
1806 		latencies = dev_priv->wm.skl_latency;
1807 	else
1808 		latencies = dev_priv->wm.cur_latency;
1809 
1810 	return wm_latency_write(file, ubuf, len, offp, latencies);
1811 }
1812 
1813 static const struct file_operations i915_pri_wm_latency_fops = {
1814 	.owner = THIS_MODULE,
1815 	.open = pri_wm_latency_open,
1816 	.read = seq_read,
1817 	.llseek = seq_lseek,
1818 	.release = single_release,
1819 	.write = pri_wm_latency_write
1820 };
1821 
1822 static const struct file_operations i915_spr_wm_latency_fops = {
1823 	.owner = THIS_MODULE,
1824 	.open = spr_wm_latency_open,
1825 	.read = seq_read,
1826 	.llseek = seq_lseek,
1827 	.release = single_release,
1828 	.write = spr_wm_latency_write
1829 };
1830 
1831 static const struct file_operations i915_cur_wm_latency_fops = {
1832 	.owner = THIS_MODULE,
1833 	.open = cur_wm_latency_open,
1834 	.read = seq_read,
1835 	.llseek = seq_lseek,
1836 	.release = single_release,
1837 	.write = cur_wm_latency_write
1838 };
1839 
1840 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
1841 {
1842 	struct drm_i915_private *dev_priv = m->private;
1843 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1844 
1845 	/* Synchronize with everything first in case there's been an HPD
1846 	 * storm, but we haven't finished handling it in the kernel yet
1847 	 */
1848 	intel_synchronize_irq(dev_priv);
1849 	flush_work(&dev_priv->hotplug.dig_port_work);
1850 	flush_delayed_work(&dev_priv->hotplug.hotplug_work);
1851 
1852 	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
1853 	seq_printf(m, "Detected: %s\n",
1854 		   yesno(delayed_work_pending(&hotplug->reenable_work)));
1855 
1856 	return 0;
1857 }
1858 
1859 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
1860 					const char __user *ubuf, size_t len,
1861 					loff_t *offp)
1862 {
1863 	struct seq_file *m = file->private_data;
1864 	struct drm_i915_private *dev_priv = m->private;
1865 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1866 	unsigned int new_threshold;
1867 	int i;
1868 	char *newline;
1869 	char tmp[16];
1870 
1871 	if (len >= sizeof(tmp))
1872 		return -EINVAL;
1873 
1874 	if (copy_from_user(tmp, ubuf, len))
1875 		return -EFAULT;
1876 
1877 	tmp[len] = '\0';
1878 
1879 	/* Strip newline, if any */
1880 	newline = strchr(tmp, '\n');
1881 	if (newline)
1882 		*newline = '\0';
1883 
1884 	if (strcmp(tmp, "reset") == 0)
1885 		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
1886 	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
1887 		return -EINVAL;
1888 
1889 	if (new_threshold > 0)
1890 		drm_dbg_kms(&dev_priv->drm,
1891 			    "Setting HPD storm detection threshold to %d\n",
1892 			    new_threshold);
1893 	else
1894 		drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
1895 
1896 	spin_lock_irq(&dev_priv->irq_lock);
1897 	hotplug->hpd_storm_threshold = new_threshold;
1898 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1899 	for_each_hpd_pin(i)
1900 		hotplug->stats[i].count = 0;
1901 	spin_unlock_irq(&dev_priv->irq_lock);
1902 
1903 	/* Re-enable hpd immediately if we were in an irq storm */
1904 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1905 
1906 	return len;
1907 }
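/*
 * Usage sketch (path is an assumption): writing a decimal count sets the HPD
 * storm detection threshold, "reset" restores HPD_STORM_DEFAULT_THRESHOLD,
 * and 0 disables storm detection entirely, for example
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 *
 * Each write also clears the per-pin storm statistics and flushes the
 * re-enable work so hotplug interrupts come back immediately.
 */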
1908 
1909 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
1910 {
1911 	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
1912 }
1913 
1914 static const struct file_operations i915_hpd_storm_ctl_fops = {
1915 	.owner = THIS_MODULE,
1916 	.open = i915_hpd_storm_ctl_open,
1917 	.read = seq_read,
1918 	.llseek = seq_lseek,
1919 	.release = single_release,
1920 	.write = i915_hpd_storm_ctl_write
1921 };
1922 
1923 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
1924 {
1925 	struct drm_i915_private *dev_priv = m->private;
1926 
1927 	seq_printf(m, "Enabled: %s\n",
1928 		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));
1929 
1930 	return 0;
1931 }
1932 
1933 static int
1934 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
1935 {
1936 	return single_open(file, i915_hpd_short_storm_ctl_show,
1937 			   inode->i_private);
1938 }
1939 
1940 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
1941 					      const char __user *ubuf,
1942 					      size_t len, loff_t *offp)
1943 {
1944 	struct seq_file *m = file->private_data;
1945 	struct drm_i915_private *dev_priv = m->private;
1946 	struct i915_hotplug *hotplug = &dev_priv->hotplug;
1947 	char *newline;
1948 	char tmp[16];
1949 	int i;
1950 	bool new_state;
1951 
1952 	if (len >= sizeof(tmp))
1953 		return -EINVAL;
1954 
1955 	if (copy_from_user(tmp, ubuf, len))
1956 		return -EFAULT;
1957 
1958 	tmp[len] = '\0';
1959 
1960 	/* Strip newline, if any */
1961 	newline = strchr(tmp, '\n');
1962 	if (newline)
1963 		*newline = '\0';
1964 
1965 	/* Reset to the "default" state for this system */
1966 	if (strcmp(tmp, "reset") == 0)
1967 		new_state = !HAS_DP_MST(dev_priv);
1968 	else if (kstrtobool(tmp, &new_state) != 0)
1969 		return -EINVAL;
1970 
1971 	drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
1972 		    new_state ? "En" : "Dis");
1973 
1974 	spin_lock_irq(&dev_priv->irq_lock);
1975 	hotplug->hpd_short_storm_enabled = new_state;
1976 	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
1977 	for_each_hpd_pin(i)
1978 		hotplug->stats[i].count = 0;
1979 	spin_unlock_irq(&dev_priv->irq_lock);
1980 
1981 	/* Re-enable hpd immediately if we were in an irq storm */
1982 	flush_delayed_work(&dev_priv->hotplug.reenable_work);
1983 
1984 	return len;
1985 }
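/*
 * Usage sketch (path is an assumption): the short-storm control accepts any
 * boolean string kstrtobool() understands (1/0, y/n, ...), or "reset" to
 * return to the platform default, which is enabled only when the device has
 * no DP-MST support, for example
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 */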
1986 
1987 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
1988 	.owner = THIS_MODULE,
1989 	.open = i915_hpd_short_storm_ctl_open,
1990 	.read = seq_read,
1991 	.llseek = seq_lseek,
1992 	.release = single_release,
1993 	.write = i915_hpd_short_storm_ctl_write,
1994 };
1995 
1996 static int i915_drrs_ctl_set(void *data, u64 val)
1997 {
1998 	struct drm_i915_private *dev_priv = data;
1999 	struct drm_device *dev = &dev_priv->drm;
2000 	struct intel_crtc *crtc;
2001 
2002 	if (DISPLAY_VER(dev_priv) < 7)
2003 		return -ENODEV;
2004 
2005 	for_each_intel_crtc(dev, crtc) {
2006 		struct drm_connector_list_iter conn_iter;
2007 		struct intel_crtc_state *crtc_state;
2008 		struct drm_connector *connector;
2009 		struct drm_crtc_commit *commit;
2010 		int ret;
2011 
2012 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
2013 		if (ret)
2014 			return ret;
2015 
2016 		crtc_state = to_intel_crtc_state(crtc->base.state);
2017 
2018 		if (!crtc_state->hw.active ||
2019 		    !crtc_state->has_drrs)
2020 			goto out;
2021 
2022 		commit = crtc_state->uapi.commit;
2023 		if (commit) {
2024 			ret = wait_for_completion_interruptible(&commit->hw_done);
2025 			if (ret)
2026 				goto out;
2027 		}
2028 
2029 		drm_connector_list_iter_begin(dev, &conn_iter);
2030 		drm_for_each_connector_iter(connector, &conn_iter) {
2031 			struct intel_encoder *encoder;
2032 			struct intel_dp *intel_dp;
2033 
2034 			if (!(crtc_state->uapi.connector_mask &
2035 			      drm_connector_mask(connector)))
2036 				continue;
2037 
2038 			encoder = intel_attached_encoder(to_intel_connector(connector));
2039 			if (encoder->type != INTEL_OUTPUT_EDP)
2040 				continue;
2041 
2042 			drm_dbg(&dev_priv->drm,
2043 				"Manually %sabling DRRS. %llu\n",
2044 				val ? "en" : "dis", val);
2045 
2046 			intel_dp = enc_to_intel_dp(encoder);
2047 			if (val)
2048 				intel_edp_drrs_enable(intel_dp,
2049 						      crtc_state);
2050 			else
2051 				intel_edp_drrs_disable(intel_dp,
2052 						       crtc_state);
2053 		}
2054 		drm_connector_list_iter_end(&conn_iter);
2055 
2056 out:
2057 		drm_modeset_unlock(&crtc->base.mutex);
2058 		if (ret)
2059 			return ret;
2060 	}
2061 
2062 	return 0;
2063 }
2064 
2065 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
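/*
 * Usage sketch (path is an assumption): the attribute is write-only (the get
 * callback is NULL); a non-zero value enables eDP DRRS on every active CRTC
 * whose current state supports DRRS, and zero disables it again, for example
 *
 *   echo 0 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */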
2066 
2067 static ssize_t
2068 i915_fifo_underrun_reset_write(struct file *filp,
2069 			       const char __user *ubuf,
2070 			       size_t cnt, loff_t *ppos)
2071 {
2072 	struct drm_i915_private *dev_priv = filp->private_data;
2073 	struct intel_crtc *crtc;
2074 	struct drm_device *dev = &dev_priv->drm;
2075 	int ret;
2076 	bool reset;
2077 
2078 	ret = kstrtobool_from_user(ubuf, cnt, &reset);
2079 	if (ret)
2080 		return ret;
2081 
2082 	if (!reset)
2083 		return cnt;
2084 
2085 	for_each_intel_crtc(dev, crtc) {
2086 		struct drm_crtc_commit *commit;
2087 		struct intel_crtc_state *crtc_state;
2088 
2089 		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
2090 		if (ret)
2091 			return ret;
2092 
2093 		crtc_state = to_intel_crtc_state(crtc->base.state);
2094 		commit = crtc_state->uapi.commit;
2095 		if (commit) {
2096 			ret = wait_for_completion_interruptible(&commit->hw_done);
2097 			if (!ret)
2098 				ret = wait_for_completion_interruptible(&commit->flip_done);
2099 		}
2100 
2101 		if (!ret && crtc_state->hw.active) {
2102 			drm_dbg_kms(&dev_priv->drm,
2103 				    "Re-arming FIFO underruns on pipe %c\n",
2104 				    pipe_name(crtc->pipe));
2105 
2106 			intel_crtc_arm_fifo_underrun(crtc, crtc_state);
2107 		}
2108 
2109 		drm_modeset_unlock(&crtc->base.mutex);
2110 
2111 		if (ret)
2112 			return ret;
2113 	}
2114 
2115 	ret = intel_fbc_reset_underrun(dev_priv);
2116 	if (ret)
2117 		return ret;
2118 
2119 	return cnt;
2120 }
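/*
 * Usage sketch (path is an assumption): any value kstrtobool() parses as true
 * re-arms FIFO underrun reporting on all active pipes and resets the FBC
 * underrun state; a false value is accepted but does nothing, for example
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */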
2121 
2122 static const struct file_operations i915_fifo_underrun_reset_ops = {
2123 	.owner = THIS_MODULE,
2124 	.open = simple_open,
2125 	.write = i915_fifo_underrun_reset_write,
2126 	.llseek = default_llseek,
2127 };
2128 
2129 static const struct drm_info_list intel_display_debugfs_list[] = {
2130 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
2131 	{"i915_fbc_status", i915_fbc_status, 0},
2132 	{"i915_ips_status", i915_ips_status, 0},
2133 	{"i915_sr_status", i915_sr_status, 0},
2134 	{"i915_opregion", i915_opregion, 0},
2135 	{"i915_vbt", i915_vbt, 0},
2136 	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
2137 	{"i915_edp_psr_status", i915_edp_psr_status, 0},
2138 	{"i915_power_domain_info", i915_power_domain_info, 0},
2139 	{"i915_dmc_info", i915_dmc_info, 0},
2140 	{"i915_display_info", i915_display_info, 0},
2141 	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
2142 	{"i915_dp_mst_info", i915_dp_mst_info, 0},
2143 	{"i915_ddb_info", i915_ddb_info, 0},
2144 	{"i915_drrs_status", i915_drrs_status, 0},
2145 	{"i915_lpsp_status", i915_lpsp_status, 0},
2146 };
2147 
2148 static const struct {
2149 	const char *name;
2150 	const struct file_operations *fops;
2151 } intel_display_debugfs_files[] = {
2152 	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
2153 	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
2154 	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
2155 	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
2156 	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
2157 	{"i915_dp_test_data", &i915_displayport_test_data_fops},
2158 	{"i915_dp_test_type", &i915_displayport_test_type_fops},
2159 	{"i915_dp_test_active", &i915_displayport_test_active_fops},
2160 	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
2161 	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
2162 	{"i915_ipc_status", &i915_ipc_status_fops},
2163 	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
2164 	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
2165 };
2166 
2167 void intel_display_debugfs_register(struct drm_i915_private *i915)
2168 {
2169 	struct drm_minor *minor = i915->drm.primary;
2170 	int i;
2171 
2172 	for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
2173 		debugfs_create_file(intel_display_debugfs_files[i].name,
2174 				    S_IRUGO | S_IWUSR,
2175 				    minor->debugfs_root,
2176 				    to_i915(minor->dev),
2177 				    intel_display_debugfs_files[i].fops);
2178 	}
2179 
2180 	drm_debugfs_create_files(intel_display_debugfs_list,
2181 				 ARRAY_SIZE(intel_display_debugfs_list),
2182 				 minor->debugfs_root, minor);
2183 }
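/*
 * Note (an assumption about the runtime environment, not something this file
 * guarantees): on a Linux build with debugfs available, the files registered
 * above appear under the DRM minor's debugfs root, conventionally
 * /sys/kernel/debug/dri/<minor>/.
 */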
2184 
2185 static int i915_panel_show(struct seq_file *m, void *data)
2186 {
2187 	struct drm_connector *connector = m->private;
2188 	struct intel_dp *intel_dp =
2189 		intel_attached_dp(to_intel_connector(connector));
2190 
2191 	if (connector->status != connector_status_connected)
2192 		return -ENODEV;
2193 
2194 	seq_printf(m, "Panel power up delay: %d\n",
2195 		   intel_dp->pps.panel_power_up_delay);
2196 	seq_printf(m, "Panel power down delay: %d\n",
2197 		   intel_dp->pps.panel_power_down_delay);
2198 	seq_printf(m, "Backlight on delay: %d\n",
2199 		   intel_dp->pps.backlight_on_delay);
2200 	seq_printf(m, "Backlight off delay: %d\n",
2201 		   intel_dp->pps.backlight_off_delay);
2202 
2203 	return 0;
2204 }
2205 DEFINE_SHOW_ATTRIBUTE(i915_panel);
2206 
2207 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
2208 {
2209 	struct drm_connector *connector = m->private;
2210 	struct drm_i915_private *i915 = to_i915(connector->dev);
2211 	struct intel_connector *intel_connector = to_intel_connector(connector);
2212 	int ret;
2213 
2214 	ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
2215 	if (ret)
2216 		return ret;
2217 
2218 	if (!connector->encoder || connector->status != connector_status_connected) {
2219 		ret = -ENODEV;
2220 		goto out;
2221 	}
2222 
2223 	seq_printf(m, "%s:%d HDCP version: ", connector->name,
2224 		   connector->base.id);
2225 	intel_hdcp_info(m, intel_connector);
2226 
2227 out:
2228 	drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
2229 
2230 	return ret;
2231 }
2232 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
2233 
2234 static int i915_psr_status_show(struct seq_file *m, void *data)
2235 {
2236 	struct drm_connector *connector = m->private;
2237 	struct intel_dp *intel_dp =
2238 		intel_attached_dp(to_intel_connector(connector));
2239 
2240 	return intel_psr_status(m, intel_dp);
2241 }
2242 DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
2243 
2244 #define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
2245 				seq_puts(m, "LPSP: incapable\n"))
2246 
2247 static int i915_lpsp_capability_show(struct seq_file *m, void *data)
2248 {
2249 	struct drm_connector *connector = m->private;
2250 	struct drm_i915_private *i915 = to_i915(connector->dev);
2251 	struct intel_encoder *encoder;
2252 
2253 	encoder = intel_attached_encoder(to_intel_connector(connector));
2254 	if (!encoder)
2255 		return -ENODEV;
2256 
2257 	if (connector->status != connector_status_connected)
2258 		return -ENODEV;
2259 
2260 	if (DISPLAY_VER(i915) >= 13) {
2261 		LPSP_CAPABLE(encoder->port <= PORT_B);
2262 		return 0;
2263 	}
2264 
2265 	switch (DISPLAY_VER(i915)) {
2266 	case 12:
2267 		/*
2268 		 * TGL can actually drive LPSP on ports up to DDI_C, but no
2269 		 * TGL SKU has DDI_C physically connected, and the driver
2270 		 * does not even initialize the DDI_C port for gen12.
2271 		 */
2272 		LPSP_CAPABLE(encoder->port <= PORT_B);
2273 		break;
2274 	case 11:
2275 		LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2276 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2277 		break;
2278 	case 10:
2279 	case 9:
2280 		LPSP_CAPABLE(encoder->port == PORT_A &&
2281 			     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2282 			     connector->connector_type == DRM_MODE_CONNECTOR_eDP  ||
2283 			     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
2284 		break;
2285 	default:
2286 		if (IS_HASWELL(i915) || IS_BROADWELL(i915))
2287 			LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
2288 	}
2289 
2290 	return 0;
2291 }
2292 DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
2293 
2294 static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
2295 {
2296 	struct drm_connector *connector = m->private;
2297 	struct drm_device *dev = connector->dev;
2298 	struct drm_crtc *crtc;
2299 	struct intel_dp *intel_dp;
2300 	struct drm_modeset_acquire_ctx ctx;
2301 	struct intel_crtc_state *crtc_state = NULL;
2302 	int ret = 0;
2303 	bool try_again = false;
2304 
2305 	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
2306 
2307 	do {
2308 		try_again = false;
2309 		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
2310 				       &ctx);
2311 		if (ret) {
2312 			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
2313 				try_again = true;
2314 				continue;
2315 			}
2316 			break;
2317 		}
2318 		crtc = connector->state->crtc;
2319 		if (connector->status != connector_status_connected || !crtc) {
2320 			ret = -ENODEV;
2321 			break;
2322 		}
2323 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2324 		if (ret == -EDEADLK) {
2325 			ret = drm_modeset_backoff(&ctx);
2326 			if (!ret) {
2327 				try_again = true;
2328 				continue;
2329 			}
2330 			break;
2331 		} else if (ret) {
2332 			break;
2333 		}
2334 		intel_dp = intel_attached_dp(to_intel_connector(connector));
2335 		crtc_state = to_intel_crtc_state(crtc->state);
2336 		seq_printf(m, "DSC_Enabled: %s\n",
2337 			   yesno(crtc_state->dsc.compression_enable));
2338 		seq_printf(m, "DSC_Sink_Support: %s\n",
2339 			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
2340 		seq_printf(m, "Force_DSC_Enable: %s\n",
2341 			   yesno(intel_dp->force_dsc_en));
2342 		if (!intel_dp_is_edp(intel_dp))
2343 			seq_printf(m, "FEC_Sink_Support: %s\n",
2344 				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
2345 	} while (try_again);
2346 
2347 	drm_modeset_drop_locks(&ctx);
2348 	drm_modeset_acquire_fini(&ctx);
2349 
2350 	return ret;
2351 }
2352 
2353 static ssize_t i915_dsc_fec_support_write(struct file *file,
2354 					  const char __user *ubuf,
2355 					  size_t len, loff_t *offp)
2356 {
2357 	bool dsc_enable = false;
2358 	int ret;
2359 	struct drm_connector *connector =
2360 		((struct seq_file *)file->private_data)->private;
2361 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2362 	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2363 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2364 
2365 	if (len == 0)
2366 		return 0;
2367 
2368 	drm_dbg(&i915->drm,
2369 		"Copied %zu bytes from user to force DSC\n", len);
2370 
2371 	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
2372 	if (ret < 0)
2373 		return ret;
2374 
2375 	drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
2376 		(dsc_enable) ? "true" : "false");
2377 	intel_dp->force_dsc_en = dsc_enable;
2378 
2379 	*offp += len;
2380 	return len;
2381 }
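/*
 * Usage sketch (path and connector name are assumptions): i915_dsc_fec_support
 * lives in the connector's own debugfs directory; reading it reports the
 * current DSC/FEC state, while writing a boolean sets intel_dp->force_dsc_en,
 * which is consumed elsewhere in the DP code on a later modeset, for example
 *
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */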
2382 
2383 static int i915_dsc_fec_support_open(struct inode *inode,
2384 				     struct file *file)
2385 {
2386 	return single_open(file, i915_dsc_fec_support_show,
2387 			   inode->i_private);
2388 }
2389 
2390 static const struct file_operations i915_dsc_fec_support_fops = {
2391 	.owner = THIS_MODULE,
2392 	.open = i915_dsc_fec_support_open,
2393 	.read = seq_read,
2394 	.llseek = seq_lseek,
2395 	.release = single_release,
2396 	.write = i915_dsc_fec_support_write
2397 };
2398 
2399 static int i915_dsc_bpp_show(struct seq_file *m, void *data)
2400 {
2401 	struct drm_connector *connector = m->private;
2402 	struct drm_device *dev = connector->dev;
2403 	struct drm_crtc *crtc;
2404 	struct intel_crtc_state *crtc_state;
2405 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2406 	int ret;
2407 
2408 	if (!encoder)
2409 		return -ENODEV;
2410 
2411 	ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
2412 	if (ret)
2413 		return ret;
2414 
2415 	crtc = connector->state->crtc;
2416 	if (connector->status != connector_status_connected || !crtc) {
2417 		ret = -ENODEV;
2418 		goto out;
2419 	}
2420 
2421 	crtc_state = to_intel_crtc_state(crtc->state);
2422 	seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);
2423 
2424 out:	drm_modeset_unlock(&dev->mode_config.connection_mutex);
2425 
2426 	return ret;
2427 }
2428 
2429 static ssize_t i915_dsc_bpp_write(struct file *file,
2430 				  const char __user *ubuf,
2431 				  size_t len, loff_t *offp)
2432 {
2433 	struct drm_connector *connector =
2434 		((struct seq_file *)file->private_data)->private;
2435 	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
2436 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2437 	int dsc_bpp = 0;
2438 	int ret;
2439 
2440 	ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
2441 	if (ret < 0)
2442 		return ret;
2443 
2444 	intel_dp->force_dsc_bpp = dsc_bpp;
2445 	*offp += len;
2446 
2447 	return len;
2448 }
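/*
 * Usage sketch (path and connector name are assumptions): writing a decimal
 * value stores it in intel_dp->force_dsc_bpp, while reading the same file
 * reports the compressed BPP of the connector's current CRTC state, for
 * example
 *
 *   echo 12 > /sys/kernel/debug/dri/0/eDP-1/i915_dsc_bpp
 */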
2449 
2450 static int i915_dsc_bpp_open(struct inode *inode,
2451 			     struct file *file)
2452 {
2453 	return single_open(file, i915_dsc_bpp_show,
2454 			   inode->i_private);
2455 }
2456 
2457 static const struct file_operations i915_dsc_bpp_fops = {
2458 	.owner = THIS_MODULE,
2459 	.open = i915_dsc_bpp_open,
2460 	.read = seq_read,
2461 	.llseek = seq_lseek,
2462 	.release = single_release,
2463 	.write = i915_dsc_bpp_write
2464 };
2465 
2466 /**
2467  * intel_connector_debugfs_add - add i915 specific connector debugfs files
2468  * @connector: pointer to a registered drm_connector
2469  *
2470  * Cleanup will be done by drm_connector_unregister() through a call to
2471  * drm_debugfs_connector_remove().
2472  *
2473  * Returns 0 on success, negative error codes on error.
2474  */
2475 int intel_connector_debugfs_add(struct drm_connector *connector)
2476 {
2477 	struct dentry *root = connector->debugfs_entry;
2478 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
2479 
2480 	/* The connector must have been registered beforehand. */
2481 	if (!root)
2482 		return -ENODEV;
2483 
2484 	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2485 		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
2486 				    connector, &i915_panel_fops);
2487 		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
2488 				    connector, &i915_psr_sink_status_fops);
2489 	}
2490 
2491 	if (HAS_PSR(dev_priv) &&
2492 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
2493 		debugfs_create_file("i915_psr_status", 0444, root,
2494 				    connector, &i915_psr_status_fops);
2495 	}
2496 
2497 	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2498 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2499 	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
2500 		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
2501 				    connector, &i915_hdcp_sink_capability_fops);
2502 	}
2503 
2504 	if (DISPLAY_VER(dev_priv) >= 11 &&
2505 	    ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
2506 	    !to_intel_connector(connector)->mst_port) ||
2507 	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
2508 		debugfs_create_file("i915_dsc_fec_support", 0644, root,
2509 				    connector, &i915_dsc_fec_support_fops);
2510 
2511 		debugfs_create_file("i915_dsc_bpp", 0644, root,
2512 				    connector, &i915_dsc_bpp_fops);
2513 	}
2514 
2515 	/* Legacy panels don't support LPSP on any platform */
2516 	if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
2517 	     IS_BROADWELL(dev_priv)) &&
2518 	     (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
2519 	     connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
2520 	     connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
2521 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
2522 	     connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
2523 		debugfs_create_file("i915_lpsp_capability", 0444, root,
2524 				    connector, &i915_lpsp_capability_fops);
2525 
2526 	return 0;
2527 }
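/*
 * Note (hedged): the per-connector files above are created in the connector's
 * own debugfs directory, which the DRM core names after the connector (for
 * example .../dri/0/eDP-1/); if the connector has not been registered yet,
 * connector->debugfs_entry is NULL and the function returns -ENODEV.
 */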
2528 
2529 /**
2530  * intel_crtc_debugfs_add - add i915 specific crtc debugfs files
2531  * @crtc: pointer to a drm_crtc
2532  *
2533  * Returns 0 on success, negative error codes on error.
2534  *
2535  * Failure to add debugfs entries should generally be ignored.
2536  */
2537 int intel_crtc_debugfs_add(struct drm_crtc *crtc)
2538 {
2539 	if (!crtc->debugfs_entry)
2540 		return -ENODEV;
2541 
2542 	crtc_updates_add(crtc);
2543 	return 0;
2544 }
2545