/*	$NetBSD: i915_debugfs.c,v 1.5 2021/12/18 23:45:28 riastradh Exp $	*/

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_debugfs.c,v 1.5 2021/12/18 23:45:28 riastradh Exp $");

#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
#include "display/intel_hdmi.h"
#include "display/intel_psr.h"

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/uc/intel_guc_submission.h"

#include "i915_debugfs.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_csr.h"
#include "intel_pm.h"
#include "intel_sideband.h"

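/*
 * Each debugfs node created by drm_debugfs carries a struct drm_info_node
 * as its private data; recover our device-private structure from it.
 */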
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}

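/* Dump PCH type, static/runtime device info, driver caps and modparams. */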
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));

	intel_device_info_print_static(INTEL_INFO(i915), &p);
	intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
	intel_driver_caps_print(&i915->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}

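/*
 * Single-character status flags used by describe_obj(): tiling mode,
 * outstanding userspace GTT mmaps, and kernel pin mappings.
 */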
static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}

static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case 0:
		return "";
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (!buf)
			return "M";

		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		buf[x-2] = '\0';

		return buf;
	}
}

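/*
 * Print a one-line summary of a GEM object: status flags, size, cache
 * domains, and every VMA currently bound to it.  The vma list lock is
 * dropped around each print, so the walk is only advisory.
 */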
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}

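/*
 * Accumulator for per-client object statistics: with @vm set, only VMAs
 * in that address space are counted, otherwise the GGTT VMAs are.
 */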
struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 active, inactive;
	u64 closed;
};

static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	i915_gem_object_put(obj);
	return 0;
}

#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)

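/*
 * Walk the global context list, accounting the objects backing each
 * context (ring buffer and context state) against the kernel, and the
 * contents of each client's object idr against that client.
 */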
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}

static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_memory_region *mr;
	enum intel_region_id id;

	seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
		   i915->mm.shrink_count,
		   atomic_read(&i915->mm.free_count),
		   i915->mm.shrink_memory);
	for_each_memory_region(mr, i915, id)
		seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
			   mr->name, &mr->total, &mr->avail);
	seq_putc(m, '\n');

	print_context_stats(m, i915);

	return 0;
}

static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}

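/*
 * The interrupt register layout differs wildly between platforms, so
 * dump the mask/identity/enable registers per generation below.
 */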
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		seq_printf(m, "North Display Interrupt enable:		%08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:	%08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:		%08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:		%08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:	%08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:		%08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:		%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:		%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:		%08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):	%08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	unsigned int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	rcu_read_lock();
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}

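/*
 * GPU error state: i915_gpu_info captures a fresh coredump on open,
 * while i915_error_state exposes the first recorded error and clears
 * it on write.
 */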
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
			      size_t count, loff_t *pos)
{
	struct i915_gpu_coredump *error;
	ssize_t ret;
	void *buf;

	error = file->private_data;
	if (!error)
		return 0;

	/* Bounce buffer required because of kernfs __user API convenience. */
	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
	if (ret <= 0)
		goto out;

	if (!copy_to_user(ubuf, buf, ret))
		*pos += ret;
	else
		ret = -EFAULT;

out:
	kfree(buf);
	return ret;
}

static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_coredump_put(file->private_data);
	return 0;
}

static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_coredump *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_gpu_coredump(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	file->private_data = gpu;
	return 0;
}

static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};

static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_coredump *error = filp->private_data;

	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}

static int i915_error_state_open(struct inode *inode, struct file *file)
{
	struct i915_gpu_coredump *error;

	error = i915_first_error_state(inode->i_private);
	if (IS_ERR(error))
		return PTR_ERR(error);

	file->private_data = error;
	return 0;
}

static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
#endif

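/*
 * RPS/frequency reporting: ILK uses MEMSWCTL/MEMSTAT, VLV/CHV read the
 * punit, and gen6+ read the RP* registers directly.
 */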
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_rps *rps = &dev_priv->gt.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_GEN(dev_priv, 5)) {
		u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
		u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		vlv_punit_get(dev_priv);
		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		vlv_punit_put(dev_priv);

		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(rps, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_rps_read_actual_frequency(rps);

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(rps, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(rps, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(rps, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(rps, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return ret;
}

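/*
 * DRPC (render C-state) reporting, split per platform: ILK, VLV/CHV and
 * gen6+ each expose different residency and control registers.
 */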
static int ilk_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}

static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake_count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}

static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		seq_printf(m, "%s %u (%llu us)\n", title,
			   intel_uncore_read(&i915->uncore, reg),
			   intel_rc6_residency_us(&i915->gt.rc6, reg));
}

static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}

static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}

static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ilk_drpc_info(m);
	}

	return err;
}

static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}

static int i915_fbc_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_fbc *fbc = &dev_priv->fbc;
	intel_wakeref_t wakeref;

	if (!HAS_FBC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&fbc->lock);

	if (intel_fbc_is_active(dev_priv))
		seq_puts(m, "FBC enabled\n");
	else
		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);

	if (intel_fbc_is_active(dev_priv)) {
		u32 mask;

		if (INTEL_GEN(dev_priv) >= 8)
			mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 7)
			mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
		else if (INTEL_GEN(dev_priv) >= 5)
			mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
		else if (IS_G4X(dev_priv))
			mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
		else
			mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
							FBC_STAT_COMPRESSED);

		seq_printf(m, "Compressing: %s\n", yesno(mask));
	}

	mutex_unlock(&fbc->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_fbc_false_color_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	*val = dev_priv->fbc.false_color;

	return 0;
}

static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");

static int i915_ips_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	if (!HAS_IPS(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "Enabled by kernel parameter: %s\n",
		   yesno(i915_modparams.enable_ips));

	if (INTEL_GEN(dev_priv) >= 8) {
		seq_puts(m, "Currently: unknown\n");
	} else {
		if (I915_READ(IPS_CTL) & IPS_ENABLE)
			seq_puts(m, "Currently: enabled\n");
		else
			seq_puts(m, "Currently: disabled\n");
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

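/*
 * Report whether panel self-refresh is enabled; the status bit lives in
 * a different register on nearly every platform.
 */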
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}

static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 MHz units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq, NULL);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(rps,
					  (gpu_freq *
					   (IS_GEN9_BC(dev_priv) ||
					    INTEL_GEN(dev_priv) >= 10 ?
					    GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_opregion(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->header)
		seq_write(m, opregion->header, OPREGION_SIZE);

	return 0;
}

static int i915_vbt(struct seq_file *m, void *unused)
{
	struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;

	if (opregion->vbt)
		seq_write(m, opregion->vbt, opregion->vbt_size);

	return 0;
}

static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}

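/*
 * i915_context_status: list every context along with the ring state of
 * each engine it has pinned; describe_ctx_ring() prints the ringbuffer
 * summary for one engine.
 */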
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}

static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		seq_puts(m, "HW context ");
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (intel_context_pin_if_active(ce)) {
				seq_printf(m, "%s: ", ce->engine->name);
				if (ce->state)
					describe_obj(m, ce->state->obj);
				describe_ctx_ring(m, ce->ring);
				seq_putc(m, '\n');
				intel_context_unpin(ce);
			}
		}
		i915_gem_context_unlock_engines(ctx);

		seq_putc(m, '\n');

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	return 0;
}

static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
		return "unknown";
	}

	return "bug";
}

static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_uncore *uncore = &dev_priv->uncore;
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   intel_uncore_read(uncore, DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   intel_uncore_read(uncore, DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   intel_uncore_read16(uncore, C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   intel_uncore_read(uncore, MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   intel_uncore_read(uncore, TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   intel_uncore_read(uncore, GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   intel_uncore_read(uncore, ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   intel_uncore_read(uncore, DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

1615 static const char *rps_power_to_str(unsigned int power)
1616 {
1617 	static const char * const strings[] = {
1618 		[LOW_POWER] = "low power",
1619 		[BETWEEN] = "mixed",
1620 		[HIGH_POWER] = "high power",
1621 	};
1622 
1623 	if (power >= ARRAY_SIZE(strings) || !strings[power])
1624 		return "unknown";
1625 
1626 	return strings[power];
1627 }
1628 
1629 static int i915_rps_boost_info(struct seq_file *m, void *data)
1630 {
1631 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1632 	struct intel_rps *rps = &dev_priv->gt.rps;
1633 
1634 	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1635 	seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1636 	seq_printf(m, "Boosts outstanding? %d\n",
1637 		   atomic_read(&rps->num_waiters));
1638 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1639 	seq_printf(m, "Frequency requested %d, actual %d\n",
1640 		   intel_gpu_freq(rps, rps->cur_freq),
1641 		   intel_rps_read_actual_frequency(rps));
1642 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1643 		   intel_gpu_freq(rps, rps->min_freq),
1644 		   intel_gpu_freq(rps, rps->min_freq_softlimit),
1645 		   intel_gpu_freq(rps, rps->max_freq_softlimit),
1646 		   intel_gpu_freq(rps, rps->max_freq));
1647 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
1648 		   intel_gpu_freq(rps, rps->idle_freq),
1649 		   intel_gpu_freq(rps, rps->efficient_freq),
1650 		   intel_gpu_freq(rps, rps->boost_freq));
1651 
1652 	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
1653 
1654 	if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1655 		u32 rpup, rpupei;
1656 		u32 rpdown, rpdownei;
1657 
1658 		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1659 		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1660 		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1661 		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1662 		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1663 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1664 
1665 		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1666 			   rps_power_to_str(rps->power.mode));
1667 		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
1668 			   rpup && rpupei ? 100 * rpup / rpupei : 0,
1669 			   rps->power.up_threshold);
1670 		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
1671 			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1672 			   rps->power.down_threshold);
1673 	} else {
1674 		seq_puts(m, "\nRPS Autotuning inactive\n");
1675 	}
1676 
1677 	return 0;
1678 }
1679 
1680 static int i915_llc(struct seq_file *m, void *data)
1681 {
1682 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1683 	const bool edram = INTEL_GEN(dev_priv) > 8;
1684 
1685 	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1686 	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1687 		   dev_priv->edram_size_mb);
1688 
1689 	return 0;
1690 }
1691 
1692 static int i915_huc_load_status_info(struct seq_file *m, void *data)
1693 {
1694 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1695 	intel_wakeref_t wakeref;
1696 	struct drm_printer p;
1697 
1698 	if (!HAS_GT_UC(dev_priv))
1699 		return -ENODEV;
1700 
1701 	p = drm_seq_file_printer(m);
1702 	intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
1703 
1704 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1705 		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
1706 
1707 	return 0;
1708 }
1709 
1710 static int i915_guc_load_status_info(struct seq_file *m, void *data)
1711 {
1712 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1713 	intel_wakeref_t wakeref;
1714 	struct drm_printer p;
1715 
1716 	if (!HAS_GT_UC(dev_priv))
1717 		return -ENODEV;
1718 
1719 	p = drm_seq_file_printer(m);
1720 	intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
1721 
1722 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1723 		u32 tmp = I915_READ(GUC_STATUS);
1724 		u32 i;
1725 
1726 		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1727 		seq_printf(m, "\tBootrom status = 0x%x\n",
1728 			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1729 		seq_printf(m, "\tuKernel status = 0x%x\n",
1730 			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1731 		seq_printf(m, "\tMIA Core status = 0x%x\n",
1732 			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1733 		seq_puts(m, "\nScratch registers:\n");
1734 		for (i = 0; i < 16; i++) {
1735 			seq_printf(m, "\t%2d: \t0x%x\n",
1736 				   i, I915_READ(SOFT_SCRATCH(i)));
1737 		}
1738 	}
1739 
1740 	return 0;
1741 }
1742 
1743 static const char *
1744 stringify_guc_log_type(enum guc_log_buffer_type type)
1745 {
1746 	switch (type) {
1747 	case GUC_ISR_LOG_BUFFER:
1748 		return "ISR";
1749 	case GUC_DPC_LOG_BUFFER:
1750 		return "DPC";
1751 	case GUC_CRASH_DUMP_LOG_BUFFER:
1752 		return "CRASH";
1753 	default:
1754 		MISSING_CASE(type);
1755 	}
1756 
1757 	return "";
1758 }
1759 
1760 static void i915_guc_log_info(struct seq_file *m,
1761 			      struct drm_i915_private *dev_priv)
1762 {
1763 	struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
1764 	enum guc_log_buffer_type type;
1765 
1766 	if (!intel_guc_log_relay_created(log)) {
1767 		seq_puts(m, "GuC log relay not created\n");
1768 		return;
1769 	}
1770 
1771 	seq_puts(m, "GuC logging stats:\n");
1772 
1773 	seq_printf(m, "\tRelay full count: %u\n",
1774 		   log->relay.full_count);
1775 
1776 	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1777 		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1778 			   stringify_guc_log_type(type),
1779 			   log->stats[type].flush,
1780 			   log->stats[type].sampled_overflow);
1781 	}
1782 }
1783 
1784 static int i915_guc_info(struct seq_file *m, void *data)
1785 {
1786 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1787 
1788 	if (!USES_GUC(dev_priv))
1789 		return -ENODEV;
1790 
1791 	i915_guc_log_info(m, dev_priv);
1792 
1793 	/* Add more as required ... */
1794 
1795 	return 0;
1796 }
1797 
1798 static int i915_guc_stage_pool(struct seq_file *m, void *data)
1799 {
1800 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
1801 	const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1802 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
1803 	int index;
1804 
1805 	if (!USES_GUC_SUBMISSION(dev_priv))
1806 		return -ENODEV;
1807 
1808 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
1809 		struct intel_engine_cs *engine;
1810 
1811 		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
1812 			continue;
1813 
1814 		seq_printf(m, "GuC stage descriptor %u:\n", index);
1815 		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
1816 		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
1817 		seq_printf(m, "\tPriority: %d\n", desc->priority);
1818 		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
1819 		seq_printf(m, "\tEngines used: 0x%x\n",
1820 			   desc->engines_used);
1821 		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
1822 			   desc->db_trigger_phy,
1823 			   desc->db_trigger_cpu,
1824 			   desc->db_trigger_uk);
1825 		seq_printf(m, "\tProcess descriptor: 0x%x\n",
1826 			   desc->process_desc);
1827 		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
1828 			   desc->wq_addr, desc->wq_size);
1829 		seq_putc(m, '\n');
1830 
1831 		for_each_uabi_engine(engine, dev_priv) {
1832 			u32 guc_engine_id = engine->guc_id;
1833 			struct guc_execlist_context *lrc =
1834 						&desc->lrc[guc_engine_id];
1835 
1836 			seq_printf(m, "\t%s LRC:\n", engine->name);
1837 			seq_printf(m, "\t\tContext desc: 0x%x\n",
1838 				   lrc->context_desc);
1839 			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
1840 			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
1841 			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
1842 			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
1843 			seq_putc(m, '\n');
1844 		}
1845 	}
1846 
1847 	return 0;
1848 }
1849 
1850 static int i915_guc_log_dump(struct seq_file *m, void *data)
1851 {
1852 	struct drm_info_node *node = m->private;
1853 	struct drm_i915_private *dev_priv = node_to_i915(node);
1854 	bool dump_load_err = !!node->info_ent->data;
1855 	struct drm_i915_gem_object *obj = NULL;
1856 	u32 *log;
1857 	int i = 0;
1858 
1859 	if (!HAS_GT_UC(dev_priv))
1860 		return -ENODEV;
1861 
1862 	if (dump_load_err)
1863 		obj = dev_priv->gt.uc.load_err_log;
1864 	else if (dev_priv->gt.uc.guc.log.vma)
1865 		obj = dev_priv->gt.uc.guc.log.vma->obj;
1866 
1867 	if (!obj)
1868 		return 0;
1869 
1870 	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1871 	if (IS_ERR(log)) {
1872 		DRM_DEBUG("Failed to pin object\n");
1873 		seq_puts(m, "(log data inaccessible)\n");
1874 		return PTR_ERR(log);
1875 	}
1876 
1877 	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1878 		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1879 			   *(log + i), *(log + i + 1),
1880 			   *(log + i + 2), *(log + i + 3));
1881 
1882 	seq_putc(m, '\n');
1883 
1884 	i915_gem_object_unpin_map(obj);
1885 
1886 	return 0;
1887 }
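/*
 * The dump above emits the pinned log buffer as rows of four 32-bit
 * words; a line of output looks like (contents illustrative only):
 *
 *	0x00000000 0xdeadbeef 0x00000001 0x00000040
 */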
1888 
1889 static int i915_guc_log_level_get(void *data, u64 *val)
1890 {
1891 	struct drm_i915_private *dev_priv = data;
1892 
1893 	if (!USES_GUC(dev_priv))
1894 		return -ENODEV;
1895 
1896 	*val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
1897 
1898 	return 0;
1899 }
1900 
1901 static int i915_guc_log_level_set(void *data, u64 val)
1902 {
1903 	struct drm_i915_private *dev_priv = data;
1904 
1905 	if (!USES_GUC(dev_priv))
1906 		return -ENODEV;
1907 
1908 	return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
1909 }
1910 
1911 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
1912 			i915_guc_log_level_get, i915_guc_log_level_set,
1913 			"%lld\n");
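/*
 * Sketch of the userspace side (the debugfs path and node name are
 * assumptions; they depend on how this fops is registered elsewhere):
 *
 *	# cat /sys/kernel/debug/dri/0/i915_guc_log_level
 *	# echo 2 > /sys/kernel/debug/dri/0/i915_guc_log_level
 *
 * The written value is parsed with the "%lld" format above and passed
 * straight to intel_guc_log_set_level().
 */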
1914 
1915 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
1916 {
1917 	struct drm_i915_private *i915 = inode->i_private;
1918 	struct intel_guc *guc = &i915->gt.uc.guc;
1919 	struct intel_guc_log *log = &guc->log;
1920 
1921 	if (!intel_guc_is_running(guc))
1922 		return -ENODEV;
1923 
1924 	file->private_data = log;
1925 
1926 	return intel_guc_log_relay_open(log);
1927 }
1928 
1929 static ssize_t
1930 i915_guc_log_relay_write(struct file *filp,
1931 			 const char __user *ubuf,
1932 			 size_t cnt,
1933 			 loff_t *ppos)
1934 {
1935 	struct intel_guc_log *log = filp->private_data;
1936 	int val;
1937 	int ret;
1938 
1939 	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
1940 	if (ret < 0)
1941 		return ret;
1942 
1943 	/*
1944 	 * Enable and start the GuC log relay on a value of 1.
1945 	 * Any other value flushes the log relay.
1946 	 */
1947 	if (val == 1)
1948 		ret = intel_guc_log_relay_start(log);
1949 	else
1950 		intel_guc_log_relay_flush(log);
1951 
1952 	return ret ?: cnt;
1953 }
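/*
 * Usage sketch for the write handler above (node path hypothetical):
 * writing "1" enables and starts the relay, any other integer only
 * flushes it:
 *
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_guc_log_relay	# start
 *	# echo 0 > /sys/kernel/debug/dri/0/i915_guc_log_relay	# flush
 */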
1954 
1955 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
1956 {
1957 	struct drm_i915_private *i915 = inode->i_private;
1958 	struct intel_guc *guc = &i915->gt.uc.guc;
1959 
1960 	intel_guc_log_relay_close(&guc->log);
1961 	return 0;
1962 }
1963 
1964 static const struct file_operations i915_guc_log_relay_fops = {
1965 	.owner = THIS_MODULE,
1966 	.open = i915_guc_log_relay_open,
1967 	.write = i915_guc_log_relay_write,
1968 	.release = i915_guc_log_relay_release,
1969 };
1970 
1971 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
1972 {
1973 	u8 val;
1974 	static const char * const sink_status[] = {
1975 		"inactive",
1976 		"transition to active, capture and display",
1977 		"active, display from RFB",
1978 		"active, capture and display on sink device timings",
1979 		"transition to inactive, capture and display, timing re-sync",
1980 		"reserved",
1981 		"reserved",
1982 		"sink internal error",
1983 	};
1984 	struct drm_connector *connector = m->private;
1985 	struct drm_i915_private *dev_priv = to_i915(connector->dev);
1986 	struct intel_dp *intel_dp =
1987 		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
1988 	int ret;
1989 
1990 	if (!CAN_PSR(dev_priv)) {
1991 		seq_puts(m, "PSR Unsupported\n");
1992 		return -ENODEV;
1993 	}
1994 
1995 	if (connector->status != connector_status_connected)
1996 		return -ENODEV;
1997 
1998 	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
1999 
2000 	if (ret == 1) {
2001 		const char *str = "unknown";
2002 
2003 		val &= DP_PSR_SINK_STATE_MASK;
2004 		if (val < ARRAY_SIZE(sink_status))
2005 			str = sink_status[val];
2006 		seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2007 	} else {
2008 		return ret;
2009 	}
2010 
2011 	return 0;
2012 }
2013 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
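/*
 * Sample output: a DPCD value of 0x2 masked by DP_PSR_SINK_STATE_MASK
 * indexes sink_status[2], so the show hook above would print:
 *
 *	Sink PSR status: 0x2 [active, display from RFB]
 */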
2014 
2015 static void
2016 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2017 {
2018 	u32 val, status_val;
2019 	const char *status = "unknown";
2020 
2021 	if (dev_priv->psr.psr2_enabled) {
2022 		static const char * const live_status[] = {
2023 			"IDLE",
2024 			"CAPTURE",
2025 			"CAPTURE_FS",
2026 			"SLEEP",
2027 			"BUFON_FW",
2028 			"ML_UP",
2029 			"SU_STANDBY",
2030 			"FAST_SLEEP",
2031 			"DEEP_SLEEP",
2032 			"BUF_ON",
2033 			"TG_ON"
2034 		};
2035 		val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
2036 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2037 			      EDP_PSR2_STATUS_STATE_SHIFT;
2038 		if (status_val < ARRAY_SIZE(live_status))
2039 			status = live_status[status_val];
2040 	} else {
2041 		static const char * const live_status[] = {
2042 			"IDLE",
2043 			"SRDONACK",
2044 			"SRDENT",
2045 			"BUFOFF",
2046 			"BUFON",
2047 			"AUXACK",
2048 			"SRDOFFACK",
2049 			"SRDENT_ON",
2050 		};
2051 		val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
2052 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2053 			      EDP_PSR_STATUS_STATE_SHIFT;
2054 		if (status_val < ARRAY_SIZE(live_status))
2055 			status = live_status[status_val];
2056 	}
2057 
2058 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2059 }
2060 
2061 static int i915_edp_psr_status(struct seq_file *m, void *data)
2062 {
2063 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2064 	struct i915_psr *psr = &dev_priv->psr;
2065 	intel_wakeref_t wakeref;
2066 	const char *status;
2067 	bool enabled;
2068 	u32 val;
2069 
2070 	if (!HAS_PSR(dev_priv))
2071 		return -ENODEV;
2072 
2073 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2074 	if (psr->dp)
2075 		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2076 	seq_puts(m, "\n");
2077 
2078 	if (!psr->sink_support)
2079 		return 0;
2080 
2081 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2082 	mutex_lock(&psr->lock);
2083 
2084 	if (psr->enabled)
2085 		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2086 	else
2087 		status = "disabled";
2088 	seq_printf(m, "PSR mode: %s\n", status);
2089 
2090 	if (!psr->enabled) {
2091 		seq_printf(m, "PSR sink not reliable: %s\n",
2092 			   yesno(psr->sink_not_reliable));
2093 
2094 		goto unlock;
2095 	}
2096 
2097 	if (psr->psr2_enabled) {
2098 		val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
2099 		enabled = val & EDP_PSR2_ENABLE;
2100 	} else {
2101 		val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
2102 		enabled = val & EDP_PSR_ENABLE;
2103 	}
2104 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2105 		   enableddisabled(enabled), val);
2106 	psr_source_status(dev_priv, m);
2107 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2108 		   psr->busy_frontbuffer_bits);
2109 
2110 	/*
2111 	 * SKL+ Perf counter is reset to 0 every time a DC state is entered
2112 	 */
2113 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2114 		val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
2115 		val &= EDP_PSR_PERF_CNT_MASK;
2116 		seq_printf(m, "Performance counter: %u\n", val);
2117 	}
2118 
2119 	if (psr->debug & I915_PSR_DEBUG_IRQ) {
2120 		seq_printf(m, "Last attempted entry at: %lld\n",
2121 			   psr->last_entry_attempt);
2122 		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2123 	}
2124 
2125 	if (psr->psr2_enabled) {
2126 		u32 su_frames_val[3];
2127 		int frame;
2128 
2129 		/*
2130 		 * Read all 3 registers beforehand to minimize the chance of
2131 		 * crossing a frame boundary between register reads.
2132 		 */
2133 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
2134 			val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
2135 						       frame));
2136 			su_frames_val[frame / 3] = val;
2137 		}
2138 
2139 		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2140 
2141 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2142 			u32 su_blocks;
2143 
2144 			su_blocks = su_frames_val[frame / 3] &
2145 				    PSR2_SU_STATUS_MASK(frame);
2146 			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2147 			seq_printf(m, "%d\t%d\n", frame, su_blocks);
2148 		}
2149 	}
2150 
2151 unlock:
2152 	mutex_unlock(&psr->lock);
2153 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2154 
2155 	return 0;
2156 }
2157 
2158 static int
2159 i915_edp_psr_debug_set(void *data, u64 val)
2160 {
2161 	struct drm_i915_private *dev_priv = data;
2162 	intel_wakeref_t wakeref;
2163 	int ret;
2164 
2165 	if (!CAN_PSR(dev_priv))
2166 		return -ENODEV;
2167 
2168 	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2169 
2170 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2171 
2172 	ret = intel_psr_debug_set(dev_priv, val);
2173 
2174 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2175 
2176 	return ret;
2177 }
2178 
2179 static int
2180 i915_edp_psr_debug_get(void *data, u64 *val)
2181 {
2182 	struct drm_i915_private *dev_priv = data;
2183 
2184 	if (!CAN_PSR(dev_priv))
2185 		return -ENODEV;
2186 
2187 	*val = READ_ONCE(dev_priv->psr.debug);
2188 	return 0;
2189 }
2190 
2191 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2192 			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2193 			"%llu\n");
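/*
 * Illustrative use (node name assumed from the fops identifier): the
 * written value is a debug mask handed to intel_psr_debug_set(), e.g.
 *
 *	# echo 0x1 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */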
2194 
2195 static int i915_energy_uJ(struct seq_file *m, void *data)
2196 {
2197 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2198 	unsigned long long power;
2199 	intel_wakeref_t wakeref;
2200 	u32 units;
2201 
2202 	if (INTEL_GEN(dev_priv) < 6)
2203 		return -ENODEV;
2204 
2205 	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2206 		return -ENODEV;
2207 
2208 	units = (power & 0x1f00) >> 8;
2209 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2210 		power = I915_READ(MCH_SECP_NRG_STTS);
2211 
2212 	power = (1000000 * power) >> units; /* convert to uJ */
2213 	seq_printf(m, "%llu", power);
2214 
2215 	return 0;
2216 }
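/*
 * Worked example of the conversion above (numbers illustrative): the
 * energy unit is 1/2^units joules per count, with units taken from
 * MSR_RAPL_POWER_UNIT bits 12:8 (the 0x1f00 mask).  For units == 14
 * and a raw counter of 4096:
 *
 *	(1000000 * 4096) >> 14 == 250000 uJ == 0.25 J
 */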
2217 
2218 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2219 {
2220 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2221 	struct pci_dev *pdev = dev_priv->drm.pdev;
2222 
2223 	if (!HAS_RUNTIME_PM(dev_priv))
2224 		seq_puts(m, "Runtime power management not supported\n");
2225 
2226 	seq_printf(m, "Runtime power status: %s\n",
2227 		   enableddisabled(!dev_priv->power_domains.wakeref));
2228 
2229 	seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2230 	seq_printf(m, "IRQs disabled: %s\n",
2231 		   yesno(!intel_irqs_enabled(dev_priv)));
2232 #ifdef CONFIG_PM
2233 	seq_printf(m, "Usage count: %d\n",
2234 		   atomic_read(&dev_priv->drm.dev->power.usage_count));
2235 #else
2236 	seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2237 #endif
2238 	seq_printf(m, "PCI device power state: %s [%d]\n",
2239 		   pci_power_name(pdev->current_state),
2240 		   pdev->current_state);
2241 
2242 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2243 		struct drm_printer p = drm_seq_file_printer(m);
2244 
2245 		print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2246 	}
2247 
2248 	return 0;
2249 }
2250 
2251 static int i915_power_domain_info(struct seq_file *m, void *unused)
2252 {
2253 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2254 	struct i915_power_domains *power_domains = &dev_priv->power_domains;
2255 	int i;
2256 
2257 	mutex_lock(&power_domains->lock);
2258 
2259 	seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2260 	for (i = 0; i < power_domains->power_well_count; i++) {
2261 		struct i915_power_well *power_well;
2262 		enum intel_display_power_domain power_domain;
2263 
2264 		power_well = &power_domains->power_wells[i];
2265 		seq_printf(m, "%-25s %d\n", power_well->desc->name,
2266 			   power_well->count);
2267 
2268 		for_each_power_domain(power_domain, power_well->desc->domains)
2269 			seq_printf(m, "  %-23s %d\n",
2270 				 intel_display_power_domain_str(power_domain),
2271 				 power_domains->domain_use_count[power_domain]);
2272 	}
2273 
2274 	mutex_unlock(&power_domains->lock);
2275 
2276 	return 0;
2277 }
2278 
2279 static int i915_dmc_info(struct seq_file *m, void *unused)
2280 {
2281 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2282 	intel_wakeref_t wakeref;
2283 	struct intel_csr *csr;
2284 	i915_reg_t dc5_reg, dc6_reg = {};
2285 
2286 	if (!HAS_CSR(dev_priv))
2287 		return -ENODEV;
2288 
2289 	csr = &dev_priv->csr;
2290 
2291 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2292 
2293 	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2294 	seq_printf(m, "path: %s\n", csr->fw_path);
2295 
2296 	if (!csr->dmc_payload)
2297 		goto out;
2298 
2299 	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2300 		   CSR_VERSION_MINOR(csr->version));
2301 
2302 	if (INTEL_GEN(dev_priv) >= 12) {
2303 		dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
2304 		dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
2305 		/*
2306 		 * NOTE: DMC_DEBUG3 is a general-purpose register.
2307 		 * According to B.Specs:49196 the DMC f/w reuses the DC5/6
2308 		 * counter register for DC3CO debugging and validation,
2309 		 * but the TGL DMC f/w uses DMC_DEBUG3 as the DC3CO counter.
2310 		 */
2311 		seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
2312 	} else {
2313 		dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2314 						 SKL_CSR_DC3_DC5_COUNT;
2315 		if (!IS_GEN9_LP(dev_priv))
2316 			dc6_reg = SKL_CSR_DC5_DC6_COUNT;
2317 	}
2318 
2319 	seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
2320 	if (dc6_reg.reg)
2321 		seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
2322 
2323 out:
2324 	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2325 	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2326 	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2327 
2328 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2329 
2330 	return 0;
2331 }
2332 
2333 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2334 				 const struct drm_display_mode *mode)
2335 {
2336 	int i;
2337 
2338 	for (i = 0; i < tabs; i++)
2339 		seq_putc(m, '\t');
2340 
2341 	seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2342 }
2343 
2344 static void intel_encoder_info(struct seq_file *m,
2345 			       struct intel_crtc *crtc,
2346 			       struct intel_encoder *encoder)
2347 {
2348 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2349 	struct drm_connector_list_iter conn_iter;
2350 	struct drm_connector *connector;
2351 
2352 	seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
2353 		   encoder->base.base.id, encoder->base.name);
2354 
2355 	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
2356 	drm_for_each_connector_iter(connector, &conn_iter) {
2357 		const struct drm_connector_state *conn_state =
2358 			connector->state;
2359 
2360 		if (conn_state->best_encoder != &encoder->base)
2361 			continue;
2362 
2363 		seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
2364 			   connector->base.id, connector->name);
2365 	}
2366 	drm_connector_list_iter_end(&conn_iter);
2367 }
2368 
2369 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2370 {
2371 	const struct drm_display_mode *mode = panel->fixed_mode;
2372 
2373 	seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2374 }
2375 
2376 static void intel_hdcp_info(struct seq_file *m,
2377 			    struct intel_connector *intel_connector)
2378 {
2379 	bool hdcp_cap, hdcp2_cap;
2380 
2381 	hdcp_cap = intel_hdcp_capable(intel_connector);
2382 	hdcp2_cap = intel_hdcp2_capable(intel_connector);
2383 
2384 	if (hdcp_cap)
2385 		seq_puts(m, "HDCP1.4 ");
2386 	if (hdcp2_cap)
2387 		seq_puts(m, "HDCP2.2 ");
2388 
2389 	if (!hdcp_cap && !hdcp2_cap)
2390 		seq_puts(m, "None");
2391 
2392 	seq_puts(m, "\n");
2393 }
2394 
2395 static void intel_dp_info(struct seq_file *m,
2396 			  struct intel_connector *intel_connector)
2397 {
2398 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2399 	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
2400 
2401 	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2402 	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2403 	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2404 		intel_panel_info(m, &intel_connector->panel);
2405 
2406 	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2407 				&intel_dp->aux);
2408 	if (intel_connector->hdcp.shim) {
2409 		seq_puts(m, "\tHDCP version: ");
2410 		intel_hdcp_info(m, intel_connector);
2411 	}
2412 }
2413 
2414 static void intel_dp_mst_info(struct seq_file *m,
2415 			  struct intel_connector *intel_connector)
2416 {
2417 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2418 	struct intel_dp_mst_encoder *intel_mst =
2419 		enc_to_mst(intel_encoder);
2420 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
2421 	struct intel_dp *intel_dp = &intel_dig_port->dp;
2422 	bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2423 					intel_connector->port);
2424 
2425 	seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2426 }
2427 
2428 static void intel_hdmi_info(struct seq_file *m,
2429 			    struct intel_connector *intel_connector)
2430 {
2431 	struct intel_encoder *intel_encoder = intel_connector->encoder;
2432 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
2433 
2434 	seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2435 	if (intel_connector->hdcp.shim) {
2436 		seq_puts(m, "\tHDCP version: ");
2437 		intel_hdcp_info(m, intel_connector);
2438 	}
2439 }
2440 
2441 static void intel_lvds_info(struct seq_file *m,
2442 			    struct intel_connector *intel_connector)
2443 {
2444 	intel_panel_info(m, &intel_connector->panel);
2445 }
2446 
2447 static void intel_connector_info(struct seq_file *m,
2448 				 struct drm_connector *connector)
2449 {
2450 	struct intel_connector *intel_connector = to_intel_connector(connector);
2451 	const struct drm_connector_state *conn_state = connector->state;
2452 	struct intel_encoder *encoder =
2453 		to_intel_encoder(conn_state->best_encoder);
2454 	const struct drm_display_mode *mode;
2455 
2456 	seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
2457 		   connector->base.id, connector->name,
2458 		   drm_get_connector_status_name(connector->status));
2459 
2460 	if (connector->status == connector_status_disconnected)
2461 		return;
2462 
2463 	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2464 		   connector->display_info.width_mm,
2465 		   connector->display_info.height_mm);
2466 	seq_printf(m, "\tsubpixel order: %s\n",
2467 		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2468 	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2469 
2470 	if (!encoder)
2471 		return;
2472 
2473 	switch (connector->connector_type) {
2474 	case DRM_MODE_CONNECTOR_DisplayPort:
2475 	case DRM_MODE_CONNECTOR_eDP:
2476 		if (encoder->type == INTEL_OUTPUT_DP_MST)
2477 			intel_dp_mst_info(m, intel_connector);
2478 		else
2479 			intel_dp_info(m, intel_connector);
2480 		break;
2481 	case DRM_MODE_CONNECTOR_LVDS:
2482 		if (encoder->type == INTEL_OUTPUT_LVDS)
2483 			intel_lvds_info(m, intel_connector);
2484 		break;
2485 	case DRM_MODE_CONNECTOR_HDMIA:
2486 		if (encoder->type == INTEL_OUTPUT_HDMI ||
2487 		    encoder->type == INTEL_OUTPUT_DDI)
2488 			intel_hdmi_info(m, intel_connector);
2489 		break;
2490 	default:
2491 		break;
2492 	}
2493 
2494 	seq_printf(m, "\tmodes:\n");
2495 	list_for_each_entry(mode, &connector->modes, head)
2496 		intel_seq_print_mode(m, 2, mode);
2497 }
2498 
2499 static const char *plane_type(enum drm_plane_type type)
2500 {
2501 	switch (type) {
2502 	case DRM_PLANE_TYPE_OVERLAY:
2503 		return "OVL";
2504 	case DRM_PLANE_TYPE_PRIMARY:
2505 		return "PRI";
2506 	case DRM_PLANE_TYPE_CURSOR:
2507 		return "CUR";
2508 	/*
2509 	 * Deliberately omitting default: to generate compiler warnings
2510 	 * when a new drm_plane_type gets added.
2511 	 */
2512 	}
2513 
2514 	return "unknown";
2515 }
2516 
2517 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2518 {
2519 	/*
2520 	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
2521 	 * but print them all so that misused values are easy to spot.
2522 	 */
2523 	snprintf(buf, bufsize,
2524 		 "%s%s%s%s%s%s(0x%08x)",
2525 		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2526 		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2527 		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2528 		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2529 		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2530 		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2531 		 rotation);
2532 }
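/*
 * Example, assuming the standard DRM rotation bit layout
 * (DRM_MODE_ROTATE_90 is bit 1, DRM_MODE_REFLECT_X is bit 4):
 *
 *	plane_rotation(buf, sizeof(buf),
 *		       DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X);
 *	// buf == "90 FLIPX (0x00000012)"
 */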
2533 
2534 static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
2535 {
2536 	const struct intel_plane_state *plane_state =
2537 		to_intel_plane_state(plane->base.state);
2538 	const struct drm_framebuffer *fb = plane_state->uapi.fb;
2539 	struct drm_format_name_buf format_name;
2540 	struct drm_rect src, dst;
2541 	char rot_str[48];
2542 
2543 	src = drm_plane_state_src(&plane_state->uapi);
2544 	dst = drm_plane_state_dest(&plane_state->uapi);
2545 
2546 	if (fb)
2547 		drm_get_format_name(fb->format->format, &format_name);
2548 
2549 	plane_rotation(rot_str, sizeof(rot_str),
2550 		       plane_state->uapi.rotation);
2551 
2552 	seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
2553 		   fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
2554 		   fb ? fb->width : 0, fb ? fb->height : 0,
2555 		   DRM_RECT_FP_ARG(&src),
2556 		   DRM_RECT_ARG(&dst),
2557 		   rot_str);
2558 }
2559 
2560 static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
2561 {
2562 	const struct intel_plane_state *plane_state =
2563 		to_intel_plane_state(plane->base.state);
2564 	const struct drm_framebuffer *fb = plane_state->hw.fb;
2565 	struct drm_format_name_buf format_name;
2566 	char rot_str[48];
2567 
2568 	if (!fb)
2569 		return;
2570 
2571 	drm_get_format_name(fb->format->format, &format_name);
2572 
2573 	plane_rotation(rot_str, sizeof(rot_str),
2574 		       plane_state->hw.rotation);
2575 
2576 	seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
2577 		   fb->base.id, format_name.str,
2578 		   fb->width, fb->height,
2579 		   yesno(plane_state->uapi.visible),
2580 		   DRM_RECT_FP_ARG(&plane_state->uapi.src),
2581 		   DRM_RECT_ARG(&plane_state->uapi.dst),
2582 		   rot_str);
2583 }
2584 
2585 static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
2586 {
2587 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2588 	struct intel_plane *plane;
2589 
2590 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
2591 		seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
2592 			   plane->base.base.id, plane->base.name,
2593 			   plane_type(plane->base.type));
2594 		intel_plane_uapi_info(m, plane);
2595 		intel_plane_hw_info(m, plane);
2596 	}
2597 }
2598 
2599 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
2600 {
2601 	const struct intel_crtc_state *crtc_state =
2602 		to_intel_crtc_state(crtc->base.state);
2603 	int num_scalers = crtc->num_scalers;
2604 	int i;
2605 
2606 	/* Not all platforms have a scaler */
2607 	if (num_scalers) {
2608 		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2609 			   num_scalers,
2610 			   crtc_state->scaler_state.scaler_users,
2611 			   crtc_state->scaler_state.scaler_id);
2612 
2613 		for (i = 0; i < num_scalers; i++) {
2614 			const struct intel_scaler *sc =
2615 				&crtc_state->scaler_state.scalers[i];
2616 
2617 			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2618 				   i, yesno(sc->in_use), sc->mode);
2619 		}
2620 		seq_puts(m, "\n");
2621 	} else {
2622 		seq_puts(m, "\tNo scalers available on this platform\n");
2623 	}
2624 }
2625 
2626 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
2627 {
2628 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2629 	const struct intel_crtc_state *crtc_state =
2630 		to_intel_crtc_state(crtc->base.state);
2631 	struct intel_encoder *encoder;
2632 
2633 	seq_printf(m, "[CRTC:%d:%s]:\n",
2634 		   crtc->base.base.id, crtc->base.name);
2635 
2636 	seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
2637 		   yesno(crtc_state->uapi.enable),
2638 		   yesno(crtc_state->uapi.active),
2639 		   DRM_MODE_ARG(&crtc_state->uapi.mode));
2640 
2641 	if (crtc_state->hw.enable) {
2642 		seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
2643 			   yesno(crtc_state->hw.active),
2644 			   DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
2645 
2646 		seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
2647 			   crtc_state->pipe_src_w, crtc_state->pipe_src_h,
2648 			   yesno(crtc_state->dither), crtc_state->pipe_bpp);
2649 
2650 		intel_scaler_info(m, crtc);
2651 	}
2652 
2653 	for_each_intel_encoder_mask(&dev_priv->drm, encoder,
2654 				    crtc_state->uapi.encoder_mask)
2655 		intel_encoder_info(m, crtc, encoder);
2656 
2657 	intel_plane_info(m, crtc);
2658 
2659 	seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
2660 		   yesno(!crtc->cpu_fifo_underrun_disabled),
2661 		   yesno(!crtc->pch_fifo_underrun_disabled));
2662 }
2663 
2664 static int i915_display_info(struct seq_file *m, void *unused)
2665 {
2666 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2667 	struct drm_device *dev = &dev_priv->drm;
2668 	struct intel_crtc *crtc;
2669 	struct drm_connector *connector;
2670 	struct drm_connector_list_iter conn_iter;
2671 	intel_wakeref_t wakeref;
2672 
2673 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2674 
2675 	drm_modeset_lock_all(dev);
2676 
2677 	seq_printf(m, "CRTC info\n");
2678 	seq_printf(m, "---------\n");
2679 	for_each_intel_crtc(dev, crtc)
2680 		intel_crtc_info(m, crtc);
2681 
2682 	seq_printf(m, "\n");
2683 	seq_printf(m, "Connector info\n");
2684 	seq_printf(m, "--------------\n");
2685 	drm_connector_list_iter_begin(dev, &conn_iter);
2686 	drm_for_each_connector_iter(connector, &conn_iter)
2687 		intel_connector_info(m, connector);
2688 	drm_connector_list_iter_end(&conn_iter);
2689 
2690 	drm_modeset_unlock_all(dev);
2691 
2692 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2693 
2694 	return 0;
2695 }
2696 
2697 static int i915_engine_info(struct seq_file *m, void *unused)
2698 {
2699 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2700 	struct intel_engine_cs *engine;
2701 	intel_wakeref_t wakeref;
2702 	struct drm_printer p;
2703 
2704 	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2705 
2706 	seq_printf(m, "GT awake? %s [%d]\n",
2707 		   yesno(dev_priv->gt.awake),
2708 		   atomic_read(&dev_priv->gt.wakeref.count));
2709 	seq_printf(m, "CS timestamp frequency: %u kHz\n",
2710 		   RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2711 
2712 	p = drm_seq_file_printer(m);
2713 	for_each_uabi_engine(engine, dev_priv)
2714 		intel_engine_dump(engine, &p, "%s\n", engine->name);
2715 
2716 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2717 
2718 	return 0;
2719 }
2720 
2721 static int i915_rcs_topology(struct seq_file *m, void *unused)
2722 {
2723 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2724 	struct drm_printer p = drm_seq_file_printer(m);
2725 
2726 	intel_device_info_print_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2727 
2728 	return 0;
2729 }
2730 
2731 static int i915_shrinker_info(struct seq_file *m, void *unused)
2732 {
2733 	struct drm_i915_private *i915 = node_to_i915(m->private);
2734 
2735 	seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2736 	seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2737 
2738 	return 0;
2739 }
2740 
2741 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2742 {
2743 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2744 	struct drm_device *dev = &dev_priv->drm;
2745 	int i;
2746 
2747 	drm_modeset_lock_all(dev);
2748 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2749 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2750 
2751 		seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2752 			   pll->info->id);
2753 		seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2754 			   pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2755 		seq_printf(m, " tracked hardware state:\n");
2756 		seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
2757 		seq_printf(m, " dpll_md: 0x%08x\n",
2758 			   pll->state.hw_state.dpll_md);
2759 		seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
2760 		seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
2761 		seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
2762 		seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
2763 		seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
2764 		seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
2765 			   pll->state.hw_state.mg_refclkin_ctl);
2766 		seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2767 			   pll->state.hw_state.mg_clktop2_coreclkctl1);
2768 		seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
2769 			   pll->state.hw_state.mg_clktop2_hsclkctl);
2770 		seq_printf(m, " mg_pll_div0:  0x%08x\n",
2771 			   pll->state.hw_state.mg_pll_div0);
2772 		seq_printf(m, " mg_pll_div1:  0x%08x\n",
2773 			   pll->state.hw_state.mg_pll_div1);
2774 		seq_printf(m, " mg_pll_lf:    0x%08x\n",
2775 			   pll->state.hw_state.mg_pll_lf);
2776 		seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2777 			   pll->state.hw_state.mg_pll_frac_lock);
2778 		seq_printf(m, " mg_pll_ssc:   0x%08x\n",
2779 			   pll->state.hw_state.mg_pll_ssc);
2780 		seq_printf(m, " mg_pll_bias:  0x%08x\n",
2781 			   pll->state.hw_state.mg_pll_bias);
2782 		seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2783 			   pll->state.hw_state.mg_pll_tdc_coldst_bias);
2784 	}
2785 	drm_modeset_unlock_all(dev);
2786 
2787 	return 0;
2788 }
2789 
2790 static int i915_wa_registers(struct seq_file *m, void *unused)
2791 {
2792 	struct drm_i915_private *i915 = node_to_i915(m->private);
2793 	struct intel_engine_cs *engine;
2794 
2795 	for_each_uabi_engine(engine, i915) {
2796 		const struct i915_wa_list *wal = &engine->ctx_wa_list;
2797 		const struct i915_wa *wa;
2798 		unsigned int count;
2799 
2800 		count = wal->count;
2801 		if (!count)
2802 			continue;
2803 
2804 		seq_printf(m, "%s: Workarounds applied: %u\n",
2805 			   engine->name, count);
2806 
2807 		for (wa = wal->list; count--; wa++)
2808 			seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2809 				   i915_mmio_reg_offset(wa->reg),
2810 				   wa->val, wa->mask);
2811 
2812 		seq_printf(m, "\n");
2813 	}
2814 
2815 	return 0;
2816 }
2817 
2818 static int i915_ipc_status_show(struct seq_file *m, void *data)
2819 {
2820 	struct drm_i915_private *dev_priv = m->private;
2821 
2822 	seq_printf(m, "Isochronous Priority Control: %s\n",
2823 			yesno(dev_priv->ipc_enabled));
2824 	return 0;
2825 }
2826 
2827 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2828 {
2829 	struct drm_i915_private *dev_priv = inode->i_private;
2830 
2831 	if (!HAS_IPC(dev_priv))
2832 		return -ENODEV;
2833 
2834 	return single_open(file, i915_ipc_status_show, dev_priv);
2835 }
2836 
2837 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2838 				     size_t len, loff_t *offp)
2839 {
2840 	struct seq_file *m = file->private_data;
2841 	struct drm_i915_private *dev_priv = m->private;
2842 	intel_wakeref_t wakeref;
2843 	bool enable;
2844 	int ret;
2845 
2846 	ret = kstrtobool_from_user(ubuf, len, &enable);
2847 	if (ret < 0)
2848 		return ret;
2849 
2850 	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2851 		if (!dev_priv->ipc_enabled && enable)
2852 			DRM_INFO("Enabling IPC: watermarks will be correct only after the next commit\n");
2853 		dev_priv->wm.distrust_bios_wm = true;
2854 		dev_priv->ipc_enabled = enable;
2855 		intel_enable_ipc(dev_priv);
2856 	}
2857 
2858 	return len;
2859 }
2860 
2861 static const struct file_operations i915_ipc_status_fops = {
2862 	.owner = THIS_MODULE,
2863 	.open = i915_ipc_status_open,
2864 	.read = seq_read,
2865 	.llseek = seq_lseek,
2866 	.release = single_release,
2867 	.write = i915_ipc_status_write
2868 };
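/*
 * Usage sketch (path hypothetical): kstrtobool_from_user() accepts the
 * usual boolean spellings, so either of these enables IPC:
 *
 *	# echo 1 > /sys/kernel/debug/dri/0/i915_ipc_status
 *	# echo y > /sys/kernel/debug/dri/0/i915_ipc_status
 */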
2869 
2870 static int i915_ddb_info(struct seq_file *m, void *unused)
2871 {
2872 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2873 	struct drm_device *dev = &dev_priv->drm;
2874 	struct skl_ddb_entry *entry;
2875 	struct intel_crtc *crtc;
2876 
2877 	if (INTEL_GEN(dev_priv) < 9)
2878 		return -ENODEV;
2879 
2880 	drm_modeset_lock_all(dev);
2881 
2882 	seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2883 
2884 	for_each_intel_crtc(&dev_priv->drm, crtc) {
2885 		struct intel_crtc_state *crtc_state =
2886 			to_intel_crtc_state(crtc->base.state);
2887 		enum pipe pipe = crtc->pipe;
2888 		enum plane_id plane_id;
2889 
2890 		seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2891 
2892 		for_each_plane_id_on_crtc(crtc, plane_id) {
2893 			entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2894 			seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
2895 				   entry->start, entry->end,
2896 				   skl_ddb_entry_size(entry));
2897 		}
2898 
2899 		entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
2900 		seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
2901 			   entry->end, skl_ddb_entry_size(entry));
2902 	}
2903 
2904 	drm_modeset_unlock_all(dev);
2905 
2906 	return 0;
2907 }
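/*
 * Shape of the DDB table emitted above (values illustrative, alignment
 * approximate):
 *
 *	               Start     End    Size
 *	Pipe A
 *	  Plane1           0     159     160
 *	  Cursor         480     511      32
 */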
2908 
2909 static void drrs_status_per_crtc(struct seq_file *m,
2910 				 struct drm_device *dev,
2911 				 struct intel_crtc *intel_crtc)
2912 {
2913 	struct drm_i915_private *dev_priv = to_i915(dev);
2914 	struct i915_drrs *drrs = &dev_priv->drrs;
2915 	int vrefresh = 0;
2916 	struct drm_connector *connector;
2917 	struct drm_connector_list_iter conn_iter;
2918 
2919 	drm_connector_list_iter_begin(dev, &conn_iter);
2920 	drm_for_each_connector_iter(connector, &conn_iter) {
2921 		if (connector->state->crtc != &intel_crtc->base)
2922 			continue;
2923 
2924 		seq_printf(m, "%s:\n", connector->name);
2925 	}
2926 	drm_connector_list_iter_end(&conn_iter);
2927 
2928 	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
2929 		seq_puts(m, "\tVBT: DRRS_type: Static");
2930 	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
2931 		seq_puts(m, "\tVBT: DRRS_type: Seamless");
2932 	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
2933 		seq_puts(m, "\tVBT: DRRS_type: None");
2934 	else
2935 		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
2936 
2937 	seq_puts(m, "\n\n");
2938 
2939 	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
2940 		struct intel_panel *panel;
2941 
2942 		mutex_lock(&drrs->mutex);
2943 		/* DRRS Supported */
2944 		seq_puts(m, "\tDRRS Supported: Yes\n");
2945 
2946 		/* disable_drrs() will make drrs->dp NULL */
2947 		if (!drrs->dp) {
2948 			seq_puts(m, "Idleness DRRS: Disabled\n");
2949 			if (dev_priv->psr.enabled)
2950 				seq_puts(m,
2951 				"\tAs PSR is enabled, DRRS is not enabled\n");
2952 			mutex_unlock(&drrs->mutex);
2953 			return;
2954 		}
2955 
2956 		panel = &drrs->dp->attached_connector->panel;
2957 		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
2958 					drrs->busy_frontbuffer_bits);
2959 
2960 		seq_puts(m, "\n\t\t");
2961 		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
2962 			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
2963 			vrefresh = panel->fixed_mode->vrefresh;
2964 		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
2965 			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
2966 			vrefresh = panel->downclock_mode->vrefresh;
2967 		} else {
2968 			seq_printf(m, "DRRS_State: Unknown(%d)\n",
2969 						drrs->refresh_rate_type);
2970 			mutex_unlock(&drrs->mutex);
2971 			return;
2972 		}
2973 		seq_printf(m, "\t\tVrefresh: %d", vrefresh);
2974 
2975 		seq_puts(m, "\n\t\t");
2976 		mutex_unlock(&drrs->mutex);
2977 	} else {
2978 		/* DRRS not supported. Print the VBT parameter. */
2979 		seq_puts(m, "\tDRRS Supported: No");
2980 	}
2981 	seq_puts(m, "\n");
2982 }
2983 
2984 static int i915_drrs_status(struct seq_file *m, void *unused)
2985 {
2986 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
2987 	struct drm_device *dev = &dev_priv->drm;
2988 	struct intel_crtc *intel_crtc;
2989 	int active_crtc_cnt = 0;
2990 
2991 	drm_modeset_lock_all(dev);
2992 	for_each_intel_crtc(dev, intel_crtc) {
2993 		if (intel_crtc->base.state->active) {
2994 			active_crtc_cnt++;
2995 			seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
2996 
2997 			drrs_status_per_crtc(m, dev, intel_crtc);
2998 		}
2999 	}
3000 	drm_modeset_unlock_all(dev);
3001 
3002 	if (!active_crtc_cnt)
3003 		seq_puts(m, "No active crtc found\n");
3004 
3005 	return 0;
3006 }
3007 
3008 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3009 {
3010 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
3011 	struct drm_device *dev = &dev_priv->drm;
3012 	struct intel_encoder *intel_encoder;
3013 	struct intel_digital_port *intel_dig_port;
3014 	struct drm_connector *connector;
3015 	struct drm_connector_list_iter conn_iter;
3016 
3017 	drm_connector_list_iter_begin(dev, &conn_iter);
3018 	drm_for_each_connector_iter(connector, &conn_iter) {
3019 		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3020 			continue;
3021 
3022 		intel_encoder = intel_attached_encoder(to_intel_connector(connector));
3023 		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3024 			continue;
3025 
3026 		intel_dig_port = enc_to_dig_port(intel_encoder);
3027 		if (!intel_dig_port->dp.can_mst)
3028 			continue;
3029 
3030 		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
3031 			   intel_dig_port->base.base.base.id,
3032 			   intel_dig_port->base.base.name);
3033 		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3034 	}
3035 	drm_connector_list_iter_end(&conn_iter);
3036 
3037 	return 0;
3038 }
3039 
3040 static ssize_t i915_displayport_test_active_write(struct file *file,
3041 						  const char __user *ubuf,
3042 						  size_t len, loff_t *offp)
3043 {
3044 	char *input_buffer;
3045 	int status = 0;
3046 	struct drm_device *dev;
3047 	struct drm_connector *connector;
3048 	struct drm_connector_list_iter conn_iter;
3049 	struct intel_dp *intel_dp;
3050 	int val = 0;
3051 
3052 	dev = ((struct seq_file *)file->private_data)->private;
3053 
3054 	if (len == 0)
3055 		return 0;
3056 
3057 	input_buffer = memdup_user_nul(ubuf, len);
3058 	if (IS_ERR(input_buffer))
3059 		return PTR_ERR(input_buffer);
3060 
3061 	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3062 
3063 	drm_connector_list_iter_begin(dev, &conn_iter);
3064 	drm_for_each_connector_iter(connector, &conn_iter) {
3065 		struct intel_encoder *encoder;
3066 
3067 		if (connector->connector_type !=
3068 		    DRM_MODE_CONNECTOR_DisplayPort)
3069 			continue;
3070 
3071 		encoder = to_intel_encoder(connector->encoder);
3072 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3073 			continue;
3074 
3075 		if (encoder && connector->status == connector_status_connected) {
3076 			intel_dp = enc_to_intel_dp(encoder);
3077 			status = kstrtoint(input_buffer, 10, &val);
3078 			if (status < 0)
3079 				break;
3080 			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3081 			/* To prevent erroneous activation of the compliance
3082 			 * testing code, only accept an actual value of 1 here
3083 			 */
3084 			if (val == 1)
3085 				intel_dp->compliance.test_active = true;
3086 			else
3087 				intel_dp->compliance.test_active = false;
3088 		}
3089 	}
3090 	drm_connector_list_iter_end(&conn_iter);
3091 	kfree(input_buffer);
3092 	if (status < 0)
3093 		return status;
3094 
3095 	*offp += len;
3096 	return len;
3097 }
3098 
3099 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3100 {
3101 	struct drm_i915_private *dev_priv = m->private;
3102 	struct drm_device *dev = &dev_priv->drm;
3103 	struct drm_connector *connector;
3104 	struct drm_connector_list_iter conn_iter;
3105 	struct intel_dp *intel_dp;
3106 
3107 	drm_connector_list_iter_begin(dev, &conn_iter);
3108 	drm_for_each_connector_iter(connector, &conn_iter) {
3109 		struct intel_encoder *encoder;
3110 
3111 		if (connector->connector_type !=
3112 		    DRM_MODE_CONNECTOR_DisplayPort)
3113 			continue;
3114 
3115 		encoder = to_intel_encoder(connector->encoder);
3116 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3117 			continue;
3118 
3119 		if (encoder && connector->status == connector_status_connected) {
3120 			intel_dp = enc_to_intel_dp(encoder);
3121 			if (intel_dp->compliance.test_active)
3122 				seq_puts(m, "1");
3123 			else
3124 				seq_puts(m, "0");
3125 		} else
3126 			seq_puts(m, "0");
3127 	}
3128 	drm_connector_list_iter_end(&conn_iter);
3129 
3130 	return 0;
3131 }
3132 
3133 static int i915_displayport_test_active_open(struct inode *inode,
3134 					     struct file *file)
3135 {
3136 	return single_open(file, i915_displayport_test_active_show,
3137 			   inode->i_private);
3138 }
3139 
3140 static const struct file_operations i915_displayport_test_active_fops = {
3141 	.owner = THIS_MODULE,
3142 	.open = i915_displayport_test_active_open,
3143 	.read = seq_read,
3144 	.llseek = seq_lseek,
3145 	.release = single_release,
3146 	.write = i915_displayport_test_active_write
3147 };
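/*
 * Compliance-flow sketch: a DP test tool writes "1" here to arm the
 * compliance test flag on every connected DP connector and reads back
 * "1"/"0" per connector via the show hook.  Per the write handler,
 * only a literal 1 sets test_active; any other integer clears it.
 */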
3148 
3149 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3150 {
3151 	struct drm_i915_private *dev_priv = m->private;
3152 	struct drm_device *dev = &dev_priv->drm;
3153 	struct drm_connector *connector;
3154 	struct drm_connector_list_iter conn_iter;
3155 	struct intel_dp *intel_dp;
3156 
3157 	drm_connector_list_iter_begin(dev, &conn_iter);
3158 	drm_for_each_connector_iter(connector, &conn_iter) {
3159 		struct intel_encoder *encoder;
3160 
3161 		if (connector->connector_type !=
3162 		    DRM_MODE_CONNECTOR_DisplayPort)
3163 			continue;
3164 
3165 		encoder = to_intel_encoder(connector->encoder);
3166 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3167 			continue;
3168 
3169 		if (encoder && connector->status == connector_status_connected) {
3170 			intel_dp = enc_to_intel_dp(encoder);
3171 			if (intel_dp->compliance.test_type ==
3172 			    DP_TEST_LINK_EDID_READ)
3173 				seq_printf(m, "%lx",
3174 					   intel_dp->compliance.test_data.edid);
3175 			else if (intel_dp->compliance.test_type ==
3176 				 DP_TEST_LINK_VIDEO_PATTERN) {
3177 				seq_printf(m, "hdisplay: %d\n",
3178 					   intel_dp->compliance.test_data.hdisplay);
3179 				seq_printf(m, "vdisplay: %d\n",
3180 					   intel_dp->compliance.test_data.vdisplay);
3181 				seq_printf(m, "bpc: %u\n",
3182 					   intel_dp->compliance.test_data.bpc);
3183 			}
3184 		} else
3185 			seq_puts(m, "0");
3186 	}
3187 	drm_connector_list_iter_end(&conn_iter);
3188 
3189 	return 0;
3190 }
3191 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3192 
3193 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3194 {
3195 	struct drm_i915_private *dev_priv = m->private;
3196 	struct drm_device *dev = &dev_priv->drm;
3197 	struct drm_connector *connector;
3198 	struct drm_connector_list_iter conn_iter;
3199 	struct intel_dp *intel_dp;
3200 
3201 	drm_connector_list_iter_begin(dev, &conn_iter);
3202 	drm_for_each_connector_iter(connector, &conn_iter) {
3203 		struct intel_encoder *encoder;
3204 
3205 		if (connector->connector_type !=
3206 		    DRM_MODE_CONNECTOR_DisplayPort)
3207 			continue;
3208 
3209 		encoder = to_intel_encoder(connector->encoder);
3210 		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3211 			continue;
3212 
3213 		if (encoder && connector->status == connector_status_connected) {
3214 			intel_dp = enc_to_intel_dp(encoder);
3215 			seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3216 		} else
3217 			seq_puts(m, "0");
3218 	}
3219 	drm_connector_list_iter_end(&conn_iter);
3220 
3221 	return 0;
3222 }
3223 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3224 
3225 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3226 {
3227 	struct drm_i915_private *dev_priv = m->private;
3228 	struct drm_device *dev = &dev_priv->drm;
3229 	int level;
3230 	int num_levels;
3231 
3232 	if (IS_CHERRYVIEW(dev_priv))
3233 		num_levels = 3;
3234 	else if (IS_VALLEYVIEW(dev_priv))
3235 		num_levels = 1;
3236 	else if (IS_G4X(dev_priv))
3237 		num_levels = 3;
3238 	else
3239 		num_levels = ilk_wm_max_level(dev_priv) + 1;
3240 
3241 	drm_modeset_lock_all(dev);
3242 
3243 	for (level = 0; level < num_levels; level++) {
3244 		unsigned int latency = wm[level];
3245 
3246 		/*
3247 		 * - WM1+ latency values in 0.5us units
3248 		 * - latencies are in us on gen9/vlv/chv
3249 		 */
3250 		if (INTEL_GEN(dev_priv) >= 9 ||
3251 		    IS_VALLEYVIEW(dev_priv) ||
3252 		    IS_CHERRYVIEW(dev_priv) ||
3253 		    IS_G4X(dev_priv))
3254 			latency *= 10;
3255 		else if (level > 0)
3256 			latency *= 5;
3257 
3258 		seq_printf(m, "WM%d %u (%u.%u usec)\n",
3259 			   level, wm[level], latency / 10, latency % 10);
3260 	}
3261 
3262 	drm_modeset_unlock_all(dev);
3263 }
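
/*
 * Sample output from wm_latency_show() on an ILK-class platform (neither
 * gen9+ nor vlv/chv/g4x), with raw latency values invented purely for
 * illustration; per the scaling above, WM0 prints raw/10 usec while WM1+
 * print raw/2 usec:
 *
 *   WM0 12 (1.2 usec)
 *   WM1 4 (2.0 usec)
 *   WM2 8 (4.0 usec)
 */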

static int pri_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int spr_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int cur_wm_latency_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	const u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	wm_latency_show(m, latencies);

	return 0;
}

static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}

static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
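
/*
 * The write side accepts space-separated raw latency values, one per
 * watermark level, in the same hardware units the tables are stored in.
 * A hypothetical example (the debugfs path is an assumption):
 *
 *   echo "12 4 8 16 24" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 *
 * The number of values must match the platform's watermark level count
 * exactly, or the write fails with -EINVAL.
 */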

static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.pri_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.spr_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	u16 *latencies;

	if (INTEL_GEN(dev_priv) >= 9)
		latencies = dev_priv->wm.skl_latency;
	else
		latencies = dev_priv->wm.cur_latency;

	return wm_latency_write(file, ubuf, len, offp, latencies);
}

static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};

static int
i915_wedged_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;
	int ret = intel_gt_terminally_wedged(&i915->gt);

	switch (ret) {
	case -EIO:
		*val = 1;
		return 0;
	case 0:
		*val = 0;
		return 0;
	default:
		return ret;
	}
}

static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
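
/*
 * Sketch of the i915_wedged contract implemented above: reading yields 1
 * when the GT is terminally wedged and 0 otherwise; writing requests a
 * reset, with the written value interpreted as the engine mask handed to
 * intel_gt_handle_error(). For example (the path is an assumption):
 *
 *   echo 0xffffffff > /sys/kernel/debug/dri/0/i915_wedged  # reset all engines
 */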

static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;

	/*
	 * This would lead to infinite waits, since we compute the
	 * timestamp difference on the CS with only 32 bits.
	 */
	if (val > mul_u32_u32(U32_MAX, clk))
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}

static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");

#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_RCU	BIT(9)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
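
/*
 * Example: with the bits defined above, DROP_ALL works out to 0x3ff, so
 * (assuming the usual debugfs mount point)
 *
 *   echo 0x3ff > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 *
 * asks the driver to idle the GT and release every droppable cache.
 */
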
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}

static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	return 0;
}

static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");

static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}

static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}

static void
intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
			  u8 *to_mask)
{
	int offset = slice * sseu->ss_stride;

	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
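
/*
 * Usage sketch for i915_cache_sharing on gen6/gen7. Only the values 0-3
 * are accepted; they are programmed straight into the MBC SNPCR snoop
 * control field, whose per-value semantics are hardware-defined and not
 * spelled out here:
 *
 *   cat /sys/kernel/debug/dri/0/i915_cache_sharing      # current field value
 *   echo 1 > /sys/kernel/debug/dri/0/i915_cache_sharing
 */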

static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}

static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: The valid SS mask respects the spec and reads
		 * back only the valid bits of these registers, excluding
		 * reserved bits, although this seems wrong because it
		 * would leave many subslices without an ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2 * s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}

static void bdw_sseu_device_status(struct drm_i915_private *dev_priv,
				   struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}

static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
				 const struct sseu_dev_info *sseu)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const char *type = is_available_info ? "Available" : "Enabled";
	int s;

	seq_printf(m, "  %s Slice Mask: %04x\n", type,
		   sseu->slice_mask);
	seq_printf(m, "  %s Slice Total: %u\n", type,
		   hweight8(sseu->slice_mask));
	seq_printf(m, "  %s Subslice Total: %u\n", type,
		   intel_sseu_subslice_total(sseu));
	for (s = 0; s < fls(sseu->slice_mask); s++) {
		seq_printf(m, "  %s Slice%i subslices: %u\n", type,
			   s, intel_sseu_subslices_per_slice(sseu, s));
	}
	seq_printf(m, "  %s EU Total: %u\n", type,
		   sseu->eu_total);
	seq_printf(m, "  %s EU Per Subslice: %u\n", type,
		   sseu->eu_per_subslice);

	if (!is_available_info)
		return;

	seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
	if (HAS_POOLED_EU(dev_priv))
		seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

	seq_printf(m, "  Has Slice Power Gating: %s\n",
		   yesno(sseu->has_slice_pg));
	seq_printf(m, "  Has Subslice Power Gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	seq_printf(m, "  Has EU Power Gating: %s\n",
		   yesno(sseu->has_eu_pg));
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			bdw_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}

static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}

static int i915_forcewake_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_put(&i915->uncore);
	intel_gt_pm_put(gt);
	atomic_dec(&gt->user_wakeref);

	return 0;
}

static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
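
/*
 * i915_forcewake_user keeps the GT awake (and, on gen6+, all forcewake
 * domains held) for as long as the file stays open. A hypothetical shell
 * session, assuming the usual debugfs layout:
 *
 *   exec 3< /sys/kernel/debug/dri/0/i915_forcewake_user   # grab forcewake
 *   ... poke registers ...
 *   exec 3<&-                                             # release it
 */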

static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}

static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}

static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
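
/*
 * Example interaction with i915_hpd_storm_ctl, matching the parser above
 * (the debugfs path is an assumption):
 *
 *   echo 5 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl       # threshold 5
 *   echo 0 > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl       # disable detection
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl   # back to default
 */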

static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}

static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}

static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}

static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
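
/*
 * i915_hpd_short_storm_ctl takes any string kstrtobool() understands
 * ("0"/"1", "y"/"n", "on"/"off"), plus "reset" to return to the platform
 * default, which per the code above is enabled unless the platform has
 * DP-MST:
 *
 *   echo on > /sys/kernel/debug/dri/0/i915_hpd_short_storm_ctl
 */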

static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(to_intel_connector(connector));
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(encoder);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
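
/*
 * i915_drrs_ctl is write-only (the read op above is NULL): any non-zero
 * value enables DRRS, and zero disables it, on every active eDP pipe
 * whose current state supports DRRS. For instance:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_drrs_ctl
 */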

static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}

static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
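
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every active
 * pipe (reporting is normally turned off after the first underrun to
 * avoid a flood of interrupts) and resets the FBC underrun tracking:
 *
 *   echo 1 > /sys/kernel/debug/dri/0/i915_fifo_underrun_reset
 */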

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP must be supported by the connector */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
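
/*
 * Per-connector DSC/FEC debugfs usage. The file is created for DP/eDP
 * connectors on gen10+ by i915_debugfs_connector_add() below; the
 * connector directory name here is a hypothetical example:
 *
 *   cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *   echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support  # force DSC
 *
 * The forced value is consulted when the mode is next recomputed, so it
 * takes effect on the following modeset.
 */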

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}