/*	$NetBSD: i915_sysfs.c,v 1.2 2018/08/27 04:58:24 riastradh Exp $	*/

/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_sysfs.c,v 1.2 2018/08/27 04:58:24 riastradh Exp $");

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
static u32 calc_residency(struct drm_device *dev, const u32 reg)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 raw_time; /* 32b value may overflow during fixed point math */
	u64 units = 128ULL, div = 100000ULL;
	u32 ret;

	if (!intel_enable_rc6(dev))
		return 0;

	intel_runtime_pm_get(dev_priv);

	/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
	if (IS_VALLEYVIEW(dev)) {
		units = 1;
		div = dev_priv->czclk_freq;

		if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
			units <<= 8;
	} else if (IS_BROXTON(dev)) {
		units = 1;
		div = 1200;		/* 833.33ns */
	}

	raw_time = I915_READ(reg) * units;
	ret = DIV_ROUND_UP_ULL(raw_time, div);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
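
/*
 * Worked example of the fixed-point conversion above (illustrative,
 * not from the original source): on Gen6+ the counter ticks every
 * 1.28us, which units = 128 and div = 100000 encode without floating
 * point, so a raw count of 1,000,000 ticks converts to
 * 1000000 * 128 / 100000 = 1280 ms of residency.  The VLV/CHV and
 * BXT branches swap in their platform tick periods instead.
 */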

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *dminor = dev_to_drm_minor(kdev);
	u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
	return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif
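
/*
 * Note (added for clarity; describes the Linux base this port mirrors):
 * power_group_name is the driver core's shared "power" group, so
 * sysfs_merge_group() in i915_setup_sysfs() folds the rc6 attributes
 * into the device's existing power/ directory, e.g.
 * .../power/rc6_residency_ms, instead of creating a new subdirectory.
 */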

static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
	if (!HAS_L3_DPF(dev))
		return -EPERM;

	if (offset % 4 != 0)
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}
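
/*
 * Illustrative summary (not in the original source): accesses must be
 * 4-byte aligned and inside the log, so offset 0x10 passes, offset
 * 0x11 fails with -EINVAL, and any offset at or beyond GEN7_L3LOG_SIZE
 * fails with -ENXIO.
 */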

static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	count = round_down(count, 4);

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (dev_priv->l3_parity.remap_info[slice])
		memcpy(buf,
		       dev_priv->l3_parity.remap_info[slice] + (offset/4),
		       count);
	else
		memset(buf, 0, count);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct drm_minor *dminor = dev_to_drm_minor(dev);
	struct drm_device *drm_dev = dminor->dev;
	struct drm_i915_private *dev_priv = drm_dev->dev_private;
	struct intel_context *ctx;
	u32 *temp = NULL; /* Just here to make handling failures easy */
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	if (!HAS_HW_CONTEXTS(drm_dev))
		return -ENXIO;

	ret = l3_access_valid(drm_dev, offset);
	if (ret)
		return ret;

	ret = i915_mutex_lock_interruptible(drm_dev);
	if (ret)
		return ret;

	if (!dev_priv->l3_parity.remap_info[slice]) {
		temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
		if (!temp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return -ENOMEM;
		}
	}

	ret = i915_gpu_idle(drm_dev);
	if (ret) {
		kfree(temp);
		mutex_unlock(&drm_dev->struct_mutex);
		return ret;
	}

	/* TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */
	if (temp)
		dev_priv->l3_parity.remap_info[slice] = temp;

	memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &dev_priv->context_list, link)
		ctx->remap_slice |= (1<<slice);

	mutex_unlock(&drm_dev->struct_mutex);

	return count;
}

static struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
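
/*
 * Note (added for clarity): both bin_attributes share the same
 * read/write handlers; .private carries the slice index, which the
 * handlers recover with (int)(uintptr_t)attr->private, so "l3_parity"
 * addresses slice 0 and "l3_parity_slice_1" addresses slice 1.
 */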

static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		u32 freq;
		freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
	} else {
		u32 rpstat = I915_READ(GEN6_RPSTAT1);
		if (IS_GEN9(dev_priv))
			ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
		else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
			ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
		else
			ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
		ret = intel_gpu_freq(dev_priv, ret);
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return snprintf(buf, PAGE_SIZE,
			"%d\n",
			intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val < dev_priv->rps.min_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	if (val > dev_priv->rps.rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(dev_priv, val));

	dev_priv->rps.max_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}
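
/*
 * Worked example of the store path above (illustrative, not from the
 * original source): intel_freq_opcode() converts the user's MHz value
 * into the hardware's frequency encoding, so writing e.g. "1100" to
 * gt_max_freq_mhz lowers max_freq_softlimit, and if the current
 * frequency sits above the new ceiling the clamp_t() pulls the request
 * down before intel_set_rps() reprograms the limits.
 */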

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	ssize_t ret;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = intel_freq_opcode(dev_priv, val);

	if (val < dev_priv->rps.min_freq ||
	    val > dev_priv->rps.max_freq ||
	    val > dev_priv->rps.max_freq_softlimit) {
		mutex_unlock(&dev_priv->rps.hw_lock);
		return -EINVAL;
	}

	dev_priv->rps.min_freq_softlimit = val;

	val = clamp_t(int, dev_priv->rps.cur_freq,
		      dev_priv->rps.min_freq_softlimit,
		      dev_priv->rps.max_freq_softlimit);

	/* We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged. */
	intel_set_rps(dev, val);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
	else
		BUG();

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
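
/*
 * Note (added for clarity): one show routine backs all three RP
 * attributes and dispatches on the attr pointer itself, mapping
 * gt_RP0_freq_mhz to rps.rp0_freq, gt_RP1_freq_mhz to rps.rp1_freq
 * and gt_RPn_freq_mhz to rps.min_freq, each converted to MHz by
 * intel_gpu_freq().  Any other attribute is a programming error,
 * hence the BUG().
 */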

static const struct attribute *gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute *vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	struct i915_error_state_file_priv error_priv;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret_count = 0;
	int ret;

	memset(&error_priv, 0, sizeof(error_priv));

	ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
	if (ret)
		return ret;

	error_priv.dev = dev;
	i915_error_state_get(dev, &error_priv);

	ret = i915_error_state_to_str(&error_str, &error_priv);
	if (ret)
		goto out;

	ret_count = count < error_str.bytes ? count : error_str.bytes;

	memcpy(buf, error_str.buf, ret_count);
out:
	i915_error_state_put(&error_priv);
	i915_error_state_buf_release(&error_str);

	return ret ?: ret_count;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = container_of(kobj, struct device, kobj);
	struct drm_minor *minor = dev_to_drm_minor(kdev);
	struct drm_device *dev = minor->dev;
	int ret;

	DRM_DEBUG_DRIVER("Resetting error state\n");

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	i915_destroy_error_state(dev);
	mutex_unlock(&dev->struct_mutex);

	return count;
}

static struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};
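
/*
 * Note (added for clarity; an assumption about sysfs bin_attribute
 * semantics): .size = 0 leaves the "error" node without a fixed size,
 * since the captured error state's length is not known in advance.
 * Reads stream the state through i915_error_state_to_str() a chunk at
 * a time, and writing anything to the node discards the saved state
 * via i915_destroy_error_state().
 */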

void i915_setup_sysfs(struct drm_device *dev)
{
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6_attr_group);
		if (ret)
			DRM_ERROR("RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			DRM_ERROR("RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev)) {
		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			DRM_ERROR("Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev)) {
		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
		if (ret)
			DRM_ERROR("l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev) > 1) {
			ret = device_create_bin_file(dev->primary->kdev,
						     &dpf_attrs_1);
			if (ret)
				DRM_ERROR("l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev))
		ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
	else if (INTEL_INFO(dev)->gen >= 6)
		ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
	if (ret)
		DRM_ERROR("RPS sysfs setup failed\n");

	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
				    &error_state_attr);
	if (ret)
		DRM_ERROR("error_state sysfs setup failed\n");
}
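
/*
 * Usage sketch (illustrative; paths assume the Linux sysfs layout this
 * NetBSD port mirrors): every attribute hangs off the primary minor's
 * device node, so on a Linux system they would appear as e.g.
 * /sys/class/drm/card0/gt_max_freq_mhz and, for the merged rc6 group,
 * /sys/class/drm/card0/power/rc6_residency_ms.
 */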

void i915_teardown_sysfs(struct drm_device *dev)
{
	sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
	if (IS_VALLEYVIEW(dev))
		sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}
653