xref: /dflybsd-src/sys/dev/drm/i915/i915_irq.c (revision 5f0fe703ba9b92b80474ba29ad14f8e1fb1d97e9)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: interrupt handling
37  *
38  * These functions provide the basic support for enabling and disabling the
39  * interrupt handling support. There's a lot more functionality in i915_irq.c
40  * and related files, but that will be described in separate chapters.
41  */
42 
43 static const u32 hpd_ibx[HPD_NUM_PINS] = {
44 	[HPD_CRT] = SDE_CRT_HOTPLUG,
45 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
46 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
47 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
48 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
49 };
50 
51 static const u32 hpd_cpt[HPD_NUM_PINS] = {
52 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
53 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
54 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
55 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
56 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
57 };
58 
59 static const u32 hpd_spt[HPD_NUM_PINS] = {
60 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
61 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
62 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
63 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
64 };
65 
66 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
67 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
68 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
69 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
70 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
71 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
72 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
73 };
74 
75 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
76 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
77 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
78 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
79 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
80 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
81 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
82 };
83 
84 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
85 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
86 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
87 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
88 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
89 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
90 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
91 };
92 
93 /* BXT hpd list */
94 static const u32 hpd_bxt[HPD_NUM_PINS] = {
95 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
96 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
97 };
98 
99 /* IIR can theoretically queue up two events. Be paranoid. */
100 #define GEN8_IRQ_RESET_NDX(type, which) do { \
101 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
102 	POSTING_READ(GEN8_##type##_IMR(which)); \
103 	I915_WRITE(GEN8_##type##_IER(which), 0); \
104 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
105 	POSTING_READ(GEN8_##type##_IIR(which)); \
106 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
107 	POSTING_READ(GEN8_##type##_IIR(which)); \
108 } while (0)
109 
110 #define GEN5_IRQ_RESET(type) do { \
111 	I915_WRITE(type##IMR, 0xffffffff); \
112 	POSTING_READ(type##IMR); \
113 	I915_WRITE(type##IER, 0); \
114 	I915_WRITE(type##IIR, 0xffffffff); \
115 	POSTING_READ(type##IIR); \
116 	I915_WRITE(type##IIR, 0xffffffff); \
117 	POSTING_READ(type##IIR); \
118 } while (0)
119 
120 /*
121  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
122  */
123 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
124 	u32 val = I915_READ(reg); \
125 	if (val) { \
126 		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
127 		     (reg), val); \
128 		I915_WRITE((reg), 0xffffffff); \
129 		POSTING_READ(reg); \
130 		I915_WRITE((reg), 0xffffffff); \
131 		POSTING_READ(reg); \
132 	} \
133 } while (0)
134 
135 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
136 	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
137 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
138 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
139 	POSTING_READ(GEN8_##type##_IMR(which)); \
140 } while (0)
141 
142 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
143 	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
144 	I915_WRITE(type##IER, (ier_val)); \
145 	I915_WRITE(type##IMR, (imr_val)); \
146 	POSTING_READ(type##IMR); \
147 } while (0)
148 
149 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
150 
151 /* For display hotplug interrupt */
152 void
153 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
154 {
155 	assert_spin_locked(&dev_priv->irq_lock);
156 
157 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
158 		return;
159 
160 	if ((dev_priv->irq_mask & mask) != 0) {
161 		dev_priv->irq_mask &= ~mask;
162 		I915_WRITE(DEIMR, dev_priv->irq_mask);
163 		POSTING_READ(DEIMR);
164 	}
165 }
166 
167 void
168 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
169 {
170 	assert_spin_locked(&dev_priv->irq_lock);
171 
172 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
173 		return;
174 
175 	if ((dev_priv->irq_mask & mask) != mask) {
176 		dev_priv->irq_mask |= mask;
177 		I915_WRITE(DEIMR, dev_priv->irq_mask);
178 		POSTING_READ(DEIMR);
179 	}
180 }
181 
182 /**
183  * ilk_update_gt_irq - update GTIMR
184  * @dev_priv: driver private
185  * @interrupt_mask: mask of interrupt bits to update
186  * @enabled_irq_mask: mask of interrupt bits to enable
187  */
188 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
189 			      uint32_t interrupt_mask,
190 			      uint32_t enabled_irq_mask)
191 {
192 	assert_spin_locked(&dev_priv->irq_lock);
193 
194 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
195 
196 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
197 		return;
198 
199 	dev_priv->gt_irq_mask &= ~interrupt_mask;
200 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
201 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
202 	POSTING_READ(GTIMR);
203 }
204 
205 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
206 {
207 	ilk_update_gt_irq(dev_priv, mask, mask);
208 }
209 
210 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
211 {
212 	ilk_update_gt_irq(dev_priv, mask, 0);
213 }
214 
215 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
216 {
217 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
218 }
219 
220 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
221 {
222 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
223 }
224 
225 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
226 {
227 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
228 }
229 
230 /**
231   * snb_update_pm_irq - update GEN6_PMIMR
232   * @dev_priv: driver private
233   * @interrupt_mask: mask of interrupt bits to update
234   * @enabled_irq_mask: mask of interrupt bits to enable
235   */
236 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
237 			      uint32_t interrupt_mask,
238 			      uint32_t enabled_irq_mask)
239 {
240 	uint32_t new_val;
241 
242 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
243 
244 	assert_spin_locked(&dev_priv->irq_lock);
245 
246 	new_val = dev_priv->pm_irq_mask;
247 	new_val &= ~interrupt_mask;
248 	new_val |= (~enabled_irq_mask & interrupt_mask);
249 
250 	if (new_val != dev_priv->pm_irq_mask) {
251 		dev_priv->pm_irq_mask = new_val;
252 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
253 		POSTING_READ(gen6_pm_imr(dev_priv));
254 	}
255 }
256 
257 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
258 {
259 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
260 		return;
261 
262 	snb_update_pm_irq(dev_priv, mask, mask);
263 }
264 
265 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
266 				  uint32_t mask)
267 {
268 	snb_update_pm_irq(dev_priv, mask, 0);
269 }
270 
271 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
272 {
273 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
274 		return;
275 
276 	__gen6_disable_pm_irq(dev_priv, mask);
277 }
278 
279 void gen6_reset_rps_interrupts(struct drm_device *dev)
280 {
281 	struct drm_i915_private *dev_priv = dev->dev_private;
282 	uint32_t reg = gen6_pm_iir(dev_priv);
283 
284 	spin_lock_irq(&dev_priv->irq_lock);
285 	I915_WRITE(reg, dev_priv->pm_rps_events);
286 	I915_WRITE(reg, dev_priv->pm_rps_events);
287 	POSTING_READ(reg);
288 	dev_priv->rps.pm_iir = 0;
289 	spin_unlock_irq(&dev_priv->irq_lock);
290 }
291 
292 void gen6_enable_rps_interrupts(struct drm_device *dev)
293 {
294 	struct drm_i915_private *dev_priv = dev->dev_private;
295 
296 	spin_lock_irq(&dev_priv->irq_lock);
297 
298 	WARN_ON(dev_priv->rps.pm_iir);
299 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
300 	dev_priv->rps.interrupts_enabled = true;
301 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
302 				dev_priv->pm_rps_events);
303 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
304 
305 	spin_unlock_irq(&dev_priv->irq_lock);
306 }
307 
308 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
309 {
310 	/*
311 	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
312 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
313 	 *
314 	 * TODO: verify if this can be reproduced on VLV,CHV.
315 	 */
316 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
317 		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
318 
319 	if (INTEL_INFO(dev_priv)->gen >= 8)
320 		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
321 
322 	return mask;
323 }
324 
325 void gen6_disable_rps_interrupts(struct drm_device *dev)
326 {
327 	struct drm_i915_private *dev_priv = dev->dev_private;
328 
329 	spin_lock_irq(&dev_priv->irq_lock);
330 	dev_priv->rps.interrupts_enabled = false;
331 	spin_unlock_irq(&dev_priv->irq_lock);
332 
333 	cancel_work_sync(&dev_priv->rps.work);
334 
335 	spin_lock_irq(&dev_priv->irq_lock);
336 
337 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
338 
339 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
340 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
341 				~dev_priv->pm_rps_events);
342 
343 	spin_unlock_irq(&dev_priv->irq_lock);
344 
345 #if 0
346 	synchronize_irq(dev->irq);
347 #endif
348 }
349 
350 /**
351  * ibx_display_interrupt_update - update SDEIMR
352  * @dev_priv: driver private
353  * @interrupt_mask: mask of interrupt bits to update
354  * @enabled_irq_mask: mask of interrupt bits to enable
355  */
356 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
357 				  uint32_t interrupt_mask,
358 				  uint32_t enabled_irq_mask)
359 {
360 	uint32_t sdeimr = I915_READ(SDEIMR);
361 	sdeimr &= ~interrupt_mask;
362 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
363 
364 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
365 
366 	assert_spin_locked(&dev_priv->irq_lock);
367 
368 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
369 		return;
370 
371 	I915_WRITE(SDEIMR, sdeimr);
372 	POSTING_READ(SDEIMR);
373 }
374 
375 static void
376 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
377 		       u32 enable_mask, u32 status_mask)
378 {
379 	u32 reg = PIPESTAT(pipe);
380 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
381 
382 	assert_spin_locked(&dev_priv->irq_lock);
383 	WARN_ON(!intel_irqs_enabled(dev_priv));
384 
385 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
386 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
387 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
388 		      pipe_name(pipe), enable_mask, status_mask))
389 		return;
390 
391 	if ((pipestat & enable_mask) == enable_mask)
392 		return;
393 
394 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
395 
396 	/* Enable the interrupt, clear any pending status */
397 	pipestat |= enable_mask | status_mask;
398 	I915_WRITE(reg, pipestat);
399 	POSTING_READ(reg);
400 }
401 
402 static void
403 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
404 		        u32 enable_mask, u32 status_mask)
405 {
406 	u32 reg = PIPESTAT(pipe);
407 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
408 
409 	assert_spin_locked(&dev_priv->irq_lock);
410 	WARN_ON(!intel_irqs_enabled(dev_priv));
411 
412 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
413 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
414 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
415 		      pipe_name(pipe), enable_mask, status_mask))
416 		return;
417 
418 	if ((pipestat & enable_mask) == 0)
419 		return;
420 
421 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
422 
423 	pipestat &= ~enable_mask;
424 	I915_WRITE(reg, pipestat);
425 	POSTING_READ(reg);
426 }
427 
428 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
429 {
430 	u32 enable_mask = status_mask << 16;
431 
432 	/*
433 	 * On pipe A we don't support the PSR interrupt yet,
434 	 * on pipe B and C the same bit MBZ.
435 	 */
436 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
437 		return 0;
438 	/*
439 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
440 	 * A the same bit is for perf counters which we don't use either.
441 	 */
442 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
443 		return 0;
444 
445 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
446 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
447 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
448 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
449 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
450 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
451 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
452 
453 	return enable_mask;
454 }
455 
456 void
457 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
458 		     u32 status_mask)
459 {
460 	u32 enable_mask;
461 
462 	if (IS_VALLEYVIEW(dev_priv->dev))
463 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
464 							   status_mask);
465 	else
466 		enable_mask = status_mask << 16;
467 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
468 }
469 
470 void
471 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
472 		      u32 status_mask)
473 {
474 	u32 enable_mask;
475 
476 	if (IS_VALLEYVIEW(dev_priv->dev))
477 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
478 							   status_mask);
479 	else
480 		enable_mask = status_mask << 16;
481 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
482 }
483 
484 /**
485  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
486  */
487 static void i915_enable_asle_pipestat(struct drm_device *dev)
488 {
489 	struct drm_i915_private *dev_priv = dev->dev_private;
490 
491 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
492 		return;
493 
494 	spin_lock_irq(&dev_priv->irq_lock);
495 
496 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
497 	if (INTEL_INFO(dev)->gen >= 4)
498 		i915_enable_pipestat(dev_priv, PIPE_A,
499 				     PIPE_LEGACY_BLC_EVENT_STATUS);
500 
501 	spin_unlock_irq(&dev_priv->irq_lock);
502 }
503 
504 /*
505  * This timing diagram depicts the video signal in and
506  * around the vertical blanking period.
507  *
508  * Assumptions about the fictitious mode used in this example:
509  *  vblank_start >= 3
510  *  vsync_start = vblank_start + 1
511  *  vsync_end = vblank_start + 2
512  *  vtotal = vblank_start + 3
513  *
514  *           start of vblank:
515  *           latch double buffered registers
516  *           increment frame counter (ctg+)
517  *           generate start of vblank interrupt (gen4+)
518  *           |
519  *           |          frame start:
520  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
521  *           |          may be shifted forward 1-3 extra lines via PIPECONF
522  *           |          |
523  *           |          |  start of vsync:
524  *           |          |  generate vsync interrupt
525  *           |          |  |
526  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
527  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
528  * ----va---> <-----------------vb--------------------> <--------va-------------
529  *       |          |       <----vs----->                     |
530  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
531  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
532  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
533  *       |          |                                         |
534  *       last visible pixel                                   first visible pixel
535  *                  |                                         increment frame counter (gen3/4)
536  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
537  *
538  * x  = horizontal active
539  * _  = horizontal blanking
540  * hs = horizontal sync
541  * va = vertical active
542  * vb = vertical blanking
543  * vs = vertical sync
544  * vbs = vblank_start (number)
545  *
546  * Summary:
547  * - most events happen at the start of horizontal sync
548  * - frame start happens at the start of horizontal blank, 1-4 lines
549  *   (depending on PIPECONF settings) after the start of vblank
550  * - gen3/4 pixel and frame counter are synchronized with the start
551  *   of horizontal active on the first line of vertical active
552  */
553 
554 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
555 {
556 	/* Gen2 doesn't have a hardware frame counter */
557 	return 0;
558 }
559 
560 /* Called from drm generic code, passed a 'crtc', which
561  * we use as a pipe index
562  */
563 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
564 {
565 	struct drm_i915_private *dev_priv = dev->dev_private;
566 	unsigned long high_frame;
567 	unsigned long low_frame;
568 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
569 	struct intel_crtc *intel_crtc =
570 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
571 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
572 
573 	htotal = mode->crtc_htotal;
574 	hsync_start = mode->crtc_hsync_start;
575 	vbl_start = mode->crtc_vblank_start;
576 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
577 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
578 
579 	/* Convert to pixel count */
580 	vbl_start *= htotal;
581 
582 	/* Start of vblank event occurs at start of hsync */
583 	vbl_start -= htotal - hsync_start;
584 
585 	high_frame = PIPEFRAME(pipe);
586 	low_frame = PIPEFRAMEPIXEL(pipe);
587 
588 	/*
589 	 * High & low register fields aren't synchronized, so make sure
590 	 * we get a low value that's stable across two reads of the high
591 	 * register.
592 	 */
593 	do {
594 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
595 		low   = I915_READ(low_frame);
596 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
597 	} while (high1 != high2);
598 
599 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
600 	pixel = low & PIPE_PIXEL_MASK;
601 	low >>= PIPE_FRAME_LOW_SHIFT;
602 
603 	/*
604 	 * The frame counter increments at beginning of active.
605 	 * Cook up a vblank counter by also checking the pixel
606 	 * counter against vblank start.
607 	 */
608 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
609 }
610 
611 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
612 {
613 	struct drm_i915_private *dev_priv = dev->dev_private;
614 	int reg = PIPE_FRMCOUNT_GM45(pipe);
615 
616 	return I915_READ(reg);
617 }
618 
619 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
620 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
621 
622 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
623 {
624 	struct drm_device *dev = crtc->base.dev;
625 	struct drm_i915_private *dev_priv = dev->dev_private;
626 	const struct drm_display_mode *mode = &crtc->base.hwmode;
627 	enum i915_pipe pipe = crtc->pipe;
628 	int position, vtotal;
629 
630 	vtotal = mode->crtc_vtotal;
631 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
632 		vtotal /= 2;
633 
634 	if (IS_GEN2(dev))
635 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
636 	else
637 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
638 
639 	/*
640 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
641 	 * read it just before the start of vblank.  So try it again
642 	 * so we don't accidentally end up spanning a vblank frame
643 	 * increment, causing the pipe_update_end() code to squak at us.
644 	 *
645 	 * The nature of this problem means we can't simply check the ISR
646 	 * bit and return the vblank start value; nor can we use the scanline
647 	 * debug register in the transcoder as it appears to have the same
648 	 * problem.  We may need to extend this to include other platforms,
649 	 * but so far testing only shows the problem on HSW.
650 	 */
651 	if (IS_HASWELL(dev) && !position) {
652 		int i, temp;
653 
654 		for (i = 0; i < 100; i++) {
655 			udelay(1);
656 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
657 				DSL_LINEMASK_GEN3;
658 			if (temp != position) {
659 				position = temp;
660 				break;
661 			}
662 		}
663 	}
664 
665 	/*
666 	 * See update_scanline_offset() for the details on the
667 	 * scanline_offset adjustment.
668 	 */
669 	return (position + crtc->scanline_offset) % vtotal;
670 }
671 
672 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
673 				    unsigned int flags, int *vpos, int *hpos,
674 				    ktime_t *stime, ktime_t *etime)
675 {
676 	struct drm_i915_private *dev_priv = dev->dev_private;
677 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
678 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
679 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
680 	int position;
681 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
682 	bool in_vbl = true;
683 	int ret = 0;
684 	unsigned long irqflags;
685 
686 	if (WARN_ON(!mode->crtc_clock)) {
687 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
688 				 "pipe %c\n", pipe_name(pipe));
689 		return 0;
690 	}
691 
692 	htotal = mode->crtc_htotal;
693 	hsync_start = mode->crtc_hsync_start;
694 	vtotal = mode->crtc_vtotal;
695 	vbl_start = mode->crtc_vblank_start;
696 	vbl_end = mode->crtc_vblank_end;
697 
698 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
699 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
700 		vbl_end /= 2;
701 		vtotal /= 2;
702 	}
703 
704 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
705 
706 	/*
707 	 * Lock uncore.lock, as we will do multiple timing critical raw
708 	 * register reads, potentially with preemption disabled, so the
709 	 * following code must not block on uncore.lock.
710 	 */
711 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
712 
713 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
714 
715 	/* Get optional system timestamp before query. */
716 	if (stime)
717 		*stime = ktime_get();
718 
719 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
720 		/* No obvious pixelcount register. Only query vertical
721 		 * scanout position from Display scan line register.
722 		 */
723 		position = __intel_get_crtc_scanline(intel_crtc);
724 	} else {
725 		/* Have access to pixelcount since start of frame.
726 		 * We can split this into vertical and horizontal
727 		 * scanout position.
728 		 */
729 		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
730 
731 		/* convert to pixel counts */
732 		vbl_start *= htotal;
733 		vbl_end *= htotal;
734 		vtotal *= htotal;
735 
736 		/*
737 		 * In interlaced modes, the pixel counter counts all pixels,
738 		 * so one field will have htotal more pixels. In order to avoid
739 		 * the reported position from jumping backwards when the pixel
740 		 * counter is beyond the length of the shorter field, just
741 		 * clamp the position the length of the shorter field. This
742 		 * matches how the scanline counter based position works since
743 		 * the scanline counter doesn't count the two half lines.
744 		 */
745 		if (position >= vtotal)
746 			position = vtotal - 1;
747 
748 		/*
749 		 * Start of vblank interrupt is triggered at start of hsync,
750 		 * just prior to the first active line of vblank. However we
751 		 * consider lines to start at the leading edge of horizontal
752 		 * active. So, should we get here before we've crossed into
753 		 * the horizontal active of the first line in vblank, we would
754 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
755 		 * always add htotal-hsync_start to the current pixel position.
756 		 */
757 		position = (position + htotal - hsync_start) % vtotal;
758 	}
759 
760 	/* Get optional system timestamp after query. */
761 	if (etime)
762 		*etime = ktime_get();
763 
764 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
765 
766 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
767 
768 	in_vbl = position >= vbl_start && position < vbl_end;
769 
770 	/*
771 	 * While in vblank, position will be negative
772 	 * counting up towards 0 at vbl_end. And outside
773 	 * vblank, position will be positive counting
774 	 * up since vbl_end.
775 	 */
776 	if (position >= vbl_start)
777 		position -= vbl_end;
778 	else
779 		position += vtotal - vbl_end;
780 
781 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
782 		*vpos = position;
783 		*hpos = 0;
784 	} else {
785 		*vpos = position / htotal;
786 		*hpos = position - (*vpos * htotal);
787 	}
788 
789 	/* In vblank? */
790 	if (in_vbl)
791 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
792 
793 	return ret;
794 }
795 
796 int intel_get_crtc_scanline(struct intel_crtc *crtc)
797 {
798 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
799 	unsigned long irqflags;
800 	int position;
801 
802 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
803 	position = __intel_get_crtc_scanline(crtc);
804 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
805 
806 	return position;
807 }
808 
809 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
810 			      int *max_error,
811 			      struct timeval *vblank_time,
812 			      unsigned flags)
813 {
814 	struct drm_crtc *crtc;
815 
816 	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
817 		DRM_ERROR("Invalid crtc %d\n", pipe);
818 		return -EINVAL;
819 	}
820 
821 	/* Get drm_crtc to timestamp: */
822 	crtc = intel_get_crtc_for_pipe(dev, pipe);
823 	if (crtc == NULL) {
824 		DRM_ERROR("Invalid crtc %d\n", pipe);
825 		return -EINVAL;
826 	}
827 
828 	if (!crtc->hwmode.crtc_clock) {
829 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
830 		return -EBUSY;
831 	}
832 
833 	/* Helper routine in DRM core does all the work: */
834 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
835 						     vblank_time, flags,
836 						     crtc,
837 						     &crtc->hwmode);
838 }
839 
840 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
841 {
842 	struct drm_i915_private *dev_priv = dev->dev_private;
843 	u32 busy_up, busy_down, max_avg, min_avg;
844 	u8 new_delay;
845 
846 	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
847 
848 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
849 
850 	new_delay = dev_priv->ips.cur_delay;
851 
852 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
853 	busy_up = I915_READ(RCPREVBSYTUPAVG);
854 	busy_down = I915_READ(RCPREVBSYTDNAVG);
855 	max_avg = I915_READ(RCBMAXAVG);
856 	min_avg = I915_READ(RCBMINAVG);
857 
858 	/* Handle RCS change request from hw */
859 	if (busy_up > max_avg) {
860 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
861 			new_delay = dev_priv->ips.cur_delay - 1;
862 		if (new_delay < dev_priv->ips.max_delay)
863 			new_delay = dev_priv->ips.max_delay;
864 	} else if (busy_down < min_avg) {
865 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
866 			new_delay = dev_priv->ips.cur_delay + 1;
867 		if (new_delay > dev_priv->ips.min_delay)
868 			new_delay = dev_priv->ips.min_delay;
869 	}
870 
871 	if (ironlake_set_drps(dev, new_delay))
872 		dev_priv->ips.cur_delay = new_delay;
873 
874 	lockmgr(&mchdev_lock, LK_RELEASE);
875 
876 	return;
877 }
878 
879 static void notify_ring(struct intel_engine_cs *ring)
880 {
881 	if (!intel_ring_initialized(ring))
882 		return;
883 
884 	trace_i915_gem_request_notify(ring);
885 
886 	wake_up_all(&ring->irq_queue);
887 }
888 
889 static void vlv_c0_read(struct drm_i915_private *dev_priv,
890 			struct intel_rps_ei *ei)
891 {
892 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
893 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
894 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
895 }
896 
897 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
898 			 const struct intel_rps_ei *old,
899 			 const struct intel_rps_ei *now,
900 			 int threshold)
901 {
902 	u64 time, c0;
903 
904 	if (old->cz_clock == 0)
905 		return false;
906 
907 	time = now->cz_clock - old->cz_clock;
908 	time *= threshold * dev_priv->mem_freq;
909 
910 	/* Workload can be split between render + media, e.g. SwapBuffers
911 	 * being blitted in X after being rendered in mesa. To account for
912 	 * this we need to combine both engines into our activity counter.
913 	 */
914 	c0 = now->render_c0 - old->render_c0;
915 	c0 += now->media_c0 - old->media_c0;
916 	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
917 
918 	return c0 >= time;
919 }
920 
921 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
922 {
923 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
924 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
925 }
926 
927 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
928 {
929 	struct intel_rps_ei now;
930 	u32 events = 0;
931 
932 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
933 		return 0;
934 
935 	vlv_c0_read(dev_priv, &now);
936 	if (now.cz_clock == 0)
937 		return 0;
938 
939 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
940 		if (!vlv_c0_above(dev_priv,
941 				  &dev_priv->rps.down_ei, &now,
942 				  dev_priv->rps.down_threshold))
943 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
944 		dev_priv->rps.down_ei = now;
945 	}
946 
947 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
948 		if (vlv_c0_above(dev_priv,
949 				 &dev_priv->rps.up_ei, &now,
950 				 dev_priv->rps.up_threshold))
951 			events |= GEN6_PM_RP_UP_THRESHOLD;
952 		dev_priv->rps.up_ei = now;
953 	}
954 
955 	return events;
956 }
957 
958 static bool any_waiters(struct drm_i915_private *dev_priv)
959 {
960 	struct intel_engine_cs *ring;
961 	int i;
962 
963 	for_each_ring(ring, dev_priv, i)
964 		if (ring->irq_refcount)
965 			return true;
966 
967 	return false;
968 }
969 
970 static void gen6_pm_rps_work(struct work_struct *work)
971 {
972 	struct drm_i915_private *dev_priv =
973 		container_of(work, struct drm_i915_private, rps.work);
974 	bool client_boost;
975 	int new_delay, adj, min, max;
976 	u32 pm_iir;
977 
978 	spin_lock_irq(&dev_priv->irq_lock);
979 	/* Speed up work cancelation during disabling rps interrupts. */
980 	if (!dev_priv->rps.interrupts_enabled) {
981 		spin_unlock_irq(&dev_priv->irq_lock);
982 		return;
983 	}
984 	pm_iir = dev_priv->rps.pm_iir;
985 	dev_priv->rps.pm_iir = 0;
986 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
987 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
988 	client_boost = dev_priv->rps.client_boost;
989 	dev_priv->rps.client_boost = false;
990 	spin_unlock_irq(&dev_priv->irq_lock);
991 
992 	/* Make sure we didn't queue anything we're not going to process. */
993 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
994 
995 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
996 		return;
997 
998 	mutex_lock(&dev_priv->rps.hw_lock);
999 
1000 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1001 
1002 	adj = dev_priv->rps.last_adj;
1003 	new_delay = dev_priv->rps.cur_freq;
1004 	min = dev_priv->rps.min_freq_softlimit;
1005 	max = dev_priv->rps.max_freq_softlimit;
1006 
1007 	if (client_boost) {
1008 		new_delay = dev_priv->rps.max_freq_softlimit;
1009 		adj = 0;
1010 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1011 		if (adj > 0)
1012 			adj *= 2;
1013 		else /* CHV needs even encode values */
1014 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1015 		/*
1016 		 * For better performance, jump directly
1017 		 * to RPe if we're below it.
1018 		 */
1019 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1020 			new_delay = dev_priv->rps.efficient_freq;
1021 			adj = 0;
1022 		}
1023 	} else if (any_waiters(dev_priv)) {
1024 		adj = 0;
1025 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1026 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1027 			new_delay = dev_priv->rps.efficient_freq;
1028 		else
1029 			new_delay = dev_priv->rps.min_freq_softlimit;
1030 		adj = 0;
1031 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1032 		if (adj < 0)
1033 			adj *= 2;
1034 		else /* CHV needs even encode values */
1035 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1036 	} else { /* unknown event */
1037 		adj = 0;
1038 	}
1039 
1040 	dev_priv->rps.last_adj = adj;
1041 
1042 	/* sysfs frequency interfaces may have snuck in while servicing the
1043 	 * interrupt
1044 	 */
1045 	new_delay += adj;
1046 	new_delay = clamp_t(int, new_delay, min, max);
1047 
1048 	intel_set_rps(dev_priv->dev, new_delay);
1049 
1050 	mutex_unlock(&dev_priv->rps.hw_lock);
1051 }
1052 
1053 
1054 /**
1055  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1056  * occurred.
1057  * @work: workqueue struct
1058  *
1059  * Doesn't actually do anything except notify userspace. As a consequence of
1060  * this event, userspace should try to remap the bad rows since statistically
1061  * it is likely the same row is more likely to go bad again.
1062  */
1063 static void ivybridge_parity_work(struct work_struct *work)
1064 {
1065 	struct drm_i915_private *dev_priv =
1066 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1067 	u32 error_status, row, bank, subbank;
1068 	char *parity_event[6];
1069 	uint32_t misccpctl;
1070 	uint8_t slice = 0;
1071 
1072 	/* We must turn off DOP level clock gating to access the L3 registers.
1073 	 * In order to prevent a get/put style interface, acquire struct mutex
1074 	 * any time we access those registers.
1075 	 */
1076 	mutex_lock(&dev_priv->dev->struct_mutex);
1077 
1078 	/* If we've screwed up tracking, just let the interrupt fire again */
1079 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1080 		goto out;
1081 
1082 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1083 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1084 	POSTING_READ(GEN7_MISCCPCTL);
1085 
1086 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1087 		u32 reg;
1088 
1089 		slice--;
1090 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1091 			break;
1092 
1093 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1094 
1095 		reg = GEN7_L3CDERRST1 + (slice * 0x200);
1096 
1097 		error_status = I915_READ(reg);
1098 		row = GEN7_PARITY_ERROR_ROW(error_status);
1099 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1100 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1101 
1102 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1103 		POSTING_READ(reg);
1104 
1105 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1106 		parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
1107 		parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
1108 		parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1109 		parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
1110 		parity_event[5] = NULL;
1111 
1112 #if 0
1113 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1114 				   KOBJ_CHANGE, parity_event);
1115 #endif
1116 
1117 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1118 			  slice, row, bank, subbank);
1119 
1120 		kfree(parity_event[4]);
1121 		kfree(parity_event[3]);
1122 		kfree(parity_event[2]);
1123 		kfree(parity_event[1]);
1124 	}
1125 
1126 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1127 
1128 out:
1129 	WARN_ON(dev_priv->l3_parity.which_slice);
1130 	spin_lock_irq(&dev_priv->irq_lock);
1131 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1132 	spin_unlock_irq(&dev_priv->irq_lock);
1133 
1134 	mutex_unlock(&dev_priv->dev->struct_mutex);
1135 }
1136 
1137 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1138 {
1139 	struct drm_i915_private *dev_priv = dev->dev_private;
1140 
1141 	if (!HAS_L3_DPF(dev))
1142 		return;
1143 
1144 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1145 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1146 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1147 
1148 	iir &= GT_PARITY_ERROR(dev);
1149 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1150 		dev_priv->l3_parity.which_slice |= 1 << 1;
1151 
1152 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1153 		dev_priv->l3_parity.which_slice |= 1 << 0;
1154 
1155 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1156 }
1157 
1158 static void ilk_gt_irq_handler(struct drm_device *dev,
1159 			       struct drm_i915_private *dev_priv,
1160 			       u32 gt_iir)
1161 {
1162 	if (gt_iir &
1163 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1164 		notify_ring(&dev_priv->ring[RCS]);
1165 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1166 		notify_ring(&dev_priv->ring[VCS]);
1167 }
1168 
1169 static void snb_gt_irq_handler(struct drm_device *dev,
1170 			       struct drm_i915_private *dev_priv,
1171 			       u32 gt_iir)
1172 {
1173 
1174 	if (gt_iir &
1175 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1176 		notify_ring(&dev_priv->ring[RCS]);
1177 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1178 		notify_ring(&dev_priv->ring[VCS]);
1179 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1180 		notify_ring(&dev_priv->ring[BCS]);
1181 
1182 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1183 		      GT_BSD_CS_ERROR_INTERRUPT |
1184 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1185 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1186 
1187 	if (gt_iir & GT_PARITY_ERROR(dev))
1188 		ivybridge_parity_error_irq_handler(dev, gt_iir);
1189 }
1190 
1191 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1192 				       u32 master_ctl)
1193 {
1194 
1195 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1196 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1197 		if (tmp) {
1198 			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1199 
1200 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1201 				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1202 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1203 				notify_ring(&dev_priv->ring[RCS]);
1204 
1205 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1206 				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1207 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1208 				notify_ring(&dev_priv->ring[BCS]);
1209 		} else
1210 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1211 	}
1212 
1213 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1214 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1215 		if (tmp) {
1216 			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1217 
1218 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1219 				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1220 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1221 				notify_ring(&dev_priv->ring[VCS]);
1222 
1223 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1224 				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1225 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1226 				notify_ring(&dev_priv->ring[VCS2]);
1227 		} else
1228 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1229 	}
1230 
1231 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1232 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1233 		if (tmp) {
1234 			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1235 
1236 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1237 				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1238 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1239 				notify_ring(&dev_priv->ring[VECS]);
1240 		} else
1241 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1242 	}
1243 
1244 	if (master_ctl & GEN8_GT_PM_IRQ) {
1245 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1246 		if (tmp & dev_priv->pm_rps_events) {
1247 			I915_WRITE_FW(GEN8_GT_IIR(2),
1248 				      tmp & dev_priv->pm_rps_events);
1249 			gen6_rps_irq_handler(dev_priv, tmp);
1250 		} else
1251 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1252 	}
1253 
1254 }
1255 
1256 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1257 {
1258 	switch (port) {
1259 	case PORT_A:
1260 		return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
1261 	case PORT_B:
1262 		return val & PORTB_HOTPLUG_LONG_DETECT;
1263 	case PORT_C:
1264 		return val & PORTC_HOTPLUG_LONG_DETECT;
1265 	case PORT_D:
1266 		return val & PORTD_HOTPLUG_LONG_DETECT;
1267 	default:
1268 		return false;
1269 	}
1270 }
1271 
1272 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1273 {
1274 	switch (port) {
1275 	case PORT_B:
1276 		return val & PORTB_HOTPLUG_LONG_DETECT;
1277 	case PORT_C:
1278 		return val & PORTC_HOTPLUG_LONG_DETECT;
1279 	case PORT_D:
1280 		return val & PORTD_HOTPLUG_LONG_DETECT;
1281 	case PORT_E:
1282 		return val & PORTE_HOTPLUG_LONG_DETECT;
1283 	default:
1284 		return false;
1285 	}
1286 }
1287 
1288 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1289 {
1290 	switch (port) {
1291 	case PORT_B:
1292 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1293 	case PORT_C:
1294 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1295 	case PORT_D:
1296 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1297 	default:
1298 		return false;
1299 	}
1300 }
1301 
1302 /* Get a bit mask of pins that have triggered, and which ones may be long. */
1303 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1304 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1305 			     const u32 hpd[HPD_NUM_PINS],
1306 			     bool long_pulse_detect(enum port port, u32 val))
1307 {
1308 	enum port port;
1309 	int i;
1310 
1311 	*pin_mask = 0;
1312 	*long_mask = 0;
1313 
1314 	for_each_hpd_pin(i) {
1315 		if ((hpd[i] & hotplug_trigger) == 0)
1316 			continue;
1317 
1318 		*pin_mask |= BIT(i);
1319 
1320 		if (!intel_hpd_pin_to_port(i, &port))
1321 			continue;
1322 
1323 		if (long_pulse_detect(port, dig_hotplug_reg))
1324 			*long_mask |= BIT(i);
1325 	}
1326 
1327 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1328 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1329 
1330 }
1331 
1332 static void gmbus_irq_handler(struct drm_device *dev)
1333 {
1334 	struct drm_i915_private *dev_priv = dev->dev_private;
1335 
1336 	wake_up_all(&dev_priv->gmbus_wait_queue);
1337 }
1338 
1339 static void dp_aux_irq_handler(struct drm_device *dev)
1340 {
1341 	struct drm_i915_private *dev_priv = dev->dev_private;
1342 
1343 	wake_up_all(&dev_priv->gmbus_wait_queue);
1344 }
1345 
1346 #if defined(CONFIG_DEBUG_FS)
1347 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1348 					 uint32_t crc0, uint32_t crc1,
1349 					 uint32_t crc2, uint32_t crc3,
1350 					 uint32_t crc4)
1351 {
1352 	struct drm_i915_private *dev_priv = dev->dev_private;
1353 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1354 	struct intel_pipe_crc_entry *entry;
1355 	int head, tail;
1356 
1357 	spin_lock(&pipe_crc->lock);
1358 
1359 	if (!pipe_crc->entries) {
1360 		spin_unlock(&pipe_crc->lock);
1361 		DRM_DEBUG_KMS("spurious interrupt\n");
1362 		return;
1363 	}
1364 
1365 	head = pipe_crc->head;
1366 	tail = pipe_crc->tail;
1367 
1368 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1369 		spin_unlock(&pipe_crc->lock);
1370 		DRM_ERROR("CRC buffer overflowing\n");
1371 		return;
1372 	}
1373 
1374 	entry = &pipe_crc->entries[head];
1375 
1376 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1377 	entry->crc[0] = crc0;
1378 	entry->crc[1] = crc1;
1379 	entry->crc[2] = crc2;
1380 	entry->crc[3] = crc3;
1381 	entry->crc[4] = crc4;
1382 
1383 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1384 	pipe_crc->head = head;
1385 
1386 	spin_unlock(&pipe_crc->lock);
1387 
1388 	wake_up_interruptible(&pipe_crc->wq);
1389 }
1390 #else
1391 static inline void
1392 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1393 			     uint32_t crc0, uint32_t crc1,
1394 			     uint32_t crc2, uint32_t crc3,
1395 			     uint32_t crc4) {}
1396 #endif
1397 
1398 
1399 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1400 {
1401 	struct drm_i915_private *dev_priv = dev->dev_private;
1402 
1403 	display_pipe_crc_irq_handler(dev, pipe,
1404 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1405 				     0, 0, 0, 0);
1406 }
1407 
1408 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1409 {
1410 	struct drm_i915_private *dev_priv = dev->dev_private;
1411 
1412 	display_pipe_crc_irq_handler(dev, pipe,
1413 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1414 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1415 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1416 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1417 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1418 }
1419 
1420 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1421 {
1422 	struct drm_i915_private *dev_priv = dev->dev_private;
1423 	uint32_t res1, res2;
1424 
1425 	if (INTEL_INFO(dev)->gen >= 3)
1426 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1427 	else
1428 		res1 = 0;
1429 
1430 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1431 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1432 	else
1433 		res2 = 0;
1434 
1435 	display_pipe_crc_irq_handler(dev, pipe,
1436 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1437 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1438 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1439 				     res1, res2);
1440 }
1441 
1442 /* The RPS events need forcewake, so we add them to a work queue and mask their
1443  * IMR bits until the work is done. Other interrupts can be processed without
1444  * the work queue. */
1445 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1446 {
1447 	if (pm_iir & dev_priv->pm_rps_events) {
1448 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1449 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1450 		if (dev_priv->rps.interrupts_enabled) {
1451 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1452 			queue_work(dev_priv->wq, &dev_priv->rps.work);
1453 		}
1454 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1455 	}
1456 
1457 	if (INTEL_INFO(dev_priv)->gen >= 8)
1458 		return;
1459 
1460 	if (HAS_VEBOX(dev_priv->dev)) {
1461 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1462 			notify_ring(&dev_priv->ring[VECS]);
1463 
1464 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1465 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1466 	}
1467 }
1468 
1469 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
1470 {
1471 	if (!drm_handle_vblank(dev, pipe))
1472 		return false;
1473 
1474 	return true;
1475 }
1476 
1477 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1478 {
1479 	struct drm_i915_private *dev_priv = dev->dev_private;
1480 	u32 pipe_stats[I915_MAX_PIPES] = { };
1481 	int pipe;
1482 
1483 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1484 	for_each_pipe(dev_priv, pipe) {
1485 		int reg;
1486 		u32 mask, iir_bit = 0;
1487 
1488 		/*
1489 		 * PIPESTAT bits get signalled even when the interrupt is
1490 		 * disabled with the mask bits, and some of the status bits do
1491 		 * not generate interrupts at all (like the underrun bit). Hence
1492 		 * we need to be careful that we only handle what we want to
1493 		 * handle.
1494 		 */
1495 
1496 		/* fifo underruns are filterered in the underrun handler. */
1497 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1498 
1499 		switch (pipe) {
1500 		case PIPE_A:
1501 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1502 			break;
1503 		case PIPE_B:
1504 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1505 			break;
1506 		case PIPE_C:
1507 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1508 			break;
1509 		}
1510 		if (iir & iir_bit)
1511 			mask |= dev_priv->pipestat_irq_mask[pipe];
1512 
1513 		if (!mask)
1514 			continue;
1515 
1516 		reg = PIPESTAT(pipe);
1517 		mask |= PIPESTAT_INT_ENABLE_MASK;
1518 		pipe_stats[pipe] = I915_READ(reg) & mask;
1519 
1520 		/*
1521 		 * Clear the PIPE*STAT regs before the IIR
1522 		 */
1523 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1524 					PIPESTAT_INT_STATUS_MASK))
1525 			I915_WRITE(reg, pipe_stats[pipe]);
1526 	}
1527 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1528 
1529 	for_each_pipe(dev_priv, pipe) {
1530 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1531 		    intel_pipe_handle_vblank(dev, pipe))
1532 			intel_check_page_flip(dev, pipe);
1533 
1534 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1535 			intel_prepare_page_flip(dev, pipe);
1536 			intel_finish_page_flip(dev, pipe);
1537 		}
1538 
1539 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1540 			i9xx_pipe_crc_irq_handler(dev, pipe);
1541 
1542 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1543 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1544 	}
1545 
1546 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1547 		gmbus_irq_handler(dev);
1548 }
1549 
1550 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1551 {
1552 	struct drm_i915_private *dev_priv = dev->dev_private;
1553 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1554 	u32 pin_mask, long_mask;
1555 
1556 	if (!hotplug_status)
1557 		return;
1558 
1559 	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1560 	/*
1561 	 * Make sure hotplug status is cleared before we clear IIR, or else we
1562 	 * may miss hotplug events.
1563 	 */
1564 	POSTING_READ(PORT_HOTPLUG_STAT);
1565 
1566 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1567 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1568 
1569 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1570 				   hotplug_trigger, hpd_status_g4x,
1571 				   i9xx_port_hotplug_long_detect);
1572 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1573 
1574 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1575 			dp_aux_irq_handler(dev);
1576 	} else {
1577 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1578 
1579 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1580 				   hotplug_trigger, hpd_status_i915,
1581 				   i9xx_port_hotplug_long_detect);
1582 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1583 	}
1584 }
1585 
1586 static irqreturn_t valleyview_irq_handler(void *arg)
1587 {
1588 	struct drm_device *dev = arg;
1589 	struct drm_i915_private *dev_priv = dev->dev_private;
1590 	u32 iir, gt_iir, pm_iir;
1591 
1592 	if (!intel_irqs_enabled(dev_priv))
1593 		return IRQ_NONE;
1594 
1595 	while (true) {
1596 		/* Find, clear, then process each source of interrupt */
1597 
1598 		gt_iir = I915_READ(GTIIR);
1599 		if (gt_iir)
1600 			I915_WRITE(GTIIR, gt_iir);
1601 
1602 		pm_iir = I915_READ(GEN6_PMIIR);
1603 		if (pm_iir)
1604 			I915_WRITE(GEN6_PMIIR, pm_iir);
1605 
1606 		iir = I915_READ(VLV_IIR);
1607 		if (iir) {
1608 			/* Consume port before clearing IIR or we'll miss events */
1609 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1610 				i9xx_hpd_irq_handler(dev);
1611 			I915_WRITE(VLV_IIR, iir);
1612 		}
1613 
1614 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1615 			goto out;
1616 
1617 
1618 		if (gt_iir)
1619 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1620 		if (pm_iir)
1621 			gen6_rps_irq_handler(dev_priv, pm_iir);
1622 		/* Call regardless, as some status bits might not be
1623 		 * signalled in iir */
1624 		valleyview_pipestat_irq_handler(dev, iir);
1625 	}
1626 
1627 out:
1628 	return;
1629 }
1630 
1631 static irqreturn_t cherryview_irq_handler(void *arg)
1632 {
1633 	struct drm_device *dev = arg;
1634 	struct drm_i915_private *dev_priv = dev->dev_private;
1635 	u32 master_ctl, iir;
1636 
1637 	if (!intel_irqs_enabled(dev_priv))
1638 		return IRQ_NONE;
1639 
1640 	for (;;) {
1641 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1642 		iir = I915_READ(VLV_IIR);
1643 
1644 		if (master_ctl == 0 && iir == 0)
1645 			break;
1646 
1647 
1648 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1649 
1650 		/* Find, clear, then process each source of interrupt */
1651 
1652 		if (iir) {
1653 			/* Consume port before clearing IIR or we'll miss events */
1654 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1655 				i9xx_hpd_irq_handler(dev);
1656 			I915_WRITE(VLV_IIR, iir);
1657 		}
1658 
1659 		gen8_gt_irq_handler(dev_priv, master_ctl);
1660 
1661 		/* Call regardless, as some status bits might not be
1662 		 * signalled in iir */
1663 		valleyview_pipestat_irq_handler(dev, iir);
1664 
1665 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1666 		POSTING_READ(GEN8_MASTER_IRQ);
1667 	}
1668 
1669 }
1670 
1671 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1672 {
1673 	struct drm_i915_private *dev_priv = dev->dev_private;
1674 	int pipe;
1675 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1676 
1677 	if (hotplug_trigger) {
1678 		u32 dig_hotplug_reg, pin_mask, long_mask;
1679 
1680 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1681 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1682 
1683 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1684 				   dig_hotplug_reg, hpd_ibx,
1685 				   pch_port_hotplug_long_detect);
1686 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1687 	}
1688 
1689 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1690 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1691 			       SDE_AUDIO_POWER_SHIFT);
1692 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1693 				 port_name(port));
1694 	}
1695 
1696 	if (pch_iir & SDE_AUX_MASK)
1697 		dp_aux_irq_handler(dev);
1698 
1699 	if (pch_iir & SDE_GMBUS)
1700 		gmbus_irq_handler(dev);
1701 
1702 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1703 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1704 
1705 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1706 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1707 
1708 	if (pch_iir & SDE_POISON)
1709 		DRM_ERROR("PCH poison interrupt\n");
1710 
1711 	if (pch_iir & SDE_FDI_MASK)
1712 		for_each_pipe(dev_priv, pipe)
1713 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1714 					 pipe_name(pipe),
1715 					 I915_READ(FDI_RX_IIR(pipe)));
1716 
1717 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1718 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1719 
1720 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1721 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1722 
1723 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1724 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1725 
1726 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1727 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1728 }
1729 
1730 static void ivb_err_int_handler(struct drm_device *dev)
1731 {
1732 	struct drm_i915_private *dev_priv = dev->dev_private;
1733 	u32 err_int = I915_READ(GEN7_ERR_INT);
1734 	enum i915_pipe pipe;
1735 
1736 	if (err_int & ERR_INT_POISON)
1737 		DRM_ERROR("Poison interrupt\n");
1738 
1739 	for_each_pipe(dev_priv, pipe) {
1740 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1741 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1742 
1743 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1744 			if (IS_IVYBRIDGE(dev))
1745 				ivb_pipe_crc_irq_handler(dev, pipe);
1746 			else
1747 				hsw_pipe_crc_irq_handler(dev, pipe);
1748 		}
1749 	}
1750 
1751 	I915_WRITE(GEN7_ERR_INT, err_int);
1752 }
1753 
1754 static void cpt_serr_int_handler(struct drm_device *dev)
1755 {
1756 	struct drm_i915_private *dev_priv = dev->dev_private;
1757 	u32 serr_int = I915_READ(SERR_INT);
1758 
1759 	if (serr_int & SERR_INT_POISON)
1760 		DRM_ERROR("PCH poison interrupt\n");
1761 
1762 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1763 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1764 
1765 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1766 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1767 
1768 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1769 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1770 
1771 	I915_WRITE(SERR_INT, serr_int);
1772 }
1773 
1774 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1775 {
1776 	struct drm_i915_private *dev_priv = dev->dev_private;
1777 	int pipe;
1778 	u32 hotplug_trigger;
1779 
1780 	if (HAS_PCH_SPT(dev))
1781 		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT;
1782 	else
1783 		hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1784 
1785 	if (hotplug_trigger) {
1786 		u32 dig_hotplug_reg, pin_mask, long_mask;
1787 
1788 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1789 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1790 
1791 		if (HAS_PCH_SPT(dev)) {
1792 			intel_get_hpd_pins(&pin_mask, &long_mask,
1793 					   hotplug_trigger,
1794 					   dig_hotplug_reg, hpd_spt,
1795 					   pch_port_hotplug_long_detect);
1796 
1797 			/* detect PORTE HP event */
1798 			dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1799 			if (pch_port_hotplug_long_detect(PORT_E,
1800 							 dig_hotplug_reg))
1801 				long_mask |= 1 << HPD_PORT_E;
1802 		} else
1803 			intel_get_hpd_pins(&pin_mask, &long_mask,
1804 					   hotplug_trigger,
1805 					   dig_hotplug_reg, hpd_cpt,
1806 					   pch_port_hotplug_long_detect);
1807 
1808 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1809 	}
1810 
1811 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1812 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1813 			       SDE_AUDIO_POWER_SHIFT_CPT);
1814 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1815 				 port_name(port));
1816 	}
1817 
1818 	if (pch_iir & SDE_AUX_MASK_CPT)
1819 		dp_aux_irq_handler(dev);
1820 
1821 	if (pch_iir & SDE_GMBUS_CPT)
1822 		gmbus_irq_handler(dev);
1823 
1824 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1825 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1826 
1827 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1828 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1829 
1830 	if (pch_iir & SDE_FDI_MASK_CPT)
1831 		for_each_pipe(dev_priv, pipe)
1832 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1833 					 pipe_name(pipe),
1834 					 I915_READ(FDI_RX_IIR(pipe)));
1835 
1836 	if (pch_iir & SDE_ERROR_CPT)
1837 		cpt_serr_int_handler(dev);
1838 }
1839 
1840 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1841 {
1842 	struct drm_i915_private *dev_priv = dev->dev_private;
1843 	enum i915_pipe pipe;
1844 
1845 	if (de_iir & DE_AUX_CHANNEL_A)
1846 		dp_aux_irq_handler(dev);
1847 
1848 	if (de_iir & DE_GSE)
1849 		intel_opregion_asle_intr(dev);
1850 
1851 	if (de_iir & DE_POISON)
1852 		DRM_ERROR("Poison interrupt\n");
1853 
1854 	for_each_pipe(dev_priv, pipe) {
1855 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
1856 		    intel_pipe_handle_vblank(dev, pipe))
1857 			intel_check_page_flip(dev, pipe);
1858 
1859 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1860 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1861 
1862 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
1863 			i9xx_pipe_crc_irq_handler(dev, pipe);
1864 
1865 		/* plane/pipes map 1:1 on ilk+ */
1866 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1867 			intel_prepare_page_flip(dev, pipe);
1868 			intel_finish_page_flip_plane(dev, pipe);
1869 		}
1870 	}
1871 
1872 	/* check event from PCH */
1873 	if (de_iir & DE_PCH_EVENT) {
1874 		u32 pch_iir = I915_READ(SDEIIR);
1875 
1876 		if (HAS_PCH_CPT(dev))
1877 			cpt_irq_handler(dev, pch_iir);
1878 		else
1879 			ibx_irq_handler(dev, pch_iir);
1880 
1881 		/* should clear PCH hotplug event before clear CPU irq */
1882 		I915_WRITE(SDEIIR, pch_iir);
1883 	}
1884 
1885 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1886 		ironlake_rps_change_irq_handler(dev);
1887 }
1888 
1889 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1890 {
1891 	struct drm_i915_private *dev_priv = dev->dev_private;
1892 	enum i915_pipe pipe;
1893 
1894 	if (de_iir & DE_ERR_INT_IVB)
1895 		ivb_err_int_handler(dev);
1896 
1897 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
1898 		dp_aux_irq_handler(dev);
1899 
1900 	if (de_iir & DE_GSE_IVB)
1901 		intel_opregion_asle_intr(dev);
1902 
1903 	for_each_pipe(dev_priv, pipe) {
1904 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
1905 		    intel_pipe_handle_vblank(dev, pipe))
1906 			intel_check_page_flip(dev, pipe);
1907 
1908 		/* plane/pipes map 1:1 on ilk+ */
1909 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1910 			intel_prepare_page_flip(dev, pipe);
1911 			intel_finish_page_flip_plane(dev, pipe);
1912 		}
1913 	}
1914 
1915 	/* check event from PCH */
1916 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1917 		u32 pch_iir = I915_READ(SDEIIR);
1918 
1919 		cpt_irq_handler(dev, pch_iir);
1920 
1921 		/* clear PCH hotplug event before clear CPU irq */
1922 		I915_WRITE(SDEIIR, pch_iir);
1923 	}
1924 }
1925 
1926 /*
1927  * To handle irqs with the minimum potential races with fresh interrupts, we:
1928  * 1 - Disable Master Interrupt Control.
1929  * 2 - Find the source(s) of the interrupt.
1930  * 3 - Clear the Interrupt Identity bits (IIR).
1931  * 4 - Process the interrupt(s) that had bits set in the IIRs.
1932  * 5 - Re-enable Master Interrupt Control.
1933  */
1934 static irqreturn_t ironlake_irq_handler(void *arg)
1935 {
1936 	struct drm_device *dev = arg;
1937 	struct drm_i915_private *dev_priv = dev->dev_private;
1938 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1939 
1940 	if (!intel_irqs_enabled(dev_priv))
1941 		return IRQ_NONE;
1942 
1943 	/* We get interrupts on unclaimed registers, so check for this before we
1944 	 * do any I915_{READ,WRITE}. */
1945 	intel_uncore_check_errors(dev);
1946 
1947 	/* disable master interrupt before clearing iir  */
1948 	de_ier = I915_READ(DEIER);
1949 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1950 	POSTING_READ(DEIER);
1951 
1952 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1953 	 * interrupts will will be stored on its back queue, and then we'll be
1954 	 * able to process them after we restore SDEIER (as soon as we restore
1955 	 * it, we'll get an interrupt if SDEIIR still has something to process
1956 	 * due to its back queue). */
1957 	if (!HAS_PCH_NOP(dev)) {
1958 		sde_ier = I915_READ(SDEIER);
1959 		I915_WRITE(SDEIER, 0);
1960 		POSTING_READ(SDEIER);
1961 	}
1962 
1963 	/* Find, clear, then process each source of interrupt */
1964 
1965 	gt_iir = I915_READ(GTIIR);
1966 	if (gt_iir) {
1967 		I915_WRITE(GTIIR, gt_iir);
1968 		if (INTEL_INFO(dev)->gen >= 6)
1969 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1970 		else
1971 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1972 	}
1973 
1974 	de_iir = I915_READ(DEIIR);
1975 	if (de_iir) {
1976 		I915_WRITE(DEIIR, de_iir);
1977 		if (INTEL_INFO(dev)->gen >= 7)
1978 			ivb_display_irq_handler(dev, de_iir);
1979 		else
1980 			ilk_display_irq_handler(dev, de_iir);
1981 	}
1982 
1983 	if (INTEL_INFO(dev)->gen >= 6) {
1984 		u32 pm_iir = I915_READ(GEN6_PMIIR);
1985 		if (pm_iir) {
1986 			I915_WRITE(GEN6_PMIIR, pm_iir);
1987 			gen6_rps_irq_handler(dev_priv, pm_iir);
1988 		}
1989 	}
1990 
1991 	I915_WRITE(DEIER, de_ier);
1992 	POSTING_READ(DEIER);
1993 	if (!HAS_PCH_NOP(dev)) {
1994 		I915_WRITE(SDEIER, sde_ier);
1995 		POSTING_READ(SDEIER);
1996 	}
1997 
1998 }
1999 
2000 static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
2001 {
2002 	struct drm_i915_private *dev_priv = dev->dev_private;
2003 	u32 hp_control, hp_trigger;
2004 	u32 pin_mask, long_mask;
2005 
2006 	/* Get the status */
2007 	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
2008 	hp_control = I915_READ(BXT_HOTPLUG_CTL);
2009 
2010 	/* Hotplug not enabled ? */
2011 	if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
2012 		DRM_ERROR("Interrupt when HPD disabled\n");
2013 		return;
2014 	}
2015 
2016 	/* Clear sticky bits in hpd status */
2017 	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);
2018 
2019 	intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
2020 			   hpd_bxt, bxt_port_hotplug_long_detect);
2021 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2022 }
2023 
2024 static irqreturn_t gen8_irq_handler(void *arg)
2025 {
2026 	struct drm_device *dev = arg;
2027 	struct drm_i915_private *dev_priv = dev->dev_private;
2028 	u32 master_ctl;
2029 	uint32_t tmp = 0;
2030 	enum i915_pipe pipe;
2031 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
2032 
2033 	if (!intel_irqs_enabled(dev_priv))
2034 		return IRQ_NONE;
2035 
2036 	if (IS_GEN9(dev))
2037 		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2038 			GEN9_AUX_CHANNEL_D;
2039 
2040 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2041 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2042 	if (!master_ctl)
2043 		return IRQ_NONE;
2044 
2045 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2046 
2047 	/* Find, clear, then process each source of interrupt */
2048 
2049 	gen8_gt_irq_handler(dev_priv, master_ctl);
2050 
2051 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2052 		tmp = I915_READ(GEN8_DE_MISC_IIR);
2053 		if (tmp) {
2054 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2055 			if (tmp & GEN8_DE_MISC_GSE)
2056 				intel_opregion_asle_intr(dev);
2057 			else
2058 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2059 		}
2060 		else
2061 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2062 	}
2063 
2064 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2065 		tmp = I915_READ(GEN8_DE_PORT_IIR);
2066 		if (tmp) {
2067 			bool found = false;
2068 
2069 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2070 
2071 			if (tmp & aux_mask) {
2072 				dp_aux_irq_handler(dev);
2073 				found = true;
2074 			}
2075 
2076 			if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
2077 				bxt_hpd_handler(dev, tmp);
2078 				found = true;
2079 			}
2080 
2081 			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2082 				gmbus_irq_handler(dev);
2083 				found = true;
2084 			}
2085 
2086 			if (!found)
2087 				DRM_ERROR("Unexpected DE Port interrupt\n");
2088 		}
2089 		else
2090 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2091 	}
2092 
2093 	for_each_pipe(dev_priv, pipe) {
2094 		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2095 
2096 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2097 			continue;
2098 
2099 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2100 		if (pipe_iir) {
2101 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2102 
2103 			if (pipe_iir & GEN8_PIPE_VBLANK &&
2104 			    intel_pipe_handle_vblank(dev, pipe))
2105 				intel_check_page_flip(dev, pipe);
2106 
2107 			if (IS_GEN9(dev))
2108 				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2109 			else
2110 				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2111 
2112 			if (flip_done) {
2113 				intel_prepare_page_flip(dev, pipe);
2114 				intel_finish_page_flip_plane(dev, pipe);
2115 			}
2116 
2117 			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2118 				hsw_pipe_crc_irq_handler(dev, pipe);
2119 
2120 			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2121 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2122 								    pipe);
2123 
2124 
2125 			if (IS_GEN9(dev))
2126 				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2127 			else
2128 				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2129 
2130 			if (fault_errors)
2131 				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2132 					  pipe_name(pipe),
2133 					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2134 		} else
2135 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2136 	}
2137 
2138 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2139 	    master_ctl & GEN8_DE_PCH_IRQ) {
2140 		/*
2141 		 * FIXME(BDW): Assume for now that the new interrupt handling
2142 		 * scheme also closed the SDE interrupt handling race we've seen
2143 		 * on older pch-split platforms. But this needs testing.
2144 		 */
2145 		u32 pch_iir = I915_READ(SDEIIR);
2146 		if (pch_iir) {
2147 			I915_WRITE(SDEIIR, pch_iir);
2148 			cpt_irq_handler(dev, pch_iir);
2149 		} else
2150 			DRM_ERROR("The master control interrupt lied (SDE)!\n");
2151 
2152 	}
2153 
2154 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2155 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2156 
2157 }
2158 
2159 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2160 			       bool reset_completed)
2161 {
2162 	struct intel_engine_cs *ring;
2163 	int i;
2164 
2165 	/*
2166 	 * Notify all waiters for GPU completion events that reset state has
2167 	 * been changed, and that they need to restart their wait after
2168 	 * checking for potential errors (and bail out to drop locks if there is
2169 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2170 	 */
2171 
2172 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2173 	for_each_ring(ring, dev_priv, i)
2174 		wake_up_all(&ring->irq_queue);
2175 
2176 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2177 	wake_up_all(&dev_priv->pending_flip_queue);
2178 
2179 	/*
2180 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2181 	 * reset state is cleared.
2182 	 */
2183 	if (reset_completed)
2184 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2185 }
2186 
2187 /**
2188  * i915_reset_and_wakeup - do process context error handling work
2189  *
2190  * Fire an error uevent so userspace can see that a hang or error
2191  * was detected.
2192  */
2193 static void i915_reset_and_wakeup(struct drm_device *dev)
2194 {
2195 	struct drm_i915_private *dev_priv = to_i915(dev);
2196 	struct i915_gpu_error *error = &dev_priv->gpu_error;
2197 #if 0
2198 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2199 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2200 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2201 #endif
2202 	int ret;
2203 
2204 #if 0
2205 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2206 #endif
2207 
2208 	/*
2209 	 * Note that there's only one work item which does gpu resets, so we
2210 	 * need not worry about concurrent gpu resets potentially incrementing
2211 	 * error->reset_counter twice. We only need to take care of another
2212 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2213 	 * quick check for that is good enough: schedule_work ensures the
2214 	 * correct ordering between hang detection and this work item, and since
2215 	 * the reset in-progress bit is only ever set by code outside of this
2216 	 * work we don't need to worry about any other races.
2217 	 */
2218 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2219 		DRM_DEBUG_DRIVER("resetting chip\n");
2220 #if 0
2221 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2222 				   reset_event);
2223 #endif
2224 
2225 		/*
2226 		 * In most cases it's guaranteed that we get here with an RPM
2227 		 * reference held, for example because there is a pending GPU
2228 		 * request that won't finish until the reset is done. This
2229 		 * isn't the case at least when we get here by doing a
2230 		 * simulated reset via debugs, so get an RPM reference.
2231 		 */
2232 		intel_runtime_pm_get(dev_priv);
2233 
2234 		intel_prepare_reset(dev);
2235 
2236 		/*
2237 		 * All state reset _must_ be completed before we update the
2238 		 * reset counter, for otherwise waiters might miss the reset
2239 		 * pending state and not properly drop locks, resulting in
2240 		 * deadlocks with the reset work.
2241 		 */
2242 		ret = i915_reset(dev);
2243 
2244 		intel_finish_reset(dev);
2245 
2246 		intel_runtime_pm_put(dev_priv);
2247 
2248 		if (ret == 0) {
2249 			/*
2250 			 * After all the gem state is reset, increment the reset
2251 			 * counter and wake up everyone waiting for the reset to
2252 			 * complete.
2253 			 *
2254 			 * Since unlock operations are a one-sided barrier only,
2255 			 * we need to insert a barrier here to order any seqno
2256 			 * updates before
2257 			 * the counter increment.
2258 			 */
2259 			smp_mb__before_atomic();
2260 			atomic_inc(&dev_priv->gpu_error.reset_counter);
2261 
2262 #if 0
2263 			kobject_uevent_env(&dev->primary->kdev->kobj,
2264 					   KOBJ_CHANGE, reset_done_event);
2265 #endif
2266 		} else {
2267 			atomic_or(I915_WEDGED, &error->reset_counter);
2268 		}
2269 
2270 		/*
2271 		 * Note: The wake_up also serves as a memory barrier so that
2272 		 * waiters see the update value of the reset counter atomic_t.
2273 		 */
2274 		i915_error_wake_up(dev_priv, true);
2275 	}
2276 }
2277 
2278 static void i915_report_and_clear_eir(struct drm_device *dev)
2279 {
2280 	struct drm_i915_private *dev_priv = dev->dev_private;
2281 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2282 	u32 eir = I915_READ(EIR);
2283 	int pipe, i;
2284 
2285 	if (!eir)
2286 		return;
2287 
2288 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2289 
2290 #if 0
2291 	i915_get_extra_instdone(dev, instdone);
2292 #endif
2293 
2294 	if (IS_G4X(dev)) {
2295 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2296 			u32 ipeir = I915_READ(IPEIR_I965);
2297 
2298 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2299 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2300 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2301 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2302 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2303 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2304 			I915_WRITE(IPEIR_I965, ipeir);
2305 			POSTING_READ(IPEIR_I965);
2306 		}
2307 		if (eir & GM45_ERROR_PAGE_TABLE) {
2308 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2309 			pr_err("page table error\n");
2310 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2311 			I915_WRITE(PGTBL_ER, pgtbl_err);
2312 			POSTING_READ(PGTBL_ER);
2313 		}
2314 	}
2315 
2316 	if (!IS_GEN2(dev)) {
2317 		if (eir & I915_ERROR_PAGE_TABLE) {
2318 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2319 			pr_err("page table error\n");
2320 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2321 			I915_WRITE(PGTBL_ER, pgtbl_err);
2322 			POSTING_READ(PGTBL_ER);
2323 		}
2324 	}
2325 
2326 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2327 		pr_err("memory refresh error:\n");
2328 		for_each_pipe(dev_priv, pipe)
2329 			pr_err("pipe %c stat: 0x%08x\n",
2330 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2331 		/* pipestat has already been acked */
2332 	}
2333 	if (eir & I915_ERROR_INSTRUCTION) {
2334 		pr_err("instruction error\n");
2335 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2336 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2337 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2338 		if (INTEL_INFO(dev)->gen < 4) {
2339 			u32 ipeir = I915_READ(IPEIR);
2340 
2341 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2342 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2343 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2344 			I915_WRITE(IPEIR, ipeir);
2345 			POSTING_READ(IPEIR);
2346 		} else {
2347 			u32 ipeir = I915_READ(IPEIR_I965);
2348 
2349 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2350 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2351 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2352 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2353 			I915_WRITE(IPEIR_I965, ipeir);
2354 			POSTING_READ(IPEIR_I965);
2355 		}
2356 	}
2357 
2358 	I915_WRITE(EIR, eir);
2359 	POSTING_READ(EIR);
2360 	eir = I915_READ(EIR);
2361 	if (eir) {
2362 		/*
2363 		 * some errors might have become stuck,
2364 		 * mask them.
2365 		 */
2366 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2367 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2368 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2369 	}
2370 }
2371 
2372 /**
2373  * i915_handle_error - handle a gpu error
2374  * @dev: drm device
2375  *
2376  * Do some basic checking of regsiter state at error time and
2377  * dump it to the syslog.  Also call i915_capture_error_state() to make
2378  * sure we get a record and make it available in debugfs.  Fire a uevent
2379  * so userspace knows something bad happened (should trigger collection
2380  * of a ring dump etc.).
2381  */
2382 void i915_handle_error(struct drm_device *dev, bool wedged,
2383 		       const char *fmt, ...)
2384 {
2385 	struct drm_i915_private *dev_priv = dev->dev_private;
2386 #if 0
2387 	va_list args;
2388 	char error_msg[80];
2389 
2390 	va_start(args, fmt);
2391 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2392 	va_end(args);
2393 
2394 	i915_capture_error_state(dev, wedged, error_msg);
2395 #endif
2396 	i915_report_and_clear_eir(dev);
2397 
2398 	if (wedged) {
2399 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2400 				&dev_priv->gpu_error.reset_counter);
2401 
2402 		/*
2403 		 * Wakeup waiting processes so that the reset function
2404 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2405 		 * various locks. By bumping the reset counter first, the woken
2406 		 * processes will see a reset in progress and back off,
2407 		 * releasing their locks and then wait for the reset completion.
2408 		 * We must do this for _all_ gpu waiters that might hold locks
2409 		 * that the reset work needs to acquire.
2410 		 *
2411 		 * Note: The wake_up serves as the required memory barrier to
2412 		 * ensure that the waiters see the updated value of the reset
2413 		 * counter atomic_t.
2414 		 */
2415 		i915_error_wake_up(dev_priv, false);
2416 	}
2417 
2418 	i915_reset_and_wakeup(dev);
2419 }
2420 
2421 /* Called from drm generic code, passed 'crtc' which
2422  * we use as a pipe index
2423  */
2424 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2425 {
2426 	struct drm_i915_private *dev_priv = dev->dev_private;
2427 	unsigned long irqflags;
2428 
2429 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2430 	if (INTEL_INFO(dev)->gen >= 4)
2431 		i915_enable_pipestat(dev_priv, pipe,
2432 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2433 	else
2434 		i915_enable_pipestat(dev_priv, pipe,
2435 				     PIPE_VBLANK_INTERRUPT_STATUS);
2436 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2437 
2438 	return 0;
2439 }
2440 
2441 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2442 {
2443 	struct drm_i915_private *dev_priv = dev->dev_private;
2444 	unsigned long irqflags;
2445 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2446 						     DE_PIPE_VBLANK(pipe);
2447 
2448 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2449 	ironlake_enable_display_irq(dev_priv, bit);
2450 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2451 
2452 	return 0;
2453 }
2454 
2455 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2456 {
2457 	struct drm_i915_private *dev_priv = dev->dev_private;
2458 	unsigned long irqflags;
2459 
2460 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2461 	i915_enable_pipestat(dev_priv, pipe,
2462 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2463 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2464 
2465 	return 0;
2466 }
2467 
2468 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2469 {
2470 	struct drm_i915_private *dev_priv = dev->dev_private;
2471 	unsigned long irqflags;
2472 
2473 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2474 	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2475 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2476 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2477 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2478 	return 0;
2479 }
2480 
2481 /* Called from drm generic code, passed 'crtc' which
2482  * we use as a pipe index
2483  */
2484 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2485 {
2486 	struct drm_i915_private *dev_priv = dev->dev_private;
2487 	unsigned long irqflags;
2488 
2489 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2490 	i915_disable_pipestat(dev_priv, pipe,
2491 			      PIPE_VBLANK_INTERRUPT_STATUS |
2492 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2493 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2494 }
2495 
2496 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2497 {
2498 	struct drm_i915_private *dev_priv = dev->dev_private;
2499 	unsigned long irqflags;
2500 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2501 						     DE_PIPE_VBLANK(pipe);
2502 
2503 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2504 	ironlake_disable_display_irq(dev_priv, bit);
2505 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2506 }
2507 
2508 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2509 {
2510 	struct drm_i915_private *dev_priv = dev->dev_private;
2511 	unsigned long irqflags;
2512 
2513 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2514 	i915_disable_pipestat(dev_priv, pipe,
2515 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2516 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2517 }
2518 
2519 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2520 {
2521 	struct drm_i915_private *dev_priv = dev->dev_private;
2522 	unsigned long irqflags;
2523 
2524 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2525 	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2526 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2527 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2528 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2529 }
2530 
2531 static bool
2532 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2533 {
2534 	return (list_empty(&ring->request_list) ||
2535 		i915_seqno_passed(seqno, ring->last_submitted_seqno));
2536 }
2537 
2538 static bool
2539 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2540 {
2541 	if (INTEL_INFO(dev)->gen >= 8) {
2542 		return (ipehr >> 23) == 0x1c;
2543 	} else {
2544 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2545 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2546 				 MI_SEMAPHORE_REGISTER);
2547 	}
2548 }
2549 
2550 static struct intel_engine_cs *
2551 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2552 {
2553 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2554 	struct intel_engine_cs *signaller;
2555 	int i;
2556 
2557 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2558 		for_each_ring(signaller, dev_priv, i) {
2559 			if (ring == signaller)
2560 				continue;
2561 
2562 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2563 				return signaller;
2564 		}
2565 	} else {
2566 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2567 
2568 		for_each_ring(signaller, dev_priv, i) {
2569 			if(ring == signaller)
2570 				continue;
2571 
2572 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2573 				return signaller;
2574 		}
2575 	}
2576 
2577 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n",
2578 		  ring->id, ipehr, offset);
2579 
2580 	return NULL;
2581 }
2582 
2583 static struct intel_engine_cs *
2584 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2585 {
2586 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2587 	u32 cmd, ipehr, head;
2588 	u64 offset = 0;
2589 	int i, backwards;
2590 
2591 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2592 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2593 		return NULL;
2594 
2595 	/*
2596 	 * HEAD is likely pointing to the dword after the actual command,
2597 	 * so scan backwards until we find the MBOX. But limit it to just 3
2598 	 * or 4 dwords depending on the semaphore wait command size.
2599 	 * Note that we don't care about ACTHD here since that might
2600 	 * point at at batch, and semaphores are always emitted into the
2601 	 * ringbuffer itself.
2602 	 */
2603 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2604 	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2605 
2606 	for (i = backwards; i; --i) {
2607 		/*
2608 		 * Be paranoid and presume the hw has gone off into the wild -
2609 		 * our ring is smaller than what the hardware (and hence
2610 		 * HEAD_ADDR) allows. Also handles wrap-around.
2611 		 */
2612 		head &= ring->buffer->size - 1;
2613 
2614 		/* This here seems to blow up */
2615 		cmd = ioread32(ring->buffer->virtual_start + head);
2616 		if (cmd == ipehr)
2617 			break;
2618 
2619 		head -= 4;
2620 	}
2621 
2622 	if (!i)
2623 		return NULL;
2624 
2625 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2626 	if (INTEL_INFO(ring->dev)->gen >= 8) {
2627 		offset = ioread32(ring->buffer->virtual_start + head + 12);
2628 		offset <<= 32;
2629 		offset = ioread32(ring->buffer->virtual_start + head + 8);
2630 	}
2631 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2632 }
2633 
2634 static int semaphore_passed(struct intel_engine_cs *ring)
2635 {
2636 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2637 	struct intel_engine_cs *signaller;
2638 	u32 seqno;
2639 
2640 	ring->hangcheck.deadlock++;
2641 
2642 	signaller = semaphore_waits_for(ring, &seqno);
2643 	if (signaller == NULL)
2644 		return -1;
2645 
2646 	/* Prevent pathological recursion due to driver bugs */
2647 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2648 		return -1;
2649 
2650 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2651 		return 1;
2652 
2653 	/* cursory check for an unkickable deadlock */
2654 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2655 	    semaphore_passed(signaller) < 0)
2656 		return -1;
2657 
2658 	return 0;
2659 }
2660 
2661 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2662 {
2663 	struct intel_engine_cs *ring;
2664 	int i;
2665 
2666 	for_each_ring(ring, dev_priv, i)
2667 		ring->hangcheck.deadlock = 0;
2668 }
2669 
2670 static enum intel_ring_hangcheck_action
2671 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2672 {
2673 	struct drm_device *dev = ring->dev;
2674 	struct drm_i915_private *dev_priv = dev->dev_private;
2675 	u32 tmp;
2676 
2677 	if (acthd != ring->hangcheck.acthd) {
2678 		if (acthd > ring->hangcheck.max_acthd) {
2679 			ring->hangcheck.max_acthd = acthd;
2680 			return HANGCHECK_ACTIVE;
2681 		}
2682 
2683 		return HANGCHECK_ACTIVE_LOOP;
2684 	}
2685 
2686 	if (IS_GEN2(dev))
2687 		return HANGCHECK_HUNG;
2688 
2689 	/* Is the chip hanging on a WAIT_FOR_EVENT?
2690 	 * If so we can simply poke the RB_WAIT bit
2691 	 * and break the hang. This should work on
2692 	 * all but the second generation chipsets.
2693 	 */
2694 	tmp = I915_READ_CTL(ring);
2695 	if (tmp & RING_WAIT) {
2696 		i915_handle_error(dev, false,
2697 				  "Kicking stuck wait on %s",
2698 				  ring->name);
2699 		I915_WRITE_CTL(ring, tmp);
2700 		return HANGCHECK_KICK;
2701 	}
2702 
2703 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2704 		switch (semaphore_passed(ring)) {
2705 		default:
2706 			return HANGCHECK_HUNG;
2707 		case 1:
2708 			i915_handle_error(dev, false,
2709 					  "Kicking stuck semaphore on %s",
2710 					  ring->name);
2711 			I915_WRITE_CTL(ring, tmp);
2712 			return HANGCHECK_KICK;
2713 		case 0:
2714 			return HANGCHECK_WAIT;
2715 		}
2716 	}
2717 
2718 	return HANGCHECK_HUNG;
2719 }
2720 
2721 /*
2722  * This is called when the chip hasn't reported back with completed
2723  * batchbuffers in a long time. We keep track per ring seqno progress and
2724  * if there are no progress, hangcheck score for that ring is increased.
2725  * Further, acthd is inspected to see if the ring is stuck. On stuck case
2726  * we kick the ring. If we see no progress on three subsequent calls
2727  * we assume chip is wedged and try to fix it by resetting the chip.
2728  */
2729 static void i915_hangcheck_elapsed(struct work_struct *work)
2730 {
2731 	struct drm_i915_private *dev_priv =
2732 		container_of(work, typeof(*dev_priv),
2733 			     gpu_error.hangcheck_work.work);
2734 	struct drm_device *dev = dev_priv->dev;
2735 	struct intel_engine_cs *ring;
2736 	int i;
2737 	int busy_count = 0, rings_hung = 0;
2738 	bool stuck[I915_NUM_RINGS] = { 0 };
2739 #define BUSY 1
2740 #define KICK 5
2741 #define HUNG 20
2742 
2743 	if (!i915.enable_hangcheck)
2744 		return;
2745 
2746 	for_each_ring(ring, dev_priv, i) {
2747 		u64 acthd;
2748 		u32 seqno;
2749 		bool busy = true;
2750 
2751 		semaphore_clear_deadlocks(dev_priv);
2752 
2753 		seqno = ring->get_seqno(ring, false);
2754 		acthd = intel_ring_get_active_head(ring);
2755 
2756 		if (ring->hangcheck.seqno == seqno) {
2757 			if (ring_idle(ring, seqno)) {
2758 				ring->hangcheck.action = HANGCHECK_IDLE;
2759 
2760 				if (waitqueue_active(&ring->irq_queue)) {
2761 					/* Issue a wake-up to catch stuck h/w. */
2762 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2763 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2764 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2765 								  ring->name);
2766 						else
2767 							DRM_INFO("Fake missed irq on %s\n",
2768 								 ring->name);
2769 						wake_up_all(&ring->irq_queue);
2770 					}
2771 					/* Safeguard against driver failure */
2772 					ring->hangcheck.score += BUSY;
2773 				} else
2774 					busy = false;
2775 			} else {
2776 				/* We always increment the hangcheck score
2777 				 * if the ring is busy and still processing
2778 				 * the same request, so that no single request
2779 				 * can run indefinitely (such as a chain of
2780 				 * batches). The only time we do not increment
2781 				 * the hangcheck score on this ring, if this
2782 				 * ring is in a legitimate wait for another
2783 				 * ring. In that case the waiting ring is a
2784 				 * victim and we want to be sure we catch the
2785 				 * right culprit. Then every time we do kick
2786 				 * the ring, add a small increment to the
2787 				 * score so that we can catch a batch that is
2788 				 * being repeatedly kicked and so responsible
2789 				 * for stalling the machine.
2790 				 */
2791 				ring->hangcheck.action = ring_stuck(ring,
2792 								    acthd);
2793 
2794 				switch (ring->hangcheck.action) {
2795 				case HANGCHECK_IDLE:
2796 				case HANGCHECK_WAIT:
2797 				case HANGCHECK_ACTIVE:
2798 					break;
2799 				case HANGCHECK_ACTIVE_LOOP:
2800 					ring->hangcheck.score += BUSY;
2801 					break;
2802 				case HANGCHECK_KICK:
2803 					ring->hangcheck.score += KICK;
2804 					break;
2805 				case HANGCHECK_HUNG:
2806 					ring->hangcheck.score += HUNG;
2807 					stuck[i] = true;
2808 					break;
2809 				}
2810 			}
2811 		} else {
2812 			ring->hangcheck.action = HANGCHECK_ACTIVE;
2813 
2814 			/* Gradually reduce the count so that we catch DoS
2815 			 * attempts across multiple batches.
2816 			 */
2817 			if (ring->hangcheck.score > 0)
2818 				ring->hangcheck.score--;
2819 
2820 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2821 		}
2822 
2823 		ring->hangcheck.seqno = seqno;
2824 		ring->hangcheck.acthd = acthd;
2825 		busy_count += busy;
2826 	}
2827 
2828 	for_each_ring(ring, dev_priv, i) {
2829 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2830 			DRM_INFO("%s on %s\n",
2831 				 stuck[i] ? "stuck" : "no progress",
2832 				 ring->name);
2833 			rings_hung++;
2834 		}
2835 	}
2836 
2837 	if (rings_hung)
2838 		return i915_handle_error(dev, true, "Ring hung");
2839 
2840 	if (busy_count)
2841 		/* Reset timer case chip hangs without another request
2842 		 * being added */
2843 		i915_queue_hangcheck(dev);
2844 }
2845 
2846 void i915_queue_hangcheck(struct drm_device *dev)
2847 {
2848 	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
2849 
2850 	if (!i915.enable_hangcheck)
2851 		return;
2852 
2853 	/* Don't continually defer the hangcheck so that it is always run at
2854 	 * least once after work has been scheduled on any ring. Otherwise,
2855 	 * we will ignore a hung ring if a second ring is kept busy.
2856 	 */
2857 
2858 	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
2859 			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
2860 }
2861 
2862 static void ibx_irq_reset(struct drm_device *dev)
2863 {
2864 	struct drm_i915_private *dev_priv = dev->dev_private;
2865 
2866 	if (HAS_PCH_NOP(dev))
2867 		return;
2868 
2869 	GEN5_IRQ_RESET(SDE);
2870 
2871 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2872 		I915_WRITE(SERR_INT, 0xffffffff);
2873 }
2874 
2875 /*
2876  * SDEIER is also touched by the interrupt handler to work around missed PCH
2877  * interrupts. Hence we can't update it after the interrupt handler is enabled -
2878  * instead we unconditionally enable all PCH interrupt sources here, but then
2879  * only unmask them as needed with SDEIMR.
2880  *
2881  * This function needs to be called before interrupts are enabled.
2882  */
2883 static void ibx_irq_pre_postinstall(struct drm_device *dev)
2884 {
2885 	struct drm_i915_private *dev_priv = dev->dev_private;
2886 
2887 	if (HAS_PCH_NOP(dev))
2888 		return;
2889 
2890 	WARN_ON(I915_READ(SDEIER) != 0);
2891 	I915_WRITE(SDEIER, 0xffffffff);
2892 	POSTING_READ(SDEIER);
2893 }
2894 
2895 static void gen5_gt_irq_reset(struct drm_device *dev)
2896 {
2897 	struct drm_i915_private *dev_priv = dev->dev_private;
2898 
2899 	GEN5_IRQ_RESET(GT);
2900 	if (INTEL_INFO(dev)->gen >= 6)
2901 		GEN5_IRQ_RESET(GEN6_PM);
2902 }
2903 
2904 /* drm_dma.h hooks
2905 */
2906 static void ironlake_irq_reset(struct drm_device *dev)
2907 {
2908 	struct drm_i915_private *dev_priv = dev->dev_private;
2909 
2910 	I915_WRITE(HWSTAM, 0xffffffff);
2911 
2912 	GEN5_IRQ_RESET(DE);
2913 	if (IS_GEN7(dev))
2914 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
2915 
2916 	gen5_gt_irq_reset(dev);
2917 
2918 	ibx_irq_reset(dev);
2919 }
2920 
2921 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2922 {
2923 	enum i915_pipe pipe;
2924 
2925 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2926 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2927 
2928 	for_each_pipe(dev_priv, pipe)
2929 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2930 
2931 	GEN5_IRQ_RESET(VLV_);
2932 }
2933 
2934 static void valleyview_irq_preinstall(struct drm_device *dev)
2935 {
2936 	struct drm_i915_private *dev_priv = dev->dev_private;
2937 
2938 	/* VLV magic */
2939 	I915_WRITE(VLV_IMR, 0);
2940 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2941 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2942 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2943 
2944 	gen5_gt_irq_reset(dev);
2945 
2946 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2947 
2948 	vlv_display_irq_reset(dev_priv);
2949 }
2950 
2951 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
2952 {
2953 	GEN8_IRQ_RESET_NDX(GT, 0);
2954 	GEN8_IRQ_RESET_NDX(GT, 1);
2955 	GEN8_IRQ_RESET_NDX(GT, 2);
2956 	GEN8_IRQ_RESET_NDX(GT, 3);
2957 }
2958 
2959 static void gen8_irq_reset(struct drm_device *dev)
2960 {
2961 	struct drm_i915_private *dev_priv = dev->dev_private;
2962 	int pipe;
2963 
2964 	I915_WRITE(GEN8_MASTER_IRQ, 0);
2965 	POSTING_READ(GEN8_MASTER_IRQ);
2966 
2967 	gen8_gt_irq_reset(dev_priv);
2968 
2969 	for_each_pipe(dev_priv, pipe)
2970 		if (intel_display_power_is_enabled(dev_priv,
2971 						   POWER_DOMAIN_PIPE(pipe)))
2972 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
2973 
2974 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
2975 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
2976 	GEN5_IRQ_RESET(GEN8_PCU_);
2977 
2978 	if (HAS_PCH_SPLIT(dev))
2979 		ibx_irq_reset(dev);
2980 }
2981 
2982 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2983 				     unsigned int pipe_mask)
2984 {
2985 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
2986 
2987 	spin_lock_irq(&dev_priv->irq_lock);
2988 	if (pipe_mask & 1 << PIPE_A)
2989 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
2990 				  dev_priv->de_irq_mask[PIPE_A],
2991 				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
2992 	if (pipe_mask & 1 << PIPE_B)
2993 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
2994 				  dev_priv->de_irq_mask[PIPE_B],
2995 				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
2996 	if (pipe_mask & 1 << PIPE_C)
2997 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
2998 				  dev_priv->de_irq_mask[PIPE_C],
2999 				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3000 	spin_unlock_irq(&dev_priv->irq_lock);
3001 }
3002 
3003 static void cherryview_irq_preinstall(struct drm_device *dev)
3004 {
3005 	struct drm_i915_private *dev_priv = dev->dev_private;
3006 
3007 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3008 	POSTING_READ(GEN8_MASTER_IRQ);
3009 
3010 	gen8_gt_irq_reset(dev_priv);
3011 
3012 	GEN5_IRQ_RESET(GEN8_PCU_);
3013 
3014 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3015 
3016 	vlv_display_irq_reset(dev_priv);
3017 }
3018 
3019 static void ibx_hpd_irq_setup(struct drm_device *dev)
3020 {
3021 	struct drm_i915_private *dev_priv = dev->dev_private;
3022 	struct intel_encoder *intel_encoder;
3023 	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3024 
3025 	if (HAS_PCH_IBX(dev)) {
3026 		hotplug_irqs = SDE_HOTPLUG_MASK;
3027 		for_each_intel_encoder(dev, intel_encoder)
3028 			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3029 				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3030 	} else if (HAS_PCH_SPT(dev)) {
3031 		hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3032 		for_each_intel_encoder(dev, intel_encoder)
3033 			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3034 				enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
3035 	} else {
3036 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3037 		for_each_intel_encoder(dev, intel_encoder)
3038 			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3039 				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3040 	}
3041 
3042 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3043 
3044 	/*
3045 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3046 	 * duration to 2ms (which is the minimum in the Display Port spec)
3047 	 *
3048 	 * This register is the same on all known PCH chips.
3049 	 */
3050 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3051 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3052 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3053 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3054 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3055 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3056 
3057 	/* enable SPT PORTE hot plug */
3058 	if (HAS_PCH_SPT(dev)) {
3059 		hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3060 		hotplug |= PORTE_HOTPLUG_ENABLE;
3061 		I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3062 	}
3063 }
3064 
3065 static void bxt_hpd_irq_setup(struct drm_device *dev)
3066 {
3067 	struct drm_i915_private *dev_priv = dev->dev_private;
3068 	struct intel_encoder *intel_encoder;
3069 	u32 hotplug_port = 0;
3070 	u32 hotplug_ctrl;
3071 
3072 	/* Now, enable HPD */
3073 	for_each_intel_encoder(dev, intel_encoder) {
3074 		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
3075 				== HPD_ENABLED)
3076 			hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
3077 	}
3078 
3079 	/* Mask all HPD control bits */
3080 	hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;
3081 
3082 	/* Enable requested port in hotplug control */
3083 	/* TODO: implement (short) HPD support on port A */
3084 	WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
3085 	if (hotplug_port & BXT_DE_PORT_HP_DDIB)
3086 		hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
3087 	if (hotplug_port & BXT_DE_PORT_HP_DDIC)
3088 		hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
3089 	I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);
3090 
3091 	/* Unmask DDI hotplug in IMR */
3092 	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
3093 	I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);
3094 
3095 	/* Enable DDI hotplug in IER */
3096 	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
3097 	I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
3098 	POSTING_READ(GEN8_DE_PORT_IER);
3099 }
3100 
3101 static void ibx_irq_postinstall(struct drm_device *dev)
3102 {
3103 	struct drm_i915_private *dev_priv = dev->dev_private;
3104 	u32 mask;
3105 
3106 	if (HAS_PCH_NOP(dev))
3107 		return;
3108 
3109 	if (HAS_PCH_IBX(dev))
3110 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3111 	else
3112 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3113 
3114 	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3115 	I915_WRITE(SDEIMR, ~mask);
3116 }
3117 
3118 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3119 {
3120 	struct drm_i915_private *dev_priv = dev->dev_private;
3121 	u32 pm_irqs, gt_irqs;
3122 
3123 	pm_irqs = gt_irqs = 0;
3124 
3125 	dev_priv->gt_irq_mask = ~0;
3126 	if (HAS_L3_DPF(dev)) {
3127 		/* L3 parity interrupt is always unmasked. */
3128 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3129 		gt_irqs |= GT_PARITY_ERROR(dev);
3130 	}
3131 
3132 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3133 	if (IS_GEN5(dev)) {
3134 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3135 			   ILK_BSD_USER_INTERRUPT;
3136 	} else {
3137 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3138 	}
3139 
3140 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3141 
3142 	if (INTEL_INFO(dev)->gen >= 6) {
3143 		/*
3144 		 * RPS interrupts will get enabled/disabled on demand when RPS
3145 		 * itself is enabled/disabled.
3146 		 */
3147 		if (HAS_VEBOX(dev))
3148 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3149 
3150 		dev_priv->pm_irq_mask = 0xffffffff;
3151 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3152 	}
3153 }
3154 
3155 static int ironlake_irq_postinstall(struct drm_device *dev)
3156 {
3157 	struct drm_i915_private *dev_priv = dev->dev_private;
3158 	u32 display_mask, extra_mask;
3159 
3160 	if (INTEL_INFO(dev)->gen >= 7) {
3161 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3162 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3163 				DE_PLANEB_FLIP_DONE_IVB |
3164 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3165 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3166 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3167 	} else {
3168 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3169 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3170 				DE_AUX_CHANNEL_A |
3171 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3172 				DE_POISON);
3173 		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3174 				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3175 	}
3176 
3177 	dev_priv->irq_mask = ~display_mask;
3178 
3179 	I915_WRITE(HWSTAM, 0xeffe);
3180 
3181 	ibx_irq_pre_postinstall(dev);
3182 
3183 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3184 
3185 	gen5_gt_irq_postinstall(dev);
3186 
3187 	ibx_irq_postinstall(dev);
3188 
3189 	if (IS_IRONLAKE_M(dev)) {
3190 		/* Enable PCU event interrupts
3191 		 *
3192 		 * spinlocking not required here for correctness since interrupt
3193 		 * setup is guaranteed to run in single-threaded context. But we
3194 		 * need it to make the assert_spin_locked happy. */
3195 		spin_lock_irq(&dev_priv->irq_lock);
3196 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3197 		spin_unlock_irq(&dev_priv->irq_lock);
3198 	}
3199 
3200 	return 0;
3201 }
3202 
3203 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3204 {
3205 	u32 pipestat_mask;
3206 	u32 iir_mask;
3207 	enum i915_pipe pipe;
3208 
3209 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3210 			PIPE_FIFO_UNDERRUN_STATUS;
3211 
3212 	for_each_pipe(dev_priv, pipe)
3213 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3214 	POSTING_READ(PIPESTAT(PIPE_A));
3215 
3216 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3217 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3218 
3219 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3220 	for_each_pipe(dev_priv, pipe)
3221 		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3222 
3223 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3224 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3225 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3226 	if (IS_CHERRYVIEW(dev_priv))
3227 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3228 	dev_priv->irq_mask &= ~iir_mask;
3229 
3230 	I915_WRITE(VLV_IIR, iir_mask);
3231 	I915_WRITE(VLV_IIR, iir_mask);
3232 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3233 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3234 	POSTING_READ(VLV_IMR);
3235 }
3236 
3237 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3238 {
3239 	u32 pipestat_mask;
3240 	u32 iir_mask;
3241 	enum i915_pipe pipe;
3242 
3243 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3244 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3245 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3246 	if (IS_CHERRYVIEW(dev_priv))
3247 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3248 
3249 	dev_priv->irq_mask |= iir_mask;
3250 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3251 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3252 	I915_WRITE(VLV_IIR, iir_mask);
3253 	I915_WRITE(VLV_IIR, iir_mask);
3254 	POSTING_READ(VLV_IIR);
3255 
3256 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3257 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3258 
3259 	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3260 	for_each_pipe(dev_priv, pipe)
3261 		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3262 
3263 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3264 			PIPE_FIFO_UNDERRUN_STATUS;
3265 
3266 	for_each_pipe(dev_priv, pipe)
3267 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3268 	POSTING_READ(PIPESTAT(PIPE_A));
3269 }
3270 
3271 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3272 {
3273 	assert_spin_locked(&dev_priv->irq_lock);
3274 
3275 	if (dev_priv->display_irqs_enabled)
3276 		return;
3277 
3278 	dev_priv->display_irqs_enabled = true;
3279 
3280 	if (intel_irqs_enabled(dev_priv))
3281 		valleyview_display_irqs_install(dev_priv);
3282 }
3283 
3284 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3285 {
3286 	assert_spin_locked(&dev_priv->irq_lock);
3287 
3288 	if (!dev_priv->display_irqs_enabled)
3289 		return;
3290 
3291 	dev_priv->display_irqs_enabled = false;
3292 
3293 	if (intel_irqs_enabled(dev_priv))
3294 		valleyview_display_irqs_uninstall(dev_priv);
3295 }
3296 
3297 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3298 {
3299 	dev_priv->irq_mask = ~0;
3300 
3301 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3302 	POSTING_READ(PORT_HOTPLUG_EN);
3303 
3304 	I915_WRITE(VLV_IIR, 0xffffffff);
3305 	I915_WRITE(VLV_IIR, 0xffffffff);
3306 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3307 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3308 	POSTING_READ(VLV_IMR);
3309 
3310 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3311 	 * just to make the assert_spin_locked check happy. */
3312 	spin_lock_irq(&dev_priv->irq_lock);
3313 	if (dev_priv->display_irqs_enabled)
3314 		valleyview_display_irqs_install(dev_priv);
3315 	spin_unlock_irq(&dev_priv->irq_lock);
3316 }
3317 
3318 static int valleyview_irq_postinstall(struct drm_device *dev)
3319 {
3320 	struct drm_i915_private *dev_priv = dev->dev_private;
3321 
3322 	vlv_display_irq_postinstall(dev_priv);
3323 
3324 	gen5_gt_irq_postinstall(dev);
3325 
3326 	/* ack & enable invalid PTE error interrupts */
3327 #if 0 /* FIXME: add support to irq handler for checking these bits */
3328 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3329 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3330 #endif
3331 
3332 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3333 
3334 	return 0;
3335 }
3336 
3337 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3338 {
3339 	/* These are interrupts we'll toggle with the ring mask register */
3340 	uint32_t gt_interrupts[] = {
3341 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3342 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3343 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3344 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3345 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3346 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3347 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3348 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3349 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3350 		0,
3351 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3352 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3353 		};
3354 
3355 	dev_priv->pm_irq_mask = 0xffffffff;
3356 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3357 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3358 	/*
3359 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3360 	 * is enabled/disabled.
3361 	 */
3362 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3363 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3364 }
3365 
3366 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3367 {
3368 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3369 	uint32_t de_pipe_enables;
3370 	int pipe;
3371 	u32 de_port_en = GEN8_AUX_CHANNEL_A;
3372 
3373 	if (IS_GEN9(dev_priv)) {
3374 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3375 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3376 		de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3377 			GEN9_AUX_CHANNEL_D;
3378 
3379 		if (IS_BROXTON(dev_priv))
3380 			de_port_en |= BXT_DE_PORT_GMBUS;
3381 	} else
3382 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3383 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3384 
3385 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3386 					   GEN8_PIPE_FIFO_UNDERRUN;
3387 
3388 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3389 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3390 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3391 
3392 	for_each_pipe(dev_priv, pipe)
3393 		if (intel_display_power_is_enabled(dev_priv,
3394 				POWER_DOMAIN_PIPE(pipe)))
3395 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3396 					  dev_priv->de_irq_mask[pipe],
3397 					  de_pipe_enables);
3398 
3399 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
3400 }
3401 
3402 static int gen8_irq_postinstall(struct drm_device *dev)
3403 {
3404 	struct drm_i915_private *dev_priv = dev->dev_private;
3405 
3406 	if (HAS_PCH_SPLIT(dev))
3407 		ibx_irq_pre_postinstall(dev);
3408 
3409 	gen8_gt_irq_postinstall(dev_priv);
3410 	gen8_de_irq_postinstall(dev_priv);
3411 
3412 	if (HAS_PCH_SPLIT(dev))
3413 		ibx_irq_postinstall(dev);
3414 
3415 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3416 	POSTING_READ(GEN8_MASTER_IRQ);
3417 
3418 	return 0;
3419 }
3420 
3421 static int cherryview_irq_postinstall(struct drm_device *dev)
3422 {
3423 	struct drm_i915_private *dev_priv = dev->dev_private;
3424 
3425 	vlv_display_irq_postinstall(dev_priv);
3426 
3427 	gen8_gt_irq_postinstall(dev_priv);
3428 
3429 	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3430 	POSTING_READ(GEN8_MASTER_IRQ);
3431 
3432 	return 0;
3433 }
3434 
3435 static void gen8_irq_uninstall(struct drm_device *dev)
3436 {
3437 	struct drm_i915_private *dev_priv = dev->dev_private;
3438 
3439 	if (!dev_priv)
3440 		return;
3441 
3442 	gen8_irq_reset(dev);
3443 }
3444 
3445 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3446 {
3447 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3448 	 * just to make the assert_spin_locked check happy. */
3449 	spin_lock_irq(&dev_priv->irq_lock);
3450 	if (dev_priv->display_irqs_enabled)
3451 		valleyview_display_irqs_uninstall(dev_priv);
3452 	spin_unlock_irq(&dev_priv->irq_lock);
3453 
3454 	vlv_display_irq_reset(dev_priv);
3455 
3456 	dev_priv->irq_mask = ~0;
3457 }
3458 
3459 static void valleyview_irq_uninstall(struct drm_device *dev)
3460 {
3461 	struct drm_i915_private *dev_priv = dev->dev_private;
3462 
3463 	if (!dev_priv)
3464 		return;
3465 
3466 	I915_WRITE(VLV_MASTER_IER, 0);
3467 
3468 	gen5_gt_irq_reset(dev);
3469 
3470 	I915_WRITE(HWSTAM, 0xffffffff);
3471 
3472 	vlv_display_irq_uninstall(dev_priv);
3473 }
3474 
3475 static void cherryview_irq_uninstall(struct drm_device *dev)
3476 {
3477 	struct drm_i915_private *dev_priv = dev->dev_private;
3478 
3479 	if (!dev_priv)
3480 		return;
3481 
3482 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3483 	POSTING_READ(GEN8_MASTER_IRQ);
3484 
3485 	gen8_gt_irq_reset(dev_priv);
3486 
3487 	GEN5_IRQ_RESET(GEN8_PCU_);
3488 
3489 	vlv_display_irq_uninstall(dev_priv);
3490 }
3491 
3492 static void ironlake_irq_uninstall(struct drm_device *dev)
3493 {
3494 	struct drm_i915_private *dev_priv = dev->dev_private;
3495 
3496 	if (!dev_priv)
3497 		return;
3498 
3499 	ironlake_irq_reset(dev);
3500 }
3501 
3502 static void i8xx_irq_preinstall(struct drm_device * dev)
3503 {
3504 	struct drm_i915_private *dev_priv = dev->dev_private;
3505 	int pipe;
3506 
3507 	for_each_pipe(dev_priv, pipe)
3508 		I915_WRITE(PIPESTAT(pipe), 0);
3509 	I915_WRITE16(IMR, 0xffff);
3510 	I915_WRITE16(IER, 0x0);
3511 	POSTING_READ16(IER);
3512 }
3513 
3514 static int i8xx_irq_postinstall(struct drm_device *dev)
3515 {
3516 	struct drm_i915_private *dev_priv = dev->dev_private;
3517 
3518 	I915_WRITE16(EMR,
3519 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3520 
3521 	/* Unmask the interrupts that we always want on. */
3522 	dev_priv->irq_mask =
3523 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3524 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3525 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3526 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3527 	I915_WRITE16(IMR, dev_priv->irq_mask);
3528 
3529 	I915_WRITE16(IER,
3530 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3531 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3532 		     I915_USER_INTERRUPT);
3533 	POSTING_READ16(IER);
3534 
3535 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3536 	 * just to make the assert_spin_locked check happy. */
3537 	spin_lock_irq(&dev_priv->irq_lock);
3538 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3539 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3540 	spin_unlock_irq(&dev_priv->irq_lock);
3541 
3542 	return 0;
3543 }
3544 
3545 /*
3546  * Returns true when a page flip has completed.
3547  */
3548 static bool i8xx_handle_vblank(struct drm_device *dev,
3549 			       int plane, int pipe, u32 iir)
3550 {
3551 	struct drm_i915_private *dev_priv = dev->dev_private;
3552 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3553 
3554 	if (!intel_pipe_handle_vblank(dev, pipe))
3555 		return false;
3556 
3557 	if ((iir & flip_pending) == 0)
3558 		goto check_page_flip;
3559 
3560 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3561 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3562 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3563 	 * the flip is completed (no longer pending). Since this doesn't raise
3564 	 * an interrupt per se, we watch for the change at vblank.
3565 	 */
3566 	if (I915_READ16(ISR) & flip_pending)
3567 		goto check_page_flip;
3568 
3569 	intel_prepare_page_flip(dev, plane);
3570 	intel_finish_page_flip(dev, pipe);
3571 	return true;
3572 
3573 check_page_flip:
3574 	intel_check_page_flip(dev, pipe);
3575 	return false;
3576 }
3577 
3578 static irqreturn_t i8xx_irq_handler(void *arg)
3579 {
3580 	struct drm_device *dev = arg;
3581 	struct drm_i915_private *dev_priv = dev->dev_private;
3582 	u16 iir, new_iir;
3583 	u32 pipe_stats[2];
3584 	int pipe;
3585 	u16 flip_mask =
3586 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3587 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3588 
3589 	if (!intel_irqs_enabled(dev_priv))
3590 		return IRQ_NONE;
3591 
3592 	iir = I915_READ16(IIR);
3593 	if (iir == 0)
3594 		return;
3595 
3596 	while (iir & ~flip_mask) {
3597 		/* Can't rely on pipestat interrupt bit in iir as it might
3598 		 * have been cleared after the pipestat interrupt was received.
3599 		 * It doesn't set the bit in iir again, but it still produces
3600 		 * interrupts (for non-MSI).
3601 		 */
3602 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3603 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3604 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3605 
3606 		for_each_pipe(dev_priv, pipe) {
3607 			int reg = PIPESTAT(pipe);
3608 			pipe_stats[pipe] = I915_READ(reg);
3609 
3610 			/*
3611 			 * Clear the PIPE*STAT regs before the IIR
3612 			 */
3613 			if (pipe_stats[pipe] & 0x8000ffff)
3614 				I915_WRITE(reg, pipe_stats[pipe]);
3615 		}
3616 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3617 
3618 		I915_WRITE16(IIR, iir & ~flip_mask);
3619 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3620 
3621 		if (iir & I915_USER_INTERRUPT)
3622 			notify_ring(&dev_priv->ring[RCS]);
3623 
3624 		for_each_pipe(dev_priv, pipe) {
3625 			int plane = pipe;
3626 			if (HAS_FBC(dev))
3627 				plane = !plane;
3628 
3629 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3630 			    i8xx_handle_vblank(dev, plane, pipe, iir))
3631 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3632 
3633 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3634 				i9xx_pipe_crc_irq_handler(dev, pipe);
3635 
3636 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3637 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3638 								    pipe);
3639 		}
3640 
3641 		iir = new_iir;
3642 	}
3643 
3644 }
3645 
3646 static void i8xx_irq_uninstall(struct drm_device * dev)
3647 {
3648 	struct drm_i915_private *dev_priv = dev->dev_private;
3649 	int pipe;
3650 
3651 	for_each_pipe(dev_priv, pipe) {
3652 		/* Clear enable bits; then clear status bits */
3653 		I915_WRITE(PIPESTAT(pipe), 0);
3654 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3655 	}
3656 	I915_WRITE16(IMR, 0xffff);
3657 	I915_WRITE16(IER, 0x0);
3658 	I915_WRITE16(IIR, I915_READ16(IIR));
3659 }
3660 
3661 static void i915_irq_preinstall(struct drm_device * dev)
3662 {
3663 	struct drm_i915_private *dev_priv = dev->dev_private;
3664 	int pipe;
3665 
3666 	if (I915_HAS_HOTPLUG(dev)) {
3667 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3668 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3669 	}
3670 
3671 	I915_WRITE16(HWSTAM, 0xeffe);
3672 	for_each_pipe(dev_priv, pipe)
3673 		I915_WRITE(PIPESTAT(pipe), 0);
3674 	I915_WRITE(IMR, 0xffffffff);
3675 	I915_WRITE(IER, 0x0);
3676 	POSTING_READ(IER);
3677 }
3678 
3679 static int i915_irq_postinstall(struct drm_device *dev)
3680 {
3681 	struct drm_i915_private *dev_priv = dev->dev_private;
3682 	u32 enable_mask;
3683 
3684 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3685 
3686 	/* Unmask the interrupts that we always want on. */
3687 	dev_priv->irq_mask =
3688 		~(I915_ASLE_INTERRUPT |
3689 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3690 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3691 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3692 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3693 
3694 	enable_mask =
3695 		I915_ASLE_INTERRUPT |
3696 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3697 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3698 		I915_USER_INTERRUPT;
3699 
3700 	if (I915_HAS_HOTPLUG(dev)) {
3701 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3702 		POSTING_READ(PORT_HOTPLUG_EN);
3703 
3704 		/* Enable in IER... */
3705 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3706 		/* and unmask in IMR */
3707 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3708 	}
3709 
3710 	I915_WRITE(IMR, dev_priv->irq_mask);
3711 	I915_WRITE(IER, enable_mask);
3712 	POSTING_READ(IER);
3713 
3714 	i915_enable_asle_pipestat(dev);
3715 
3716 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3717 	 * just to make the assert_spin_locked check happy. */
3718 	spin_lock_irq(&dev_priv->irq_lock);
3719 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3720 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3721 	spin_unlock_irq(&dev_priv->irq_lock);
3722 
3723 	return 0;
3724 }
3725 
3726 /*
3727  * Returns true when a page flip has completed.
3728  */
3729 static bool i915_handle_vblank(struct drm_device *dev,
3730 			       int plane, int pipe, u32 iir)
3731 {
3732 	struct drm_i915_private *dev_priv = dev->dev_private;
3733 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3734 
3735 	if (!intel_pipe_handle_vblank(dev, pipe))
3736 		return false;
3737 
3738 	if ((iir & flip_pending) == 0)
3739 		goto check_page_flip;
3740 
3741 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3742 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3743 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3744 	 * the flip is completed (no longer pending). Since this doesn't raise
3745 	 * an interrupt per se, we watch for the change at vblank.
3746 	 */
3747 	if (I915_READ(ISR) & flip_pending)
3748 		goto check_page_flip;
3749 
3750 	intel_prepare_page_flip(dev, plane);
3751 	intel_finish_page_flip(dev, pipe);
3752 	return true;
3753 
3754 check_page_flip:
3755 	intel_check_page_flip(dev, pipe);
3756 	return false;
3757 }
3758 
3759 static irqreturn_t i915_irq_handler(void *arg)
3760 {
3761 	struct drm_device *dev = arg;
3762 	struct drm_i915_private *dev_priv = dev->dev_private;
3763 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3764 	u32 flip_mask =
3765 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3766 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3767 	int pipe;
3768 
3769 	if (!intel_irqs_enabled(dev_priv))
3770 		return IRQ_NONE;
3771 
3772 	iir = I915_READ(IIR);
3773 	do {
3774 		bool irq_received = (iir & ~flip_mask) != 0;
3775 		bool blc_event = false;
3776 
3777 		/* Can't rely on pipestat interrupt bit in iir as it might
3778 		 * have been cleared after the pipestat interrupt was received.
3779 		 * It doesn't set the bit in iir again, but it still produces
3780 		 * interrupts (for non-MSI).
3781 		 */
3782 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3783 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3784 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3785 
3786 		for_each_pipe(dev_priv, pipe) {
3787 			int reg = PIPESTAT(pipe);
3788 			pipe_stats[pipe] = I915_READ(reg);
3789 
3790 			/* Clear the PIPE*STAT regs before the IIR */
3791 			if (pipe_stats[pipe] & 0x8000ffff) {
3792 				I915_WRITE(reg, pipe_stats[pipe]);
3793 				irq_received = true;
3794 			}
3795 		}
3796 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3797 
3798 		if (!irq_received)
3799 			break;
3800 
3801 		/* Consume port.  Then clear IIR or we'll miss events */
3802 		if (I915_HAS_HOTPLUG(dev) &&
3803 		    iir & I915_DISPLAY_PORT_INTERRUPT)
3804 			i9xx_hpd_irq_handler(dev);
3805 
3806 		I915_WRITE(IIR, iir & ~flip_mask);
3807 		new_iir = I915_READ(IIR); /* Flush posted writes */
3808 
3809 		if (iir & I915_USER_INTERRUPT)
3810 			notify_ring(&dev_priv->ring[RCS]);
3811 
3812 		for_each_pipe(dev_priv, pipe) {
3813 			int plane = pipe;
3814 			if (HAS_FBC(dev))
3815 				plane = !plane;
3816 
3817 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3818 			    i915_handle_vblank(dev, plane, pipe, iir))
3819 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3820 
3821 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3822 				blc_event = true;
3823 
3824 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3825 				i9xx_pipe_crc_irq_handler(dev, pipe);
3826 
3827 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3828 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3829 								    pipe);
3830 		}
3831 
3832 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3833 			intel_opregion_asle_intr(dev);
3834 
3835 		/* With MSI, interrupts are only generated when iir
3836 		 * transitions from zero to nonzero.  If another bit got
3837 		 * set while we were handling the existing iir bits, then
3838 		 * we would never get another interrupt.
3839 		 *
3840 		 * This is fine on non-MSI as well, as if we hit this path
3841 		 * we avoid exiting the interrupt handler only to generate
3842 		 * another one.
3843 		 *
3844 		 * Note that for MSI this could cause a stray interrupt report
3845 		 * if an interrupt landed in the time between writing IIR and
3846 		 * the posting read.  This should be rare enough to never
3847 		 * trigger the 99% of 100,000 interrupts test for disabling
3848 		 * stray interrupts.
3849 		 */
3850 		iir = new_iir;
3851 	} while (iir & ~flip_mask);
3852 
3853 }
3854 
3855 static void i915_irq_uninstall(struct drm_device * dev)
3856 {
3857 	struct drm_i915_private *dev_priv = dev->dev_private;
3858 	int pipe;
3859 
3860 	if (I915_HAS_HOTPLUG(dev)) {
3861 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3862 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3863 	}
3864 
3865 	I915_WRITE16(HWSTAM, 0xffff);
3866 	for_each_pipe(dev_priv, pipe) {
3867 		/* Clear enable bits; then clear status bits */
3868 		I915_WRITE(PIPESTAT(pipe), 0);
3869 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3870 	}
3871 	I915_WRITE(IMR, 0xffffffff);
3872 	I915_WRITE(IER, 0x0);
3873 
3874 	I915_WRITE(IIR, I915_READ(IIR));
3875 }
3876 
3877 static void i965_irq_preinstall(struct drm_device * dev)
3878 {
3879 	struct drm_i915_private *dev_priv = dev->dev_private;
3880 	int pipe;
3881 
3882 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3883 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3884 
3885 	I915_WRITE(HWSTAM, 0xeffe);
3886 	for_each_pipe(dev_priv, pipe)
3887 		I915_WRITE(PIPESTAT(pipe), 0);
3888 	I915_WRITE(IMR, 0xffffffff);
3889 	I915_WRITE(IER, 0x0);
3890 	POSTING_READ(IER);
3891 }
3892 
3893 static int i965_irq_postinstall(struct drm_device *dev)
3894 {
3895 	struct drm_i915_private *dev_priv = dev->dev_private;
3896 	u32 enable_mask;
3897 	u32 error_mask;
3898 
3899 	/* Unmask the interrupts that we always want on. */
3900 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3901 			       I915_DISPLAY_PORT_INTERRUPT |
3902 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3903 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3904 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3905 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3906 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3907 
3908 	enable_mask = ~dev_priv->irq_mask;
3909 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3910 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3911 	enable_mask |= I915_USER_INTERRUPT;
3912 
3913 	if (IS_G4X(dev))
3914 		enable_mask |= I915_BSD_USER_INTERRUPT;
3915 
3916 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3917 	 * just to make the assert_spin_locked check happy. */
3918 	spin_lock_irq(&dev_priv->irq_lock);
3919 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3920 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3921 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3922 	spin_unlock_irq(&dev_priv->irq_lock);
3923 
3924 	/*
3925 	 * Enable some error detection, note the instruction error mask
3926 	 * bit is reserved, so we leave it masked.
3927 	 */
3928 	if (IS_G4X(dev)) {
3929 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3930 			       GM45_ERROR_MEM_PRIV |
3931 			       GM45_ERROR_CP_PRIV |
3932 			       I915_ERROR_MEMORY_REFRESH);
3933 	} else {
3934 		error_mask = ~(I915_ERROR_PAGE_TABLE |
3935 			       I915_ERROR_MEMORY_REFRESH);
3936 	}
3937 	I915_WRITE(EMR, error_mask);
3938 
3939 	I915_WRITE(IMR, dev_priv->irq_mask);
3940 	I915_WRITE(IER, enable_mask);
3941 	POSTING_READ(IER);
3942 
3943 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3944 	POSTING_READ(PORT_HOTPLUG_EN);
3945 
3946 	i915_enable_asle_pipestat(dev);
3947 
3948 	return 0;
3949 }
3950 
3951 static void i915_hpd_irq_setup(struct drm_device *dev)
3952 {
3953 	struct drm_i915_private *dev_priv = dev->dev_private;
3954 	struct intel_encoder *intel_encoder;
3955 	u32 hotplug_en;
3956 
3957 	assert_spin_locked(&dev_priv->irq_lock);
3958 
3959 	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3960 	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3961 	/* Note HDMI and DP share hotplug bits */
3962 	/* enable bits are the same for all generations */
3963 	for_each_intel_encoder(dev, intel_encoder)
3964 		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
3965 			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3966 	/* Programming the CRT detection parameters tends
3967 	   to generate a spurious hotplug event about three
3968 	   seconds later.  So just do it once.
3969 	*/
3970 	if (IS_G4X(dev))
3971 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3972 	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3973 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3974 
3975 	/* Ignore TV since it's buggy */
3976 	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3977 }
3978 
3979 static irqreturn_t i965_irq_handler(void *arg)
3980 {
3981 	struct drm_device *dev = arg;
3982 	struct drm_i915_private *dev_priv = dev->dev_private;
3983 	u32 iir, new_iir;
3984 	u32 pipe_stats[I915_MAX_PIPES];
3985 	int pipe;
3986 	u32 flip_mask =
3987 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3988 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3989 
3990 	if (!intel_irqs_enabled(dev_priv))
3991 		return IRQ_NONE;
3992 
3993 	iir = I915_READ(IIR);
3994 
3995 	for (;;) {
3996 		bool irq_received = (iir & ~flip_mask) != 0;
3997 		bool blc_event = false;
3998 
3999 		/* Can't rely on pipestat interrupt bit in iir as it might
4000 		 * have been cleared after the pipestat interrupt was received.
4001 		 * It doesn't set the bit in iir again, but it still produces
4002 		 * interrupts (for non-MSI).
4003 		 */
4004 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4005 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4006 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4007 
4008 		for_each_pipe(dev_priv, pipe) {
4009 			int reg = PIPESTAT(pipe);
4010 			pipe_stats[pipe] = I915_READ(reg);
4011 
4012 			/*
4013 			 * Clear the PIPE*STAT regs before the IIR
4014 			 */
4015 			if (pipe_stats[pipe] & 0x8000ffff) {
4016 				I915_WRITE(reg, pipe_stats[pipe]);
4017 				irq_received = true;
4018 			}
4019 		}
4020 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4021 
4022 		if (!irq_received)
4023 			break;
4024 
4025 		/* Consume port.  Then clear IIR or we'll miss events */
4026 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4027 			i9xx_hpd_irq_handler(dev);
4028 
4029 		I915_WRITE(IIR, iir & ~flip_mask);
4030 		new_iir = I915_READ(IIR); /* Flush posted writes */
4031 
4032 		if (iir & I915_USER_INTERRUPT)
4033 			notify_ring(&dev_priv->ring[RCS]);
4034 		if (iir & I915_BSD_USER_INTERRUPT)
4035 			notify_ring(&dev_priv->ring[VCS]);
4036 
4037 		for_each_pipe(dev_priv, pipe) {
4038 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4039 			    i915_handle_vblank(dev, pipe, pipe, iir))
4040 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4041 
4042 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4043 				blc_event = true;
4044 
4045 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4046 				i9xx_pipe_crc_irq_handler(dev, pipe);
4047 
4048 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4049 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4050 		}
4051 
4052 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4053 			intel_opregion_asle_intr(dev);
4054 
4055 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4056 			gmbus_irq_handler(dev);
4057 
4058 		/* With MSI, interrupts are only generated when iir
4059 		 * transitions from zero to nonzero.  If another bit got
4060 		 * set while we were handling the existing iir bits, then
4061 		 * we would never get another interrupt.
4062 		 *
4063 		 * This is fine on non-MSI as well, as if we hit this path
4064 		 * we avoid exiting the interrupt handler only to generate
4065 		 * another one.
4066 		 *
4067 		 * Note that for MSI this could cause a stray interrupt report
4068 		 * if an interrupt landed in the time between writing IIR and
4069 		 * the posting read.  This should be rare enough to never
4070 		 * trigger the 99% of 100,000 interrupts test for disabling
4071 		 * stray interrupts.
4072 		 */
4073 		iir = new_iir;
4074 	}
4075 
4076 }
4077 
4078 static void i965_irq_uninstall(struct drm_device * dev)
4079 {
4080 	struct drm_i915_private *dev_priv = dev->dev_private;
4081 	int pipe;
4082 
4083 	if (!dev_priv)
4084 		return;
4085 
4086 	I915_WRITE(PORT_HOTPLUG_EN, 0);
4087 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4088 
4089 	I915_WRITE(HWSTAM, 0xffffffff);
4090 	for_each_pipe(dev_priv, pipe)
4091 		I915_WRITE(PIPESTAT(pipe), 0);
4092 	I915_WRITE(IMR, 0xffffffff);
4093 	I915_WRITE(IER, 0x0);
4094 
4095 	for_each_pipe(dev_priv, pipe)
4096 		I915_WRITE(PIPESTAT(pipe),
4097 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4098 	I915_WRITE(IIR, I915_READ(IIR));
4099 }
4100 
4101 /**
4102  * intel_irq_init - initializes irq support
4103  * @dev_priv: i915 device instance
4104  *
4105  * This function initializes all the irq support including work items, timers
4106  * and all the vtables. It does not setup the interrupt itself though.
4107  */
4108 void intel_irq_init(struct drm_i915_private *dev_priv)
4109 {
4110 	struct drm_device *dev = dev_priv->dev;
4111 
4112 	intel_hpd_init_work(dev_priv);
4113 
4114 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4115 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4116 
4117 	/* Let's track the enabled rps events */
4118 	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4119 		/* WaGsvRC0ResidencyMethod:vlv */
4120 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4121 	else
4122 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4123 
4124 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4125 			  i915_hangcheck_elapsed);
4126 
4127 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4128 
4129 	if (IS_GEN2(dev_priv)) {
4130 		dev->max_vblank_count = 0;
4131 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4132 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4133 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4134 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4135 	} else {
4136 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4137 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4138 	}
4139 
4140 	/*
4141 	 * Opt out of the vblank disable timer on everything except gen2.
4142 	 * Gen2 doesn't have a hardware frame counter and so depends on
4143 	 * vblank interrupts to produce sane vblank seuquence numbers.
4144 	 */
4145 	if (!IS_GEN2(dev_priv))
4146 		dev->vblank_disable_immediate = true;
4147 
4148 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4149 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4150 
4151 	if (IS_CHERRYVIEW(dev_priv)) {
4152 		dev->driver->irq_handler = cherryview_irq_handler;
4153 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4154 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4155 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4156 		dev->driver->enable_vblank = valleyview_enable_vblank;
4157 		dev->driver->disable_vblank = valleyview_disable_vblank;
4158 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4159 	} else if (IS_VALLEYVIEW(dev_priv)) {
4160 		dev->driver->irq_handler = valleyview_irq_handler;
4161 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4162 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4163 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4164 		dev->driver->enable_vblank = valleyview_enable_vblank;
4165 		dev->driver->disable_vblank = valleyview_disable_vblank;
4166 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4167 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4168 		dev->driver->irq_handler = gen8_irq_handler;
4169 		dev->driver->irq_preinstall = gen8_irq_reset;
4170 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4171 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4172 		dev->driver->enable_vblank = gen8_enable_vblank;
4173 		dev->driver->disable_vblank = gen8_disable_vblank;
4174 		if (HAS_PCH_SPLIT(dev))
4175 			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4176 		else
4177 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4178 	} else if (HAS_PCH_SPLIT(dev)) {
4179 		dev->driver->irq_handler = ironlake_irq_handler;
4180 		dev->driver->irq_preinstall = ironlake_irq_reset;
4181 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4182 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4183 		dev->driver->enable_vblank = ironlake_enable_vblank;
4184 		dev->driver->disable_vblank = ironlake_disable_vblank;
4185 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4186 	} else {
4187 		if (INTEL_INFO(dev_priv)->gen == 2) {
4188 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4189 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4190 			dev->driver->irq_handler = i8xx_irq_handler;
4191 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4192 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4193 			dev->driver->irq_preinstall = i915_irq_preinstall;
4194 			dev->driver->irq_postinstall = i915_irq_postinstall;
4195 			dev->driver->irq_uninstall = i915_irq_uninstall;
4196 			dev->driver->irq_handler = i915_irq_handler;
4197 		} else {
4198 			dev->driver->irq_preinstall = i965_irq_preinstall;
4199 			dev->driver->irq_postinstall = i965_irq_postinstall;
4200 			dev->driver->irq_uninstall = i965_irq_uninstall;
4201 			dev->driver->irq_handler = i965_irq_handler;
4202 		}
4203 		if (I915_HAS_HOTPLUG(dev_priv))
4204 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4205 		dev->driver->enable_vblank = i915_enable_vblank;
4206 		dev->driver->disable_vblank = i915_disable_vblank;
4207 	}
4208 }
4209 
4210 /**
4211  * intel_irq_install - enables the hardware interrupt
4212  * @dev_priv: i915 device instance
4213  *
4214  * This function enables the hardware interrupt handling, but leaves the hotplug
4215  * handling still disabled. It is called after intel_irq_init().
4216  *
4217  * In the driver load and resume code we need working interrupts in a few places
4218  * but don't want to deal with the hassle of concurrent probe and hotplug
4219  * workers. Hence the split into this two-stage approach.
4220  */
4221 int intel_irq_install(struct drm_i915_private *dev_priv)
4222 {
4223 	/*
4224 	 * We enable some interrupt sources in our postinstall hooks, so mark
4225 	 * interrupts as enabled _before_ actually enabling them to avoid
4226 	 * special cases in our ordering checks.
4227 	 */
4228 	dev_priv->pm.irqs_enabled = true;
4229 
4230 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4231 }
4232 
4233 /**
4234  * intel_irq_uninstall - finilizes all irq handling
4235  * @dev_priv: i915 device instance
4236  *
4237  * This stops interrupt and hotplug handling and unregisters and frees all
4238  * resources acquired in the init functions.
4239  */
4240 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4241 {
4242 	drm_irq_uninstall(dev_priv->dev);
4243 	intel_hpd_cancel_work(dev_priv);
4244 	dev_priv->pm.irqs_enabled = false;
4245 }
4246 
4247 /**
4248  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4249  * @dev_priv: i915 device instance
4250  *
4251  * This function is used to disable interrupts at runtime, both in the runtime
4252  * pm and the system suspend/resume code.
4253  */
4254 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4255 {
4256 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4257 	dev_priv->pm.irqs_enabled = false;
4258 #if 0
4259 	synchronize_irq(dev_priv->dev->irq);
4260 #endif
4261 }
4262 
4263 /**
4264  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4265  * @dev_priv: i915 device instance
4266  *
4267  * This function is used to enable interrupts at runtime, both in the runtime
4268  * pm and the system suspend/resume code.
4269  */
4270 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4271 {
4272 	dev_priv->pm.irqs_enabled = true;
4273 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4274 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4275 }
4276