xref: /dflybsd-src/sys/dev/drm/i915/i915_irq.c (revision b4315fc7edb71a3b20b837c3aa96f46bf41080bf)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: interrupt handling
37  *
38  * These functions provide the basic support for enabling and disabling the
39  * interrupt handling support. There's a lot more functionality in i915_irq.c
40  * and related files, but that will be described in separate chapters.
41  */
42 
43 static const u32 hpd_ilk[HPD_NUM_PINS] = {
44 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
45 };
46 
47 static const u32 hpd_ivb[HPD_NUM_PINS] = {
48 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
49 };
50 
51 static const u32 hpd_bdw[HPD_NUM_PINS] = {
52 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
53 };
54 
55 static const u32 hpd_ibx[HPD_NUM_PINS] = {
56 	[HPD_CRT] = SDE_CRT_HOTPLUG,
57 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
58 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
59 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
60 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
61 };
62 
63 static const u32 hpd_cpt[HPD_NUM_PINS] = {
64 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
65 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
66 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
67 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
68 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
69 };
70 
71 static const u32 hpd_spt[HPD_NUM_PINS] = {
72 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
73 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
74 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
75 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
76 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
77 };
78 
79 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
80 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
81 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
82 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
83 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
84 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
85 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
86 };
87 
88 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
89 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
90 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
91 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
92 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
93 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
94 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
95 };
96 
97 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
98 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
99 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
100 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
101 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
102 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
103 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
104 };
105 
106 /* BXT hpd list */
107 static const u32 hpd_bxt[HPD_NUM_PINS] = {
108 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
109 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
110 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
111 };
112 
113 /* IIR can theoretically queue up two events. Be paranoid. */
114 #define GEN8_IRQ_RESET_NDX(type, which) do { \
115 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
116 	POSTING_READ(GEN8_##type##_IMR(which)); \
117 	I915_WRITE(GEN8_##type##_IER(which), 0); \
118 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
119 	POSTING_READ(GEN8_##type##_IIR(which)); \
120 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
121 	POSTING_READ(GEN8_##type##_IIR(which)); \
122 } while (0)
123 
124 #define GEN5_IRQ_RESET(type) do { \
125 	I915_WRITE(type##IMR, 0xffffffff); \
126 	POSTING_READ(type##IMR); \
127 	I915_WRITE(type##IER, 0); \
128 	I915_WRITE(type##IIR, 0xffffffff); \
129 	POSTING_READ(type##IIR); \
130 	I915_WRITE(type##IIR, 0xffffffff); \
131 	POSTING_READ(type##IIR); \
132 } while (0)
133 
134 /*
135  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
136  */
137 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
138 				    i915_reg_t reg)
139 {
140 	u32 val = I915_READ(reg);
141 
142 	if (val == 0)
143 		return;
144 
145 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
146 	     i915_mmio_reg_offset(reg), val);
147 	I915_WRITE(reg, 0xffffffff);
148 	POSTING_READ(reg);
149 	I915_WRITE(reg, 0xffffffff);
150 	POSTING_READ(reg);
151 }
152 
153 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
154 	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
155 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
156 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
157 	POSTING_READ(GEN8_##type##_IMR(which)); \
158 } while (0)
159 
160 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
161 	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
162 	I915_WRITE(type##IER, (ier_val)); \
163 	I915_WRITE(type##IMR, (imr_val)); \
164 	POSTING_READ(type##IMR); \
165 } while (0)
166 
167 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
168 
169 /* For display hotplug interrupt */
170 static inline void
171 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
172 				     uint32_t mask,
173 				     uint32_t bits)
174 {
175 	uint32_t val;
176 
177 	assert_spin_locked(&dev_priv->irq_lock);
178 	WARN_ON(bits & ~mask);
179 
180 	val = I915_READ(PORT_HOTPLUG_EN);
181 	val &= ~mask;
182 	val |= bits;
183 	I915_WRITE(PORT_HOTPLUG_EN, val);
184 }
185 
186 /**
187  * i915_hotplug_interrupt_update - update hotplug interrupt enable
188  * @dev_priv: driver private
189  * @mask: bits to update
190  * @bits: bits to enable
191  * NOTE: the HPD enable bits are modified both inside and outside
192  * of an interrupt context. To avoid that read-modify-write cycles
193  * interfer, these bits are protected by a spinlock. Since this
194  * function is usually not called from a context where the lock is
195  * held already, this function acquires the lock itself. A non-locking
196  * version is also available.
197  */
198 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
199 				   uint32_t mask,
200 				   uint32_t bits)
201 {
202 	spin_lock_irq(&dev_priv->irq_lock);
203 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
204 	spin_unlock_irq(&dev_priv->irq_lock);
205 }
206 
207 /**
208  * ilk_update_display_irq - update DEIMR
209  * @dev_priv: driver private
210  * @interrupt_mask: mask of interrupt bits to update
211  * @enabled_irq_mask: mask of interrupt bits to enable
212  */
213 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
214 			    uint32_t interrupt_mask,
215 			    uint32_t enabled_irq_mask)
216 {
217 	uint32_t new_val;
218 
219 	assert_spin_locked(&dev_priv->irq_lock);
220 
221 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
222 
223 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
224 		return;
225 
226 	new_val = dev_priv->irq_mask;
227 	new_val &= ~interrupt_mask;
228 	new_val |= (~enabled_irq_mask & interrupt_mask);
229 
230 	if (new_val != dev_priv->irq_mask) {
231 		dev_priv->irq_mask = new_val;
232 		I915_WRITE(DEIMR, dev_priv->irq_mask);
233 		POSTING_READ(DEIMR);
234 	}
235 }
236 
237 /**
238  * ilk_update_gt_irq - update GTIMR
239  * @dev_priv: driver private
240  * @interrupt_mask: mask of interrupt bits to update
241  * @enabled_irq_mask: mask of interrupt bits to enable
242  */
243 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
244 			      uint32_t interrupt_mask,
245 			      uint32_t enabled_irq_mask)
246 {
247 	assert_spin_locked(&dev_priv->irq_lock);
248 
249 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
250 
251 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
252 		return;
253 
254 	dev_priv->gt_irq_mask &= ~interrupt_mask;
255 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
256 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
257 	POSTING_READ(GTIMR);
258 }
259 
260 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
261 {
262 	ilk_update_gt_irq(dev_priv, mask, mask);
263 }
264 
265 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
266 {
267 	ilk_update_gt_irq(dev_priv, mask, 0);
268 }
269 
270 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
271 {
272 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
273 }
274 
275 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
276 {
277 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
278 }
279 
280 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
281 {
282 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
283 }
284 
285 /**
286  * snb_update_pm_irq - update GEN6_PMIMR
287  * @dev_priv: driver private
288  * @interrupt_mask: mask of interrupt bits to update
289  * @enabled_irq_mask: mask of interrupt bits to enable
290  */
291 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
292 			      uint32_t interrupt_mask,
293 			      uint32_t enabled_irq_mask)
294 {
295 	uint32_t new_val;
296 
297 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
298 
299 	assert_spin_locked(&dev_priv->irq_lock);
300 
301 	new_val = dev_priv->pm_irq_mask;
302 	new_val &= ~interrupt_mask;
303 	new_val |= (~enabled_irq_mask & interrupt_mask);
304 
305 	if (new_val != dev_priv->pm_irq_mask) {
306 		dev_priv->pm_irq_mask = new_val;
307 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
308 		POSTING_READ(gen6_pm_imr(dev_priv));
309 	}
310 }
311 
312 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
313 {
314 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
315 		return;
316 
317 	snb_update_pm_irq(dev_priv, mask, mask);
318 }
319 
320 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
321 				  uint32_t mask)
322 {
323 	snb_update_pm_irq(dev_priv, mask, 0);
324 }
325 
326 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
327 {
328 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
329 		return;
330 
331 	__gen6_disable_pm_irq(dev_priv, mask);
332 }
333 
334 void gen6_reset_rps_interrupts(struct drm_device *dev)
335 {
336 	struct drm_i915_private *dev_priv = dev->dev_private;
337 	i915_reg_t reg = gen6_pm_iir(dev_priv);
338 
339 	spin_lock_irq(&dev_priv->irq_lock);
340 	I915_WRITE(reg, dev_priv->pm_rps_events);
341 	I915_WRITE(reg, dev_priv->pm_rps_events);
342 	POSTING_READ(reg);
343 	dev_priv->rps.pm_iir = 0;
344 	spin_unlock_irq(&dev_priv->irq_lock);
345 }
346 
347 void gen6_enable_rps_interrupts(struct drm_device *dev)
348 {
349 	struct drm_i915_private *dev_priv = dev->dev_private;
350 
351 	spin_lock_irq(&dev_priv->irq_lock);
352 
353 	WARN_ON(dev_priv->rps.pm_iir);
354 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
355 	dev_priv->rps.interrupts_enabled = true;
356 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
357 				dev_priv->pm_rps_events);
358 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
359 
360 	spin_unlock_irq(&dev_priv->irq_lock);
361 }
362 
363 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
364 {
365 	/*
366 	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
367 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
368 	 *
369 	 * TODO: verify if this can be reproduced on VLV,CHV.
370 	 */
371 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
372 		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
373 
374 	if (INTEL_INFO(dev_priv)->gen >= 8)
375 		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
376 
377 	return mask;
378 }
379 
380 void gen6_disable_rps_interrupts(struct drm_device *dev)
381 {
382 	struct drm_i915_private *dev_priv = dev->dev_private;
383 
384 	spin_lock_irq(&dev_priv->irq_lock);
385 	dev_priv->rps.interrupts_enabled = false;
386 	spin_unlock_irq(&dev_priv->irq_lock);
387 
388 	cancel_work_sync(&dev_priv->rps.work);
389 
390 	spin_lock_irq(&dev_priv->irq_lock);
391 
392 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
393 
394 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
395 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
396 				~dev_priv->pm_rps_events);
397 
398 	spin_unlock_irq(&dev_priv->irq_lock);
399 
400 #if 0
401 	synchronize_irq(dev->irq);
402 #endif
403 }
404 
405 /**
406  * bdw_update_port_irq - update DE port interrupt
407  * @dev_priv: driver private
408  * @interrupt_mask: mask of interrupt bits to update
409  * @enabled_irq_mask: mask of interrupt bits to enable
410  */
411 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
412 				uint32_t interrupt_mask,
413 				uint32_t enabled_irq_mask)
414 {
415 	uint32_t new_val;
416 	uint32_t old_val;
417 
418 	assert_spin_locked(&dev_priv->irq_lock);
419 
420 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
421 
422 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
423 		return;
424 
425 	old_val = I915_READ(GEN8_DE_PORT_IMR);
426 
427 	new_val = old_val;
428 	new_val &= ~interrupt_mask;
429 	new_val |= (~enabled_irq_mask & interrupt_mask);
430 
431 	if (new_val != old_val) {
432 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
433 		POSTING_READ(GEN8_DE_PORT_IMR);
434 	}
435 }
436 
437 /**
438  * bdw_update_pipe_irq - update DE pipe interrupt
439  * @dev_priv: driver private
440  * @pipe: pipe whose interrupt to update
441  * @interrupt_mask: mask of interrupt bits to update
442  * @enabled_irq_mask: mask of interrupt bits to enable
443  */
444 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
445 			 enum i915_pipe pipe,
446 			 uint32_t interrupt_mask,
447 			 uint32_t enabled_irq_mask)
448 {
449 	uint32_t new_val;
450 
451 	assert_spin_locked(&dev_priv->irq_lock);
452 
453 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
454 
455 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
456 		return;
457 
458 	new_val = dev_priv->de_irq_mask[pipe];
459 	new_val &= ~interrupt_mask;
460 	new_val |= (~enabled_irq_mask & interrupt_mask);
461 
462 	if (new_val != dev_priv->de_irq_mask[pipe]) {
463 		dev_priv->de_irq_mask[pipe] = new_val;
464 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
465 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
466 	}
467 }
468 
469 /**
470  * ibx_display_interrupt_update - update SDEIMR
471  * @dev_priv: driver private
472  * @interrupt_mask: mask of interrupt bits to update
473  * @enabled_irq_mask: mask of interrupt bits to enable
474  */
475 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
476 				  uint32_t interrupt_mask,
477 				  uint32_t enabled_irq_mask)
478 {
479 	uint32_t sdeimr = I915_READ(SDEIMR);
480 	sdeimr &= ~interrupt_mask;
481 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
482 
483 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
484 
485 	assert_spin_locked(&dev_priv->irq_lock);
486 
487 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
488 		return;
489 
490 	I915_WRITE(SDEIMR, sdeimr);
491 	POSTING_READ(SDEIMR);
492 }
493 
494 static void
495 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
496 		       u32 enable_mask, u32 status_mask)
497 {
498 	i915_reg_t reg = PIPESTAT(pipe);
499 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
500 
501 	assert_spin_locked(&dev_priv->irq_lock);
502 	WARN_ON(!intel_irqs_enabled(dev_priv));
503 
504 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
505 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
506 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
507 		      pipe_name(pipe), enable_mask, status_mask))
508 		return;
509 
510 	if ((pipestat & enable_mask) == enable_mask)
511 		return;
512 
513 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
514 
515 	/* Enable the interrupt, clear any pending status */
516 	pipestat |= enable_mask | status_mask;
517 	I915_WRITE(reg, pipestat);
518 	POSTING_READ(reg);
519 }
520 
521 static void
522 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
523 		        u32 enable_mask, u32 status_mask)
524 {
525 	i915_reg_t reg = PIPESTAT(pipe);
526 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
527 
528 	assert_spin_locked(&dev_priv->irq_lock);
529 	WARN_ON(!intel_irqs_enabled(dev_priv));
530 
531 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
532 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
533 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
534 		      pipe_name(pipe), enable_mask, status_mask))
535 		return;
536 
537 	if ((pipestat & enable_mask) == 0)
538 		return;
539 
540 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
541 
542 	pipestat &= ~enable_mask;
543 	I915_WRITE(reg, pipestat);
544 	POSTING_READ(reg);
545 }
546 
547 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
548 {
549 	u32 enable_mask = status_mask << 16;
550 
551 	/*
552 	 * On pipe A we don't support the PSR interrupt yet,
553 	 * on pipe B and C the same bit MBZ.
554 	 */
555 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
556 		return 0;
557 	/*
558 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
559 	 * A the same bit is for perf counters which we don't use either.
560 	 */
561 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
562 		return 0;
563 
564 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
565 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
566 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
567 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
568 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
569 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
570 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
571 
572 	return enable_mask;
573 }
574 
575 void
576 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
577 		     u32 status_mask)
578 {
579 	u32 enable_mask;
580 
581 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
582 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
583 							   status_mask);
584 	else
585 		enable_mask = status_mask << 16;
586 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
587 }
588 
589 void
590 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
591 		      u32 status_mask)
592 {
593 	u32 enable_mask;
594 
595 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
596 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
597 							   status_mask);
598 	else
599 		enable_mask = status_mask << 16;
600 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
601 }
602 
603 /**
604  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
605  * @dev: drm device
606  */
607 static void i915_enable_asle_pipestat(struct drm_device *dev)
608 {
609 	struct drm_i915_private *dev_priv = dev->dev_private;
610 
611 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
612 		return;
613 
614 	spin_lock_irq(&dev_priv->irq_lock);
615 
616 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
617 	if (INTEL_INFO(dev)->gen >= 4)
618 		i915_enable_pipestat(dev_priv, PIPE_A,
619 				     PIPE_LEGACY_BLC_EVENT_STATUS);
620 
621 	spin_unlock_irq(&dev_priv->irq_lock);
622 }
623 
624 /*
625  * This timing diagram depicts the video signal in and
626  * around the vertical blanking period.
627  *
628  * Assumptions about the fictitious mode used in this example:
629  *  vblank_start >= 3
630  *  vsync_start = vblank_start + 1
631  *  vsync_end = vblank_start + 2
632  *  vtotal = vblank_start + 3
633  *
634  *           start of vblank:
635  *           latch double buffered registers
636  *           increment frame counter (ctg+)
637  *           generate start of vblank interrupt (gen4+)
638  *           |
639  *           |          frame start:
640  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
641  *           |          may be shifted forward 1-3 extra lines via PIPECONF
642  *           |          |
643  *           |          |  start of vsync:
644  *           |          |  generate vsync interrupt
645  *           |          |  |
646  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
647  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
648  * ----va---> <-----------------vb--------------------> <--------va-------------
649  *       |          |       <----vs----->                     |
650  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
651  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
652  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
653  *       |          |                                         |
654  *       last visible pixel                                   first visible pixel
655  *                  |                                         increment frame counter (gen3/4)
656  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
657  *
658  * x  = horizontal active
659  * _  = horizontal blanking
660  * hs = horizontal sync
661  * va = vertical active
662  * vb = vertical blanking
663  * vs = vertical sync
664  * vbs = vblank_start (number)
665  *
666  * Summary:
667  * - most events happen at the start of horizontal sync
668  * - frame start happens at the start of horizontal blank, 1-4 lines
669  *   (depending on PIPECONF settings) after the start of vblank
670  * - gen3/4 pixel and frame counter are synchronized with the start
671  *   of horizontal active on the first line of vertical active
672  */
673 
674 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
675 {
676 	/* Gen2 doesn't have a hardware frame counter */
677 	return 0;
678 }
679 
680 /* Called from drm generic code, passed a 'crtc', which
681  * we use as a pipe index
682  */
683 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
684 {
685 	struct drm_i915_private *dev_priv = dev->dev_private;
686 	i915_reg_t high_frame, low_frame;
687 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
688 	struct intel_crtc *intel_crtc =
689 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
690 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
691 
692 	htotal = mode->crtc_htotal;
693 	hsync_start = mode->crtc_hsync_start;
694 	vbl_start = mode->crtc_vblank_start;
695 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
696 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
697 
698 	/* Convert to pixel count */
699 	vbl_start *= htotal;
700 
701 	/* Start of vblank event occurs at start of hsync */
702 	vbl_start -= htotal - hsync_start;
703 
704 	high_frame = PIPEFRAME(pipe);
705 	low_frame = PIPEFRAMEPIXEL(pipe);
706 
707 	/*
708 	 * High & low register fields aren't synchronized, so make sure
709 	 * we get a low value that's stable across two reads of the high
710 	 * register.
711 	 */
712 	do {
713 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
714 		low   = I915_READ(low_frame);
715 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
716 	} while (high1 != high2);
717 
718 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
719 	pixel = low & PIPE_PIXEL_MASK;
720 	low >>= PIPE_FRAME_LOW_SHIFT;
721 
722 	/*
723 	 * The frame counter increments at beginning of active.
724 	 * Cook up a vblank counter by also checking the pixel
725 	 * counter against vblank start.
726 	 */
727 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
728 }
729 
730 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
731 {
732 	struct drm_i915_private *dev_priv = dev->dev_private;
733 
734 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
735 }
736 
737 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
738 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
739 {
740 	struct drm_device *dev = crtc->base.dev;
741 	struct drm_i915_private *dev_priv = dev->dev_private;
742 	const struct drm_display_mode *mode = &crtc->base.hwmode;
743 	enum i915_pipe pipe = crtc->pipe;
744 	int position, vtotal;
745 
746 	vtotal = mode->crtc_vtotal;
747 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
748 		vtotal /= 2;
749 
750 	if (IS_GEN2(dev))
751 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
752 	else
753 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
754 
755 	/*
756 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
757 	 * read it just before the start of vblank.  So try it again
758 	 * so we don't accidentally end up spanning a vblank frame
759 	 * increment, causing the pipe_update_end() code to squak at us.
760 	 *
761 	 * The nature of this problem means we can't simply check the ISR
762 	 * bit and return the vblank start value; nor can we use the scanline
763 	 * debug register in the transcoder as it appears to have the same
764 	 * problem.  We may need to extend this to include other platforms,
765 	 * but so far testing only shows the problem on HSW.
766 	 */
767 	if (HAS_DDI(dev) && !position) {
768 		int i, temp;
769 
770 		for (i = 0; i < 100; i++) {
771 			udelay(1);
772 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
773 				DSL_LINEMASK_GEN3;
774 			if (temp != position) {
775 				position = temp;
776 				break;
777 			}
778 		}
779 	}
780 
781 	/*
782 	 * See update_scanline_offset() for the details on the
783 	 * scanline_offset adjustment.
784 	 */
785 	return (position + crtc->scanline_offset) % vtotal;
786 }
787 
788 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
789 				    unsigned int flags, int *vpos, int *hpos,
790 				    ktime_t *stime, ktime_t *etime,
791 				    const struct drm_display_mode *mode)
792 {
793 	struct drm_i915_private *dev_priv = dev->dev_private;
794 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
795 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
796 	int position;
797 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
798 	bool in_vbl = true;
799 	int ret = 0;
800 	unsigned long irqflags;
801 
802 	if (WARN_ON(!mode->crtc_clock)) {
803 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
804 				 "pipe %c\n", pipe_name(pipe));
805 		return 0;
806 	}
807 
808 	htotal = mode->crtc_htotal;
809 	hsync_start = mode->crtc_hsync_start;
810 	vtotal = mode->crtc_vtotal;
811 	vbl_start = mode->crtc_vblank_start;
812 	vbl_end = mode->crtc_vblank_end;
813 
814 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
815 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
816 		vbl_end /= 2;
817 		vtotal /= 2;
818 	}
819 
820 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
821 
822 	/*
823 	 * Lock uncore.lock, as we will do multiple timing critical raw
824 	 * register reads, potentially with preemption disabled, so the
825 	 * following code must not block on uncore.lock.
826 	 */
827 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
828 
829 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
830 
831 	/* Get optional system timestamp before query. */
832 	if (stime)
833 		*stime = ktime_get();
834 
835 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
836 		/* No obvious pixelcount register. Only query vertical
837 		 * scanout position from Display scan line register.
838 		 */
839 		position = __intel_get_crtc_scanline(intel_crtc);
840 	} else {
841 		/* Have access to pixelcount since start of frame.
842 		 * We can split this into vertical and horizontal
843 		 * scanout position.
844 		 */
845 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
846 
847 		/* convert to pixel counts */
848 		vbl_start *= htotal;
849 		vbl_end *= htotal;
850 		vtotal *= htotal;
851 
852 		/*
853 		 * In interlaced modes, the pixel counter counts all pixels,
854 		 * so one field will have htotal more pixels. In order to avoid
855 		 * the reported position from jumping backwards when the pixel
856 		 * counter is beyond the length of the shorter field, just
857 		 * clamp the position the length of the shorter field. This
858 		 * matches how the scanline counter based position works since
859 		 * the scanline counter doesn't count the two half lines.
860 		 */
861 		if (position >= vtotal)
862 			position = vtotal - 1;
863 
864 		/*
865 		 * Start of vblank interrupt is triggered at start of hsync,
866 		 * just prior to the first active line of vblank. However we
867 		 * consider lines to start at the leading edge of horizontal
868 		 * active. So, should we get here before we've crossed into
869 		 * the horizontal active of the first line in vblank, we would
870 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
871 		 * always add htotal-hsync_start to the current pixel position.
872 		 */
873 		position = (position + htotal - hsync_start) % vtotal;
874 	}
875 
876 	/* Get optional system timestamp after query. */
877 	if (etime)
878 		*etime = ktime_get();
879 
880 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
881 
882 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
883 
884 	in_vbl = position >= vbl_start && position < vbl_end;
885 
886 	/*
887 	 * While in vblank, position will be negative
888 	 * counting up towards 0 at vbl_end. And outside
889 	 * vblank, position will be positive counting
890 	 * up since vbl_end.
891 	 */
892 	if (position >= vbl_start)
893 		position -= vbl_end;
894 	else
895 		position += vtotal - vbl_end;
896 
897 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
898 		*vpos = position;
899 		*hpos = 0;
900 	} else {
901 		*vpos = position / htotal;
902 		*hpos = position - (*vpos * htotal);
903 	}
904 
905 	/* In vblank? */
906 	if (in_vbl)
907 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
908 
909 	return ret;
910 }
911 
912 int intel_get_crtc_scanline(struct intel_crtc *crtc)
913 {
914 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
915 	unsigned long irqflags;
916 	int position;
917 
918 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
919 	position = __intel_get_crtc_scanline(crtc);
920 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
921 
922 	return position;
923 }
924 
925 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
926 			      int *max_error,
927 			      struct timeval *vblank_time,
928 			      unsigned flags)
929 {
930 	struct drm_crtc *crtc;
931 
932 	if (pipe >= INTEL_INFO(dev)->num_pipes) {
933 		DRM_ERROR("Invalid crtc %u\n", pipe);
934 		return -EINVAL;
935 	}
936 
937 	/* Get drm_crtc to timestamp: */
938 	crtc = intel_get_crtc_for_pipe(dev, pipe);
939 	if (crtc == NULL) {
940 		DRM_ERROR("Invalid crtc %u\n", pipe);
941 		return -EINVAL;
942 	}
943 
944 	if (!crtc->hwmode.crtc_clock) {
945 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
946 		return -EBUSY;
947 	}
948 
949 	/* Helper routine in DRM core does all the work: */
950 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
951 						     vblank_time, flags,
952 						     &crtc->hwmode);
953 }
954 
955 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
956 {
957 	struct drm_i915_private *dev_priv = dev->dev_private;
958 	u32 busy_up, busy_down, max_avg, min_avg;
959 	u8 new_delay;
960 
961 	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
962 
963 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
964 
965 	new_delay = dev_priv->ips.cur_delay;
966 
967 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
968 	busy_up = I915_READ(RCPREVBSYTUPAVG);
969 	busy_down = I915_READ(RCPREVBSYTDNAVG);
970 	max_avg = I915_READ(RCBMAXAVG);
971 	min_avg = I915_READ(RCBMINAVG);
972 
973 	/* Handle RCS change request from hw */
974 	if (busy_up > max_avg) {
975 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
976 			new_delay = dev_priv->ips.cur_delay - 1;
977 		if (new_delay < dev_priv->ips.max_delay)
978 			new_delay = dev_priv->ips.max_delay;
979 	} else if (busy_down < min_avg) {
980 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
981 			new_delay = dev_priv->ips.cur_delay + 1;
982 		if (new_delay > dev_priv->ips.min_delay)
983 			new_delay = dev_priv->ips.min_delay;
984 	}
985 
986 	if (ironlake_set_drps(dev, new_delay))
987 		dev_priv->ips.cur_delay = new_delay;
988 
989 	lockmgr(&mchdev_lock, LK_RELEASE);
990 
991 	return;
992 }
993 
994 static void notify_ring(struct intel_engine_cs *ring)
995 {
996 	if (!intel_ring_initialized(ring))
997 		return;
998 
999 	trace_i915_gem_request_notify(ring);
1000 
1001 	wake_up_all(&ring->irq_queue);
1002 }
1003 
1004 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1005 			struct intel_rps_ei *ei)
1006 {
1007 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1008 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1009 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1010 }
1011 
1012 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1013 			 const struct intel_rps_ei *old,
1014 			 const struct intel_rps_ei *now,
1015 			 int threshold)
1016 {
1017 	u64 time, c0;
1018 	unsigned int mul = 100;
1019 
1020 	if (old->cz_clock == 0)
1021 		return false;
1022 
1023 	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1024 		mul <<= 8;
1025 
1026 	time = now->cz_clock - old->cz_clock;
1027 	time *= threshold * dev_priv->czclk_freq;
1028 
1029 	/* Workload can be split between render + media, e.g. SwapBuffers
1030 	 * being blitted in X after being rendered in mesa. To account for
1031 	 * this we need to combine both engines into our activity counter.
1032 	 */
1033 	c0 = now->render_c0 - old->render_c0;
1034 	c0 += now->media_c0 - old->media_c0;
1035 	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1036 
1037 	return c0 >= time;
1038 }
1039 
1040 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1041 {
1042 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1043 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1044 }
1045 
1046 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1047 {
1048 	struct intel_rps_ei now;
1049 	u32 events = 0;
1050 
1051 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1052 		return 0;
1053 
1054 	vlv_c0_read(dev_priv, &now);
1055 	if (now.cz_clock == 0)
1056 		return 0;
1057 
1058 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1059 		if (!vlv_c0_above(dev_priv,
1060 				  &dev_priv->rps.down_ei, &now,
1061 				  dev_priv->rps.down_threshold))
1062 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1063 		dev_priv->rps.down_ei = now;
1064 	}
1065 
1066 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1067 		if (vlv_c0_above(dev_priv,
1068 				 &dev_priv->rps.up_ei, &now,
1069 				 dev_priv->rps.up_threshold))
1070 			events |= GEN6_PM_RP_UP_THRESHOLD;
1071 		dev_priv->rps.up_ei = now;
1072 	}
1073 
1074 	return events;
1075 }
1076 
1077 static bool any_waiters(struct drm_i915_private *dev_priv)
1078 {
1079 	struct intel_engine_cs *ring;
1080 	int i;
1081 
1082 	for_each_ring(ring, dev_priv, i)
1083 		if (ring->irq_refcount)
1084 			return true;
1085 
1086 	return false;
1087 }
1088 
1089 static void gen6_pm_rps_work(struct work_struct *work)
1090 {
1091 	struct drm_i915_private *dev_priv =
1092 		container_of(work, struct drm_i915_private, rps.work);
1093 	bool client_boost;
1094 	int new_delay, adj, min, max;
1095 	u32 pm_iir;
1096 
1097 	spin_lock_irq(&dev_priv->irq_lock);
1098 	/* Speed up work cancelation during disabling rps interrupts. */
1099 	if (!dev_priv->rps.interrupts_enabled) {
1100 		spin_unlock_irq(&dev_priv->irq_lock);
1101 		return;
1102 	}
1103 
1104 	/*
1105 	 * The RPS work is synced during runtime suspend, we don't require a
1106 	 * wakeref. TODO: instead of disabling the asserts make sure that we
1107 	 * always hold an RPM reference while the work is running.
1108 	 */
1109 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1110 
1111 	pm_iir = dev_priv->rps.pm_iir;
1112 	dev_priv->rps.pm_iir = 0;
1113 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1114 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1115 	client_boost = dev_priv->rps.client_boost;
1116 	dev_priv->rps.client_boost = false;
1117 	spin_unlock_irq(&dev_priv->irq_lock);
1118 
1119 	/* Make sure we didn't queue anything we're not going to process. */
1120 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1121 
1122 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1123 		goto out;
1124 
1125 	mutex_lock(&dev_priv->rps.hw_lock);
1126 
1127 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1128 
1129 	adj = dev_priv->rps.last_adj;
1130 	new_delay = dev_priv->rps.cur_freq;
1131 	min = dev_priv->rps.min_freq_softlimit;
1132 	max = dev_priv->rps.max_freq_softlimit;
1133 
1134 	if (client_boost) {
1135 		new_delay = dev_priv->rps.max_freq_softlimit;
1136 		adj = 0;
1137 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1138 		if (adj > 0)
1139 			adj *= 2;
1140 		else /* CHV needs even encode values */
1141 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1142 		/*
1143 		 * For better performance, jump directly
1144 		 * to RPe if we're below it.
1145 		 */
1146 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1147 			new_delay = dev_priv->rps.efficient_freq;
1148 			adj = 0;
1149 		}
1150 	} else if (any_waiters(dev_priv)) {
1151 		adj = 0;
1152 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1153 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1154 			new_delay = dev_priv->rps.efficient_freq;
1155 		else
1156 			new_delay = dev_priv->rps.min_freq_softlimit;
1157 		adj = 0;
1158 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1159 		if (adj < 0)
1160 			adj *= 2;
1161 		else /* CHV needs even encode values */
1162 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1163 	} else { /* unknown event */
1164 		adj = 0;
1165 	}
1166 
1167 	dev_priv->rps.last_adj = adj;
1168 
1169 	/* sysfs frequency interfaces may have snuck in while servicing the
1170 	 * interrupt
1171 	 */
1172 	new_delay += adj;
1173 	new_delay = clamp_t(int, new_delay, min, max);
1174 
1175 	intel_set_rps(dev_priv->dev, new_delay);
1176 
1177 	mutex_unlock(&dev_priv->rps.hw_lock);
1178 out:
1179 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1180 }
1181 
1182 
1183 /**
1184  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1185  * occurred.
1186  * @work: workqueue struct
1187  *
1188  * Doesn't actually do anything except notify userspace. As a consequence of
1189  * this event, userspace should try to remap the bad rows since statistically
1190  * it is likely the same row is more likely to go bad again.
1191  */
1192 static void ivybridge_parity_work(struct work_struct *work)
1193 {
1194 	struct drm_i915_private *dev_priv =
1195 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1196 	u32 error_status, row, bank, subbank;
1197 	char *parity_event[6];
1198 	uint32_t misccpctl;
1199 	uint8_t slice = 0;
1200 
1201 	/* We must turn off DOP level clock gating to access the L3 registers.
1202 	 * In order to prevent a get/put style interface, acquire struct mutex
1203 	 * any time we access those registers.
1204 	 */
1205 	mutex_lock(&dev_priv->dev->struct_mutex);
1206 
1207 	/* If we've screwed up tracking, just let the interrupt fire again */
1208 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1209 		goto out;
1210 
1211 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1212 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1213 	POSTING_READ(GEN7_MISCCPCTL);
1214 
1215 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1216 		i915_reg_t reg;
1217 
1218 		slice--;
1219 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1220 			break;
1221 
1222 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1223 
1224 		reg = GEN7_L3CDERRST1(slice);
1225 
1226 		error_status = I915_READ(reg);
1227 		row = GEN7_PARITY_ERROR_ROW(error_status);
1228 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1229 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1230 
1231 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1232 		POSTING_READ(reg);
1233 
1234 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1235 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1236 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1237 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1238 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1239 		parity_event[5] = NULL;
1240 
1241 #if 0
1242 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1243 				   KOBJ_CHANGE, parity_event);
1244 #endif
1245 
1246 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1247 			  slice, row, bank, subbank);
1248 
1249 		kfree(parity_event[4]);
1250 		kfree(parity_event[3]);
1251 		kfree(parity_event[2]);
1252 		kfree(parity_event[1]);
1253 	}
1254 
1255 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1256 
1257 out:
1258 	WARN_ON(dev_priv->l3_parity.which_slice);
1259 	spin_lock_irq(&dev_priv->irq_lock);
1260 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1261 	spin_unlock_irq(&dev_priv->irq_lock);
1262 
1263 	mutex_unlock(&dev_priv->dev->struct_mutex);
1264 }
1265 
1266 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1267 {
1268 	struct drm_i915_private *dev_priv = dev->dev_private;
1269 
1270 	if (!HAS_L3_DPF(dev))
1271 		return;
1272 
1273 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1274 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1275 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1276 
1277 	iir &= GT_PARITY_ERROR(dev);
1278 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1279 		dev_priv->l3_parity.which_slice |= 1 << 1;
1280 
1281 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1282 		dev_priv->l3_parity.which_slice |= 1 << 0;
1283 
1284 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1285 }
1286 
1287 static void ilk_gt_irq_handler(struct drm_device *dev,
1288 			       struct drm_i915_private *dev_priv,
1289 			       u32 gt_iir)
1290 {
1291 	if (gt_iir &
1292 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1293 		notify_ring(&dev_priv->ring[RCS]);
1294 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1295 		notify_ring(&dev_priv->ring[VCS]);
1296 }
1297 
1298 static void snb_gt_irq_handler(struct drm_device *dev,
1299 			       struct drm_i915_private *dev_priv,
1300 			       u32 gt_iir)
1301 {
1302 
1303 	if (gt_iir &
1304 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1305 		notify_ring(&dev_priv->ring[RCS]);
1306 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1307 		notify_ring(&dev_priv->ring[VCS]);
1308 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1309 		notify_ring(&dev_priv->ring[BCS]);
1310 
1311 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1312 		      GT_BSD_CS_ERROR_INTERRUPT |
1313 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1314 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1315 
1316 	if (gt_iir & GT_PARITY_ERROR(dev))
1317 		ivybridge_parity_error_irq_handler(dev, gt_iir);
1318 }
1319 
1320 static __always_inline void
1321 gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
1322 {
1323 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1324 		notify_ring(ring);
1325 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1326 		intel_lrc_irq_handler(ring);
1327 }
1328 
1329 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1330 				       u32 master_ctl)
1331 {
1332 
1333 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1334 		u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
1335 		if (iir) {
1336 			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
1337 
1338 			gen8_cs_irq_handler(&dev_priv->ring[RCS],
1339 					iir, GEN8_RCS_IRQ_SHIFT);
1340 
1341 			gen8_cs_irq_handler(&dev_priv->ring[BCS],
1342 					iir, GEN8_BCS_IRQ_SHIFT);
1343 		} else
1344 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1345 	}
1346 
1347 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1348 		u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
1349 		if (iir) {
1350 			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
1351 
1352 			gen8_cs_irq_handler(&dev_priv->ring[VCS],
1353 					iir, GEN8_VCS1_IRQ_SHIFT);
1354 
1355 			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
1356 					iir, GEN8_VCS2_IRQ_SHIFT);
1357 		} else
1358 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1359 	}
1360 
1361 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1362 		u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
1363 		if (iir) {
1364 			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
1365 
1366 			gen8_cs_irq_handler(&dev_priv->ring[VECS],
1367 					iir, GEN8_VECS_IRQ_SHIFT);
1368 		} else
1369 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1370 	}
1371 
1372 	if (master_ctl & GEN8_GT_PM_IRQ) {
1373 		u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
1374 		if (iir & dev_priv->pm_rps_events) {
1375 			I915_WRITE_FW(GEN8_GT_IIR(2),
1376 				      iir & dev_priv->pm_rps_events);
1377 			gen6_rps_irq_handler(dev_priv, iir);
1378 		} else
1379 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1380 	}
1381 
1382 }
1383 
1384 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1385 {
1386 	switch (port) {
1387 	case PORT_A:
1388 		return val & PORTA_HOTPLUG_LONG_DETECT;
1389 	case PORT_B:
1390 		return val & PORTB_HOTPLUG_LONG_DETECT;
1391 	case PORT_C:
1392 		return val & PORTC_HOTPLUG_LONG_DETECT;
1393 	default:
1394 		return false;
1395 	}
1396 }
1397 
1398 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1399 {
1400 	switch (port) {
1401 	case PORT_E:
1402 		return val & PORTE_HOTPLUG_LONG_DETECT;
1403 	default:
1404 		return false;
1405 	}
1406 }
1407 
1408 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1409 {
1410 	switch (port) {
1411 	case PORT_A:
1412 		return val & PORTA_HOTPLUG_LONG_DETECT;
1413 	case PORT_B:
1414 		return val & PORTB_HOTPLUG_LONG_DETECT;
1415 	case PORT_C:
1416 		return val & PORTC_HOTPLUG_LONG_DETECT;
1417 	case PORT_D:
1418 		return val & PORTD_HOTPLUG_LONG_DETECT;
1419 	default:
1420 		return false;
1421 	}
1422 }
1423 
1424 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1425 {
1426 	switch (port) {
1427 	case PORT_A:
1428 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1429 	default:
1430 		return false;
1431 	}
1432 }
1433 
1434 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1435 {
1436 	switch (port) {
1437 	case PORT_B:
1438 		return val & PORTB_HOTPLUG_LONG_DETECT;
1439 	case PORT_C:
1440 		return val & PORTC_HOTPLUG_LONG_DETECT;
1441 	case PORT_D:
1442 		return val & PORTD_HOTPLUG_LONG_DETECT;
1443 	default:
1444 		return false;
1445 	}
1446 }
1447 
1448 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1449 {
1450 	switch (port) {
1451 	case PORT_B:
1452 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1453 	case PORT_C:
1454 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1455 	case PORT_D:
1456 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1457 	default:
1458 		return false;
1459 	}
1460 }
1461 
1462 /*
1463  * Get a bit mask of pins that have triggered, and which ones may be long.
1464  * This can be called multiple times with the same masks to accumulate
1465  * hotplug detection results from several registers.
1466  *
1467  * Note that the caller is expected to zero out the masks initially.
1468  */
1469 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1470 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1471 			     const u32 hpd[HPD_NUM_PINS],
1472 			     bool long_pulse_detect(enum port port, u32 val))
1473 {
1474 	enum port port;
1475 	int i;
1476 
1477 	for_each_hpd_pin(i) {
1478 		if ((hpd[i] & hotplug_trigger) == 0)
1479 			continue;
1480 
1481 		*pin_mask |= BIT(i);
1482 
1483 		if (!intel_hpd_pin_to_port(i, &port))
1484 			continue;
1485 
1486 		if (long_pulse_detect(port, dig_hotplug_reg))
1487 			*long_mask |= BIT(i);
1488 	}
1489 
1490 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1491 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1492 
1493 }
1494 
1495 static void gmbus_irq_handler(struct drm_device *dev)
1496 {
1497 	struct drm_i915_private *dev_priv = dev->dev_private;
1498 
1499 	wake_up_all(&dev_priv->gmbus_wait_queue);
1500 }
1501 
1502 static void dp_aux_irq_handler(struct drm_device *dev)
1503 {
1504 	struct drm_i915_private *dev_priv = dev->dev_private;
1505 
1506 	wake_up_all(&dev_priv->gmbus_wait_queue);
1507 }
1508 
1509 #if defined(CONFIG_DEBUG_FS)
1510 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1511 					 uint32_t crc0, uint32_t crc1,
1512 					 uint32_t crc2, uint32_t crc3,
1513 					 uint32_t crc4)
1514 {
1515 	struct drm_i915_private *dev_priv = dev->dev_private;
1516 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1517 	struct intel_pipe_crc_entry *entry;
1518 	int head, tail;
1519 
1520 	spin_lock(&pipe_crc->lock);
1521 
1522 	if (!pipe_crc->entries) {
1523 		spin_unlock(&pipe_crc->lock);
1524 		DRM_DEBUG_KMS("spurious interrupt\n");
1525 		return;
1526 	}
1527 
1528 	head = pipe_crc->head;
1529 	tail = pipe_crc->tail;
1530 
1531 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1532 		spin_unlock(&pipe_crc->lock);
1533 		DRM_ERROR("CRC buffer overflowing\n");
1534 		return;
1535 	}
1536 
1537 	entry = &pipe_crc->entries[head];
1538 
1539 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1540 	entry->crc[0] = crc0;
1541 	entry->crc[1] = crc1;
1542 	entry->crc[2] = crc2;
1543 	entry->crc[3] = crc3;
1544 	entry->crc[4] = crc4;
1545 
1546 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1547 	pipe_crc->head = head;
1548 
1549 	spin_unlock(&pipe_crc->lock);
1550 
1551 	wake_up_interruptible(&pipe_crc->wq);
1552 }
1553 #else
1554 static inline void
1555 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1556 			     uint32_t crc0, uint32_t crc1,
1557 			     uint32_t crc2, uint32_t crc3,
1558 			     uint32_t crc4) {}
1559 #endif
1560 
1561 
1562 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1563 {
1564 	struct drm_i915_private *dev_priv = dev->dev_private;
1565 
1566 	display_pipe_crc_irq_handler(dev, pipe,
1567 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1568 				     0, 0, 0, 0);
1569 }
1570 
1571 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1572 {
1573 	struct drm_i915_private *dev_priv = dev->dev_private;
1574 
1575 	display_pipe_crc_irq_handler(dev, pipe,
1576 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1577 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1578 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1579 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1580 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1581 }
1582 
1583 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1584 {
1585 	struct drm_i915_private *dev_priv = dev->dev_private;
1586 	uint32_t res1, res2;
1587 
1588 	if (INTEL_INFO(dev)->gen >= 3)
1589 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1590 	else
1591 		res1 = 0;
1592 
1593 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1594 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1595 	else
1596 		res2 = 0;
1597 
1598 	display_pipe_crc_irq_handler(dev, pipe,
1599 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1600 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1601 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1602 				     res1, res2);
1603 }
1604 
1605 /* The RPS events need forcewake, so we add them to a work queue and mask their
1606  * IMR bits until the work is done. Other interrupts can be processed without
1607  * the work queue. */
1608 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1609 {
1610 	if (pm_iir & dev_priv->pm_rps_events) {
1611 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1612 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1613 		if (dev_priv->rps.interrupts_enabled) {
1614 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1615 			queue_work(dev_priv->wq, &dev_priv->rps.work);
1616 		}
1617 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1618 	}
1619 
1620 	if (INTEL_INFO(dev_priv)->gen >= 8)
1621 		return;
1622 
1623 	if (HAS_VEBOX(dev_priv->dev)) {
1624 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1625 			notify_ring(&dev_priv->ring[VECS]);
1626 
1627 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1628 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1629 	}
1630 }
1631 
1632 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
1633 {
1634 	if (!drm_handle_vblank(dev, pipe))
1635 		return false;
1636 
1637 	return true;
1638 }
1639 
1640 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1641 {
1642 	struct drm_i915_private *dev_priv = dev->dev_private;
1643 	u32 pipe_stats[I915_MAX_PIPES] = { };
1644 	int pipe;
1645 
1646 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1647 
1648 	if (!dev_priv->display_irqs_enabled) {
1649 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1650 		return;
1651 	}
1652 
1653 	for_each_pipe(dev_priv, pipe) {
1654 		i915_reg_t reg;
1655 		u32 mask, iir_bit = 0;
1656 
1657 		/*
1658 		 * PIPESTAT bits get signalled even when the interrupt is
1659 		 * disabled with the mask bits, and some of the status bits do
1660 		 * not generate interrupts at all (like the underrun bit). Hence
1661 		 * we need to be careful that we only handle what we want to
1662 		 * handle.
1663 		 */
1664 
1665 		/* fifo underruns are filterered in the underrun handler. */
1666 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1667 
1668 		switch (pipe) {
1669 		case PIPE_A:
1670 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1671 			break;
1672 		case PIPE_B:
1673 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1674 			break;
1675 		case PIPE_C:
1676 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1677 			break;
1678 		}
1679 		if (iir & iir_bit)
1680 			mask |= dev_priv->pipestat_irq_mask[pipe];
1681 
1682 		if (!mask)
1683 			continue;
1684 
1685 		reg = PIPESTAT(pipe);
1686 		mask |= PIPESTAT_INT_ENABLE_MASK;
1687 		pipe_stats[pipe] = I915_READ(reg) & mask;
1688 
1689 		/*
1690 		 * Clear the PIPE*STAT regs before the IIR
1691 		 */
1692 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1693 					PIPESTAT_INT_STATUS_MASK))
1694 			I915_WRITE(reg, pipe_stats[pipe]);
1695 	}
1696 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1697 
1698 	for_each_pipe(dev_priv, pipe) {
1699 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1700 		    intel_pipe_handle_vblank(dev, pipe))
1701 			intel_check_page_flip(dev, pipe);
1702 
1703 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1704 			intel_prepare_page_flip(dev, pipe);
1705 			intel_finish_page_flip(dev, pipe);
1706 		}
1707 
1708 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1709 			i9xx_pipe_crc_irq_handler(dev, pipe);
1710 
1711 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1712 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1713 	}
1714 
1715 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1716 		gmbus_irq_handler(dev);
1717 }
1718 
1719 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1720 {
1721 	struct drm_i915_private *dev_priv = dev->dev_private;
1722 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1723 	u32 pin_mask = 0, long_mask = 0;
1724 
1725 	if (!hotplug_status)
1726 		return;
1727 
1728 	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1729 	/*
1730 	 * Make sure hotplug status is cleared before we clear IIR, or else we
1731 	 * may miss hotplug events.
1732 	 */
1733 	POSTING_READ(PORT_HOTPLUG_STAT);
1734 
1735 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1736 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1737 
1738 		if (hotplug_trigger) {
1739 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1740 					   hotplug_trigger, hpd_status_g4x,
1741 					   i9xx_port_hotplug_long_detect);
1742 
1743 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1744 		}
1745 
1746 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1747 			dp_aux_irq_handler(dev);
1748 	} else {
1749 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1750 
1751 		if (hotplug_trigger) {
1752 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1753 					   hotplug_trigger, hpd_status_i915,
1754 					   i9xx_port_hotplug_long_detect);
1755 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1756 		}
1757 	}
1758 }
1759 
1760 static irqreturn_t valleyview_irq_handler(void *arg)
1761 {
1762 	struct drm_device *dev = arg;
1763 	struct drm_i915_private *dev_priv = dev->dev_private;
1764 	u32 iir, gt_iir, pm_iir;
1765 
1766 	if (!intel_irqs_enabled(dev_priv))
1767 		return IRQ_NONE;
1768 
1769 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1770 	disable_rpm_wakeref_asserts(dev_priv);
1771 
1772 	while (true) {
1773 		/* Find, clear, then process each source of interrupt */
1774 
1775 		gt_iir = I915_READ(GTIIR);
1776 		if (gt_iir)
1777 			I915_WRITE(GTIIR, gt_iir);
1778 
1779 		pm_iir = I915_READ(GEN6_PMIIR);
1780 		if (pm_iir)
1781 			I915_WRITE(GEN6_PMIIR, pm_iir);
1782 
1783 		iir = I915_READ(VLV_IIR);
1784 		if (iir) {
1785 			/* Consume port before clearing IIR or we'll miss events */
1786 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1787 				i9xx_hpd_irq_handler(dev);
1788 			I915_WRITE(VLV_IIR, iir);
1789 		}
1790 
1791 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1792 			goto out;
1793 
1794 
1795 		if (gt_iir)
1796 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1797 		if (pm_iir)
1798 			gen6_rps_irq_handler(dev_priv, pm_iir);
1799 		/* Call regardless, as some status bits might not be
1800 		 * signalled in iir */
1801 		valleyview_pipestat_irq_handler(dev, iir);
1802 	}
1803 
1804 out:
1805 	enable_rpm_wakeref_asserts(dev_priv);
1806 
1807 }
1808 
1809 static irqreturn_t cherryview_irq_handler(void *arg)
1810 {
1811 	struct drm_device *dev = arg;
1812 	struct drm_i915_private *dev_priv = dev->dev_private;
1813 	u32 master_ctl, iir;
1814 
1815 	if (!intel_irqs_enabled(dev_priv))
1816 		return IRQ_NONE;
1817 
1818 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1819 	disable_rpm_wakeref_asserts(dev_priv);
1820 
1821 	do {
1822 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1823 		iir = I915_READ(VLV_IIR);
1824 
1825 		if (master_ctl == 0 && iir == 0)
1826 			break;
1827 
1828 
1829 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1830 
1831 		/* Find, clear, then process each source of interrupt */
1832 
1833 		if (iir) {
1834 			/* Consume port before clearing IIR or we'll miss events */
1835 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1836 				i9xx_hpd_irq_handler(dev);
1837 			I915_WRITE(VLV_IIR, iir);
1838 		}
1839 
1840 		gen8_gt_irq_handler(dev_priv, master_ctl);
1841 
1842 		/* Call regardless, as some status bits might not be
1843 		 * signalled in iir */
1844 		valleyview_pipestat_irq_handler(dev, iir);
1845 
1846 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1847 		POSTING_READ(GEN8_MASTER_IRQ);
1848 	} while (0);
1849 
1850 	enable_rpm_wakeref_asserts(dev_priv);
1851 
1852 }
1853 
1854 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1855 				const u32 hpd[HPD_NUM_PINS])
1856 {
1857 	struct drm_i915_private *dev_priv = to_i915(dev);
1858 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1859 
1860 	/*
1861 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1862 	 * unless we touch the hotplug register, even if hotplug_trigger is
1863 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1864 	 * errors.
1865 	 */
1866 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1867 	if (!hotplug_trigger) {
1868 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1869 			PORTD_HOTPLUG_STATUS_MASK |
1870 			PORTC_HOTPLUG_STATUS_MASK |
1871 			PORTB_HOTPLUG_STATUS_MASK;
1872 		dig_hotplug_reg &= ~mask;
1873 	}
1874 
1875 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1876 	if (!hotplug_trigger)
1877 		return;
1878 
1879 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1880 			   dig_hotplug_reg, hpd,
1881 			   pch_port_hotplug_long_detect);
1882 
1883 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
1884 }
1885 
1886 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1887 {
1888 	struct drm_i915_private *dev_priv = dev->dev_private;
1889 	int pipe;
1890 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1891 
1892 	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1893 
1894 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1895 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1896 			       SDE_AUDIO_POWER_SHIFT);
1897 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1898 				 port_name(port));
1899 	}
1900 
1901 	if (pch_iir & SDE_AUX_MASK)
1902 		dp_aux_irq_handler(dev);
1903 
1904 	if (pch_iir & SDE_GMBUS)
1905 		gmbus_irq_handler(dev);
1906 
1907 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1908 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1909 
1910 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1911 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1912 
1913 	if (pch_iir & SDE_POISON)
1914 		DRM_ERROR("PCH poison interrupt\n");
1915 
1916 	if (pch_iir & SDE_FDI_MASK)
1917 		for_each_pipe(dev_priv, pipe)
1918 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1919 					 pipe_name(pipe),
1920 					 I915_READ(FDI_RX_IIR(pipe)));
1921 
1922 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1923 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1924 
1925 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1926 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1927 
1928 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1929 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1930 
1931 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1932 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1933 }
1934 
1935 static void ivb_err_int_handler(struct drm_device *dev)
1936 {
1937 	struct drm_i915_private *dev_priv = dev->dev_private;
1938 	u32 err_int = I915_READ(GEN7_ERR_INT);
1939 	enum i915_pipe pipe;
1940 
1941 	if (err_int & ERR_INT_POISON)
1942 		DRM_ERROR("Poison interrupt\n");
1943 
1944 	for_each_pipe(dev_priv, pipe) {
1945 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1946 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1947 
1948 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1949 			if (IS_IVYBRIDGE(dev))
1950 				ivb_pipe_crc_irq_handler(dev, pipe);
1951 			else
1952 				hsw_pipe_crc_irq_handler(dev, pipe);
1953 		}
1954 	}
1955 
1956 	I915_WRITE(GEN7_ERR_INT, err_int);
1957 }
1958 
1959 static void cpt_serr_int_handler(struct drm_device *dev)
1960 {
1961 	struct drm_i915_private *dev_priv = dev->dev_private;
1962 	u32 serr_int = I915_READ(SERR_INT);
1963 
1964 	if (serr_int & SERR_INT_POISON)
1965 		DRM_ERROR("PCH poison interrupt\n");
1966 
1967 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1968 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1969 
1970 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1971 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1972 
1973 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1974 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1975 
1976 	I915_WRITE(SERR_INT, serr_int);
1977 }
1978 
1979 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1980 {
1981 	struct drm_i915_private *dev_priv = dev->dev_private;
1982 	int pipe;
1983 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1984 
1985 	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1986 
1987 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1988 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1989 			       SDE_AUDIO_POWER_SHIFT_CPT);
1990 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1991 				 port_name(port));
1992 	}
1993 
1994 	if (pch_iir & SDE_AUX_MASK_CPT)
1995 		dp_aux_irq_handler(dev);
1996 
1997 	if (pch_iir & SDE_GMBUS_CPT)
1998 		gmbus_irq_handler(dev);
1999 
2000 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2001 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2002 
2003 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2004 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2005 
2006 	if (pch_iir & SDE_FDI_MASK_CPT)
2007 		for_each_pipe(dev_priv, pipe)
2008 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2009 					 pipe_name(pipe),
2010 					 I915_READ(FDI_RX_IIR(pipe)));
2011 
2012 	if (pch_iir & SDE_ERROR_CPT)
2013 		cpt_serr_int_handler(dev);
2014 }
2015 
2016 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2017 {
2018 	struct drm_i915_private *dev_priv = dev->dev_private;
2019 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2020 		~SDE_PORTE_HOTPLUG_SPT;
2021 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2022 	u32 pin_mask = 0, long_mask = 0;
2023 
2024 	if (hotplug_trigger) {
2025 		u32 dig_hotplug_reg;
2026 
2027 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2028 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2029 
2030 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2031 				   dig_hotplug_reg, hpd_spt,
2032 				   spt_port_hotplug_long_detect);
2033 	}
2034 
2035 	if (hotplug2_trigger) {
2036 		u32 dig_hotplug_reg;
2037 
2038 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2039 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2040 
2041 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2042 				   dig_hotplug_reg, hpd_spt,
2043 				   spt_port_hotplug2_long_detect);
2044 	}
2045 
2046 	if (pin_mask)
2047 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
2048 
2049 	if (pch_iir & SDE_GMBUS_CPT)
2050 		gmbus_irq_handler(dev);
2051 }
2052 
2053 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2054 				const u32 hpd[HPD_NUM_PINS])
2055 {
2056 	struct drm_i915_private *dev_priv = to_i915(dev);
2057 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2058 
2059 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2060 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2061 
2062 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2063 			   dig_hotplug_reg, hpd,
2064 			   ilk_port_hotplug_long_detect);
2065 
2066 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2067 }
2068 
2069 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2070 {
2071 	struct drm_i915_private *dev_priv = dev->dev_private;
2072 	enum i915_pipe pipe;
2073 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2074 
2075 	if (hotplug_trigger)
2076 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2077 
2078 	if (de_iir & DE_AUX_CHANNEL_A)
2079 		dp_aux_irq_handler(dev);
2080 
2081 	if (de_iir & DE_GSE)
2082 		intel_opregion_asle_intr(dev);
2083 
2084 	if (de_iir & DE_POISON)
2085 		DRM_ERROR("Poison interrupt\n");
2086 
2087 	for_each_pipe(dev_priv, pipe) {
2088 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2089 		    intel_pipe_handle_vblank(dev, pipe))
2090 			intel_check_page_flip(dev, pipe);
2091 
2092 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2093 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2094 
2095 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2096 			i9xx_pipe_crc_irq_handler(dev, pipe);
2097 
2098 		/* plane/pipes map 1:1 on ilk+ */
2099 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2100 			intel_prepare_page_flip(dev, pipe);
2101 			intel_finish_page_flip_plane(dev, pipe);
2102 		}
2103 	}
2104 
2105 	/* check event from PCH */
2106 	if (de_iir & DE_PCH_EVENT) {
2107 		u32 pch_iir = I915_READ(SDEIIR);
2108 
2109 		if (HAS_PCH_CPT(dev))
2110 			cpt_irq_handler(dev, pch_iir);
2111 		else
2112 			ibx_irq_handler(dev, pch_iir);
2113 
2114 		/* should clear PCH hotplug event before clear CPU irq */
2115 		I915_WRITE(SDEIIR, pch_iir);
2116 	}
2117 
2118 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2119 		ironlake_rps_change_irq_handler(dev);
2120 }
2121 
2122 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2123 {
2124 	struct drm_i915_private *dev_priv = dev->dev_private;
2125 	enum i915_pipe pipe;
2126 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2127 
2128 	if (hotplug_trigger)
2129 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2130 
2131 	if (de_iir & DE_ERR_INT_IVB)
2132 		ivb_err_int_handler(dev);
2133 
2134 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2135 		dp_aux_irq_handler(dev);
2136 
2137 	if (de_iir & DE_GSE_IVB)
2138 		intel_opregion_asle_intr(dev);
2139 
2140 	for_each_pipe(dev_priv, pipe) {
2141 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2142 		    intel_pipe_handle_vblank(dev, pipe))
2143 			intel_check_page_flip(dev, pipe);
2144 
2145 		/* plane/pipes map 1:1 on ilk+ */
2146 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2147 			intel_prepare_page_flip(dev, pipe);
2148 			intel_finish_page_flip_plane(dev, pipe);
2149 		}
2150 	}
2151 
2152 	/* check event from PCH */
2153 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2154 		u32 pch_iir = I915_READ(SDEIIR);
2155 
2156 		cpt_irq_handler(dev, pch_iir);
2157 
2158 		/* clear PCH hotplug event before clear CPU irq */
2159 		I915_WRITE(SDEIIR, pch_iir);
2160 	}
2161 }
2162 
2163 /*
2164  * To handle irqs with the minimum potential races with fresh interrupts, we:
2165  * 1 - Disable Master Interrupt Control.
2166  * 2 - Find the source(s) of the interrupt.
2167  * 3 - Clear the Interrupt Identity bits (IIR).
2168  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2169  * 5 - Re-enable Master Interrupt Control.
2170  */
2171 static irqreturn_t ironlake_irq_handler(void *arg)
2172 {
2173 	struct drm_device *dev = arg;
2174 	struct drm_i915_private *dev_priv = dev->dev_private;
2175 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2176 
2177 	if (!intel_irqs_enabled(dev_priv))
2178 		return IRQ_NONE;
2179 
2180 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2181 	disable_rpm_wakeref_asserts(dev_priv);
2182 
2183 	/* disable master interrupt before clearing iir  */
2184 	de_ier = I915_READ(DEIER);
2185 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2186 	POSTING_READ(DEIER);
2187 
2188 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2189 	 * interrupts will will be stored on its back queue, and then we'll be
2190 	 * able to process them after we restore SDEIER (as soon as we restore
2191 	 * it, we'll get an interrupt if SDEIIR still has something to process
2192 	 * due to its back queue). */
2193 	if (!HAS_PCH_NOP(dev)) {
2194 		sde_ier = I915_READ(SDEIER);
2195 		I915_WRITE(SDEIER, 0);
2196 		POSTING_READ(SDEIER);
2197 	}
2198 
2199 	/* Find, clear, then process each source of interrupt */
2200 
2201 	gt_iir = I915_READ(GTIIR);
2202 	if (gt_iir) {
2203 		I915_WRITE(GTIIR, gt_iir);
2204 		if (INTEL_INFO(dev)->gen >= 6)
2205 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2206 		else
2207 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2208 	}
2209 
2210 	de_iir = I915_READ(DEIIR);
2211 	if (de_iir) {
2212 		I915_WRITE(DEIIR, de_iir);
2213 		if (INTEL_INFO(dev)->gen >= 7)
2214 			ivb_display_irq_handler(dev, de_iir);
2215 		else
2216 			ilk_display_irq_handler(dev, de_iir);
2217 	}
2218 
2219 	if (INTEL_INFO(dev)->gen >= 6) {
2220 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2221 		if (pm_iir) {
2222 			I915_WRITE(GEN6_PMIIR, pm_iir);
2223 			gen6_rps_irq_handler(dev_priv, pm_iir);
2224 		}
2225 	}
2226 
2227 	I915_WRITE(DEIER, de_ier);
2228 	POSTING_READ(DEIER);
2229 	if (!HAS_PCH_NOP(dev)) {
2230 		I915_WRITE(SDEIER, sde_ier);
2231 		POSTING_READ(SDEIER);
2232 	}
2233 
2234 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2235 	enable_rpm_wakeref_asserts(dev_priv);
2236 
2237 }
2238 
2239 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2240 				const u32 hpd[HPD_NUM_PINS])
2241 {
2242 	struct drm_i915_private *dev_priv = to_i915(dev);
2243 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2244 
2245 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2246 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2247 
2248 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2249 			   dig_hotplug_reg, hpd,
2250 			   bxt_port_hotplug_long_detect);
2251 
2252 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2253 }
2254 
2255 static irqreturn_t
2256 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2257 {
2258 	struct drm_device *dev = dev_priv->dev;
2259 	u32 iir;
2260 	enum i915_pipe pipe;
2261 
2262 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2263 		iir = I915_READ(GEN8_DE_MISC_IIR);
2264 		if (iir) {
2265 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2266 			if (iir & GEN8_DE_MISC_GSE)
2267 				intel_opregion_asle_intr(dev);
2268 			else
2269 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2270 		}
2271 		else
2272 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2273 	}
2274 
2275 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2276 		iir = I915_READ(GEN8_DE_PORT_IIR);
2277 		if (iir) {
2278 			u32 tmp_mask;
2279 			bool found = false;
2280 
2281 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2282 
2283 			tmp_mask = GEN8_AUX_CHANNEL_A;
2284 			if (INTEL_INFO(dev_priv)->gen >= 9)
2285 				tmp_mask |= GEN9_AUX_CHANNEL_B |
2286 					    GEN9_AUX_CHANNEL_C |
2287 					    GEN9_AUX_CHANNEL_D;
2288 
2289 			if (iir & tmp_mask) {
2290 				dp_aux_irq_handler(dev);
2291 				found = true;
2292 			}
2293 
2294 			if (IS_BROXTON(dev_priv)) {
2295 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2296 				if (tmp_mask) {
2297 					bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
2298 					found = true;
2299 				}
2300 			} else if (IS_BROADWELL(dev_priv)) {
2301 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2302 				if (tmp_mask) {
2303 					ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
2304 					found = true;
2305 				}
2306 			}
2307 
2308 			if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
2309 				gmbus_irq_handler(dev);
2310 				found = true;
2311 			}
2312 
2313 			if (!found)
2314 				DRM_ERROR("Unexpected DE Port interrupt\n");
2315 		}
2316 		else
2317 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2318 	}
2319 
2320 	for_each_pipe(dev_priv, pipe) {
2321 		u32 flip_done, fault_errors;
2322 
2323 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2324 			continue;
2325 
2326 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2327 		if (!iir) {
2328 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2329 			continue;
2330 		}
2331 
2332 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2333 
2334 		if (iir & GEN8_PIPE_VBLANK &&
2335 		    intel_pipe_handle_vblank(dev, pipe))
2336 			intel_check_page_flip(dev, pipe);
2337 
2338 		flip_done = iir;
2339 		if (INTEL_INFO(dev_priv)->gen >= 9)
2340 			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2341 		else
2342 			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2343 
2344 		if (flip_done) {
2345 			intel_prepare_page_flip(dev, pipe);
2346 			intel_finish_page_flip_plane(dev, pipe);
2347 		}
2348 
2349 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2350 			hsw_pipe_crc_irq_handler(dev, pipe);
2351 
2352 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2353 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2354 
2355 		fault_errors = iir;
2356 		if (INTEL_INFO(dev_priv)->gen >= 9)
2357 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2358 		else
2359 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2360 
2361 		if (fault_errors)
2362 			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2363 				  pipe_name(pipe),
2364 				  fault_errors);
2365 	}
2366 
2367 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2368 	    master_ctl & GEN8_DE_PCH_IRQ) {
2369 		/*
2370 		 * FIXME(BDW): Assume for now that the new interrupt handling
2371 		 * scheme also closed the SDE interrupt handling race we've seen
2372 		 * on older pch-split platforms. But this needs testing.
2373 		 */
2374 		iir = I915_READ(SDEIIR);
2375 		if (iir) {
2376 			I915_WRITE(SDEIIR, iir);
2377 
2378 			if (HAS_PCH_SPT(dev_priv))
2379 				spt_irq_handler(dev, iir);
2380 			else
2381 				cpt_irq_handler(dev, iir);
2382 		} else {
2383 			/*
2384 			 * Like on previous PCH there seems to be something
2385 			 * fishy going on with forwarding PCH interrupts.
2386 			 */
2387 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2388 		}
2389 	}
2390 
2391 }
2392 
2393 static irqreturn_t gen8_irq_handler(void *arg)
2394 {
2395 	struct drm_device *dev = arg;
2396 	struct drm_i915_private *dev_priv = dev->dev_private;
2397 	u32 master_ctl;
2398 
2399 	if (!intel_irqs_enabled(dev_priv))
2400 		return IRQ_NONE;
2401 
2402 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2403 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2404 	if (!master_ctl)
2405 		return IRQ_NONE;
2406 
2407 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2408 
2409 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2410 	disable_rpm_wakeref_asserts(dev_priv);
2411 
2412 	/* Find, clear, then process each source of interrupt */
2413 	gen8_gt_irq_handler(dev_priv, master_ctl);
2414 	gen8_de_irq_handler(dev_priv, master_ctl);
2415 
2416 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2417 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2418 
2419 	enable_rpm_wakeref_asserts(dev_priv);
2420 
2421 }
2422 
2423 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2424 			       bool reset_completed)
2425 {
2426 	struct intel_engine_cs *ring;
2427 	int i;
2428 
2429 	/*
2430 	 * Notify all waiters for GPU completion events that reset state has
2431 	 * been changed, and that they need to restart their wait after
2432 	 * checking for potential errors (and bail out to drop locks if there is
2433 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2434 	 */
2435 
2436 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2437 	for_each_ring(ring, dev_priv, i)
2438 		wake_up_all(&ring->irq_queue);
2439 
2440 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2441 	wake_up_all(&dev_priv->pending_flip_queue);
2442 
2443 	/*
2444 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2445 	 * reset state is cleared.
2446 	 */
2447 	if (reset_completed)
2448 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2449 }
2450 
2451 /**
2452  * i915_reset_and_wakeup - do process context error handling work
2453  * @dev: drm device
2454  *
2455  * Fire an error uevent so userspace can see that a hang or error
2456  * was detected.
2457  */
2458 static void i915_reset_and_wakeup(struct drm_device *dev)
2459 {
2460 	struct drm_i915_private *dev_priv = to_i915(dev);
2461 	struct i915_gpu_error *error = &dev_priv->gpu_error;
2462 #if 0
2463 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2464 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2465 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2466 #endif
2467 	int ret;
2468 
2469 #if 0
2470 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2471 #endif
2472 
2473 	/*
2474 	 * Note that there's only one work item which does gpu resets, so we
2475 	 * need not worry about concurrent gpu resets potentially incrementing
2476 	 * error->reset_counter twice. We only need to take care of another
2477 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2478 	 * quick check for that is good enough: schedule_work ensures the
2479 	 * correct ordering between hang detection and this work item, and since
2480 	 * the reset in-progress bit is only ever set by code outside of this
2481 	 * work we don't need to worry about any other races.
2482 	 */
2483 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2484 		DRM_DEBUG_DRIVER("resetting chip\n");
2485 #if 0
2486 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2487 				   reset_event);
2488 #endif
2489 
2490 		/*
2491 		 * In most cases it's guaranteed that we get here with an RPM
2492 		 * reference held, for example because there is a pending GPU
2493 		 * request that won't finish until the reset is done. This
2494 		 * isn't the case at least when we get here by doing a
2495 		 * simulated reset via debugs, so get an RPM reference.
2496 		 */
2497 		intel_runtime_pm_get(dev_priv);
2498 
2499 		intel_prepare_reset(dev);
2500 
2501 		/*
2502 		 * All state reset _must_ be completed before we update the
2503 		 * reset counter, for otherwise waiters might miss the reset
2504 		 * pending state and not properly drop locks, resulting in
2505 		 * deadlocks with the reset work.
2506 		 */
2507 		ret = i915_reset(dev);
2508 
2509 		intel_finish_reset(dev);
2510 
2511 		intel_runtime_pm_put(dev_priv);
2512 
2513 		if (ret == 0) {
2514 			/*
2515 			 * After all the gem state is reset, increment the reset
2516 			 * counter and wake up everyone waiting for the reset to
2517 			 * complete.
2518 			 *
2519 			 * Since unlock operations are a one-sided barrier only,
2520 			 * we need to insert a barrier here to order any seqno
2521 			 * updates before
2522 			 * the counter increment.
2523 			 */
2524 			smp_mb__before_atomic();
2525 			atomic_inc(&dev_priv->gpu_error.reset_counter);
2526 
2527 #if 0
2528 			kobject_uevent_env(&dev->primary->kdev->kobj,
2529 					   KOBJ_CHANGE, reset_done_event);
2530 #endif
2531 		} else {
2532 			atomic_or(I915_WEDGED, &error->reset_counter);
2533 		}
2534 
2535 		/*
2536 		 * Note: The wake_up also serves as a memory barrier so that
2537 		 * waiters see the update value of the reset counter atomic_t.
2538 		 */
2539 		i915_error_wake_up(dev_priv, true);
2540 	}
2541 }
2542 
2543 static void i915_report_and_clear_eir(struct drm_device *dev)
2544 {
2545 	struct drm_i915_private *dev_priv = dev->dev_private;
2546 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2547 	u32 eir = I915_READ(EIR);
2548 	int pipe, i;
2549 
2550 	if (!eir)
2551 		return;
2552 
2553 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2554 
2555 	i915_get_extra_instdone(dev, instdone);
2556 
2557 	if (IS_G4X(dev)) {
2558 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2559 			u32 ipeir = I915_READ(IPEIR_I965);
2560 
2561 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2562 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2563 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2564 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2565 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2566 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2567 			I915_WRITE(IPEIR_I965, ipeir);
2568 			POSTING_READ(IPEIR_I965);
2569 		}
2570 		if (eir & GM45_ERROR_PAGE_TABLE) {
2571 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2572 			pr_err("page table error\n");
2573 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2574 			I915_WRITE(PGTBL_ER, pgtbl_err);
2575 			POSTING_READ(PGTBL_ER);
2576 		}
2577 	}
2578 
2579 	if (!IS_GEN2(dev)) {
2580 		if (eir & I915_ERROR_PAGE_TABLE) {
2581 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2582 			pr_err("page table error\n");
2583 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2584 			I915_WRITE(PGTBL_ER, pgtbl_err);
2585 			POSTING_READ(PGTBL_ER);
2586 		}
2587 	}
2588 
2589 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2590 		pr_err("memory refresh error:\n");
2591 		for_each_pipe(dev_priv, pipe)
2592 			pr_err("pipe %c stat: 0x%08x\n",
2593 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2594 		/* pipestat has already been acked */
2595 	}
2596 	if (eir & I915_ERROR_INSTRUCTION) {
2597 		pr_err("instruction error\n");
2598 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2599 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2600 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2601 		if (INTEL_INFO(dev)->gen < 4) {
2602 			u32 ipeir = I915_READ(IPEIR);
2603 
2604 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2605 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2606 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2607 			I915_WRITE(IPEIR, ipeir);
2608 			POSTING_READ(IPEIR);
2609 		} else {
2610 			u32 ipeir = I915_READ(IPEIR_I965);
2611 
2612 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2613 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2614 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2615 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2616 			I915_WRITE(IPEIR_I965, ipeir);
2617 			POSTING_READ(IPEIR_I965);
2618 		}
2619 	}
2620 
2621 	I915_WRITE(EIR, eir);
2622 	POSTING_READ(EIR);
2623 	eir = I915_READ(EIR);
2624 	if (eir) {
2625 		/*
2626 		 * some errors might have become stuck,
2627 		 * mask them.
2628 		 */
2629 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2630 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2631 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2632 	}
2633 }
2634 
2635 /**
2636  * i915_handle_error - handle a gpu error
2637  * @dev: drm device
2638  *
2639  * Do some basic checking of register state at error time and
2640  * dump it to the syslog.  Also call i915_capture_error_state() to make
2641  * sure we get a record and make it available in debugfs.  Fire a uevent
2642  * so userspace knows something bad happened (should trigger collection
2643  * of a ring dump etc.).
2644  */
2645 void i915_handle_error(struct drm_device *dev, bool wedged,
2646 		       const char *fmt, ...)
2647 {
2648 	struct drm_i915_private *dev_priv = dev->dev_private;
2649 #if 0
2650 	va_list args;
2651 	char error_msg[80];
2652 
2653 	va_start(args, fmt);
2654 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2655 	va_end(args);
2656 
2657 	i915_capture_error_state(dev, wedged, error_msg);
2658 #endif
2659 	i915_report_and_clear_eir(dev);
2660 
2661 	if (wedged) {
2662 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2663 				&dev_priv->gpu_error.reset_counter);
2664 
2665 		/*
2666 		 * Wakeup waiting processes so that the reset function
2667 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2668 		 * various locks. By bumping the reset counter first, the woken
2669 		 * processes will see a reset in progress and back off,
2670 		 * releasing their locks and then wait for the reset completion.
2671 		 * We must do this for _all_ gpu waiters that might hold locks
2672 		 * that the reset work needs to acquire.
2673 		 *
2674 		 * Note: The wake_up serves as the required memory barrier to
2675 		 * ensure that the waiters see the updated value of the reset
2676 		 * counter atomic_t.
2677 		 */
2678 		i915_error_wake_up(dev_priv, false);
2679 	}
2680 
2681 	i915_reset_and_wakeup(dev);
2682 }
2683 
2684 /* Called from drm generic code, passed 'crtc' which
2685  * we use as a pipe index
2686  */
2687 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2688 {
2689 	struct drm_i915_private *dev_priv = dev->dev_private;
2690 	unsigned long irqflags;
2691 
2692 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2693 	if (INTEL_INFO(dev)->gen >= 4)
2694 		i915_enable_pipestat(dev_priv, pipe,
2695 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2696 	else
2697 		i915_enable_pipestat(dev_priv, pipe,
2698 				     PIPE_VBLANK_INTERRUPT_STATUS);
2699 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2700 
2701 	return 0;
2702 }
2703 
2704 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2705 {
2706 	struct drm_i915_private *dev_priv = dev->dev_private;
2707 	unsigned long irqflags;
2708 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2709 						     DE_PIPE_VBLANK(pipe);
2710 
2711 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2712 	ilk_enable_display_irq(dev_priv, bit);
2713 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2714 
2715 	return 0;
2716 }
2717 
2718 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2719 {
2720 	struct drm_i915_private *dev_priv = dev->dev_private;
2721 	unsigned long irqflags;
2722 
2723 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2724 	i915_enable_pipestat(dev_priv, pipe,
2725 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2726 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2727 
2728 	return 0;
2729 }
2730 
2731 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2732 {
2733 	struct drm_i915_private *dev_priv = dev->dev_private;
2734 	unsigned long irqflags;
2735 
2736 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2737 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2738 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2739 
2740 	return 0;
2741 }
2742 
2743 /* Called from drm generic code, passed 'crtc' which
2744  * we use as a pipe index
2745  */
2746 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2747 {
2748 	struct drm_i915_private *dev_priv = dev->dev_private;
2749 	unsigned long irqflags;
2750 
2751 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2752 	i915_disable_pipestat(dev_priv, pipe,
2753 			      PIPE_VBLANK_INTERRUPT_STATUS |
2754 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2755 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2756 }
2757 
2758 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2759 {
2760 	struct drm_i915_private *dev_priv = dev->dev_private;
2761 	unsigned long irqflags;
2762 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2763 						     DE_PIPE_VBLANK(pipe);
2764 
2765 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2766 	ilk_disable_display_irq(dev_priv, bit);
2767 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2768 }
2769 
2770 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2771 {
2772 	struct drm_i915_private *dev_priv = dev->dev_private;
2773 	unsigned long irqflags;
2774 
2775 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2776 	i915_disable_pipestat(dev_priv, pipe,
2777 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2778 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2779 }
2780 
2781 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2782 {
2783 	struct drm_i915_private *dev_priv = dev->dev_private;
2784 	unsigned long irqflags;
2785 
2786 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2787 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2788 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2789 }
2790 
2791 static bool
2792 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2793 {
2794 	return (list_empty(&ring->request_list) ||
2795 		i915_seqno_passed(seqno, ring->last_submitted_seqno));
2796 }
2797 
2798 static bool
2799 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2800 {
2801 	if (INTEL_INFO(dev)->gen >= 8) {
2802 		return (ipehr >> 23) == 0x1c;
2803 	} else {
2804 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2805 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2806 				 MI_SEMAPHORE_REGISTER);
2807 	}
2808 }
2809 
2810 static struct intel_engine_cs *
2811 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2812 {
2813 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2814 	struct intel_engine_cs *signaller;
2815 	int i;
2816 
2817 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2818 		for_each_ring(signaller, dev_priv, i) {
2819 			if (ring == signaller)
2820 				continue;
2821 
2822 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2823 				return signaller;
2824 		}
2825 	} else {
2826 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2827 
2828 		for_each_ring(signaller, dev_priv, i) {
2829 			if(ring == signaller)
2830 				continue;
2831 
2832 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2833 				return signaller;
2834 		}
2835 	}
2836 
2837 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2838 		  ring->id, ipehr, offset);
2839 
2840 	return NULL;
2841 }
2842 
2843 static struct intel_engine_cs *
2844 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2845 {
2846 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2847 	u32 cmd, ipehr, head;
2848 	u64 offset = 0;
2849 	int i, backwards;
2850 
2851 	/*
2852 	 * This function does not support execlist mode - any attempt to
2853 	 * proceed further into this function will result in a kernel panic
2854 	 * when dereferencing ring->buffer, which is not set up in execlist
2855 	 * mode.
2856 	 *
2857 	 * The correct way of doing it would be to derive the currently
2858 	 * executing ring buffer from the current context, which is derived
2859 	 * from the currently running request. Unfortunately, to get the
2860 	 * current request we would have to grab the struct_mutex before doing
2861 	 * anything else, which would be ill-advised since some other thread
2862 	 * might have grabbed it already and managed to hang itself, causing
2863 	 * the hang checker to deadlock.
2864 	 *
2865 	 * Therefore, this function does not support execlist mode in its
2866 	 * current form. Just return NULL and move on.
2867 	 */
2868 	if (ring->buffer == NULL)
2869 		return NULL;
2870 
2871 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2872 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2873 		return NULL;
2874 
2875 	/*
2876 	 * HEAD is likely pointing to the dword after the actual command,
2877 	 * so scan backwards until we find the MBOX. But limit it to just 3
2878 	 * or 4 dwords depending on the semaphore wait command size.
2879 	 * Note that we don't care about ACTHD here since that might
2880 	 * point at at batch, and semaphores are always emitted into the
2881 	 * ringbuffer itself.
2882 	 */
2883 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2884 	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2885 
2886 	for (i = backwards; i; --i) {
2887 		/*
2888 		 * Be paranoid and presume the hw has gone off into the wild -
2889 		 * our ring is smaller than what the hardware (and hence
2890 		 * HEAD_ADDR) allows. Also handles wrap-around.
2891 		 */
2892 		head &= ring->buffer->size - 1;
2893 
2894 		/* This here seems to blow up */
2895 		cmd = ioread32(ring->buffer->virtual_start + head);
2896 		if (cmd == ipehr)
2897 			break;
2898 
2899 		head -= 4;
2900 	}
2901 
2902 	if (!i)
2903 		return NULL;
2904 
2905 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2906 	if (INTEL_INFO(ring->dev)->gen >= 8) {
2907 		offset = ioread32(ring->buffer->virtual_start + head + 12);
2908 		offset <<= 32;
2909 		offset = ioread32(ring->buffer->virtual_start + head + 8);
2910 	}
2911 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2912 }
2913 
2914 static int semaphore_passed(struct intel_engine_cs *ring)
2915 {
2916 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2917 	struct intel_engine_cs *signaller;
2918 	u32 seqno;
2919 
2920 	ring->hangcheck.deadlock++;
2921 
2922 	signaller = semaphore_waits_for(ring, &seqno);
2923 	if (signaller == NULL)
2924 		return -1;
2925 
2926 	/* Prevent pathological recursion due to driver bugs */
2927 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2928 		return -1;
2929 
2930 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2931 		return 1;
2932 
2933 	/* cursory check for an unkickable deadlock */
2934 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2935 	    semaphore_passed(signaller) < 0)
2936 		return -1;
2937 
2938 	return 0;
2939 }
2940 
2941 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2942 {
2943 	struct intel_engine_cs *ring;
2944 	int i;
2945 
2946 	for_each_ring(ring, dev_priv, i)
2947 		ring->hangcheck.deadlock = 0;
2948 }
2949 
2950 static bool subunits_stuck(struct intel_engine_cs *ring)
2951 {
2952 	u32 instdone[I915_NUM_INSTDONE_REG];
2953 	bool stuck;
2954 	int i;
2955 
2956 	if (ring->id != RCS)
2957 		return true;
2958 
2959 	i915_get_extra_instdone(ring->dev, instdone);
2960 
2961 	/* There might be unstable subunit states even when
2962 	 * actual head is not moving. Filter out the unstable ones by
2963 	 * accumulating the undone -> done transitions and only
2964 	 * consider those as progress.
2965 	 */
2966 	stuck = true;
2967 	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
2968 		const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
2969 
2970 		if (tmp != ring->hangcheck.instdone[i])
2971 			stuck = false;
2972 
2973 		ring->hangcheck.instdone[i] |= tmp;
2974 	}
2975 
2976 	return stuck;
2977 }
2978 
2979 static enum intel_ring_hangcheck_action
2980 head_stuck(struct intel_engine_cs *ring, u64 acthd)
2981 {
2982 	if (acthd != ring->hangcheck.acthd) {
2983 
2984 		/* Clear subunit states on head movement */
2985 		memset(ring->hangcheck.instdone, 0,
2986 		       sizeof(ring->hangcheck.instdone));
2987 
2988 		if (acthd > ring->hangcheck.max_acthd) {
2989 			ring->hangcheck.max_acthd = acthd;
2990 			return HANGCHECK_ACTIVE;
2991 		}
2992 
2993 		return HANGCHECK_ACTIVE_LOOP;
2994 	}
2995 
2996 	if (!subunits_stuck(ring))
2997 		return HANGCHECK_ACTIVE;
2998 
2999 	return HANGCHECK_HUNG;
3000 }
3001 
3002 static enum intel_ring_hangcheck_action
3003 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3004 {
3005 	struct drm_device *dev = ring->dev;
3006 	struct drm_i915_private *dev_priv = dev->dev_private;
3007 	enum intel_ring_hangcheck_action ha;
3008 	u32 tmp;
3009 
3010 	ha = head_stuck(ring, acthd);
3011 	if (ha != HANGCHECK_HUNG)
3012 		return ha;
3013 
3014 	if (IS_GEN2(dev))
3015 		return HANGCHECK_HUNG;
3016 
3017 	/* Is the chip hanging on a WAIT_FOR_EVENT?
3018 	 * If so we can simply poke the RB_WAIT bit
3019 	 * and break the hang. This should work on
3020 	 * all but the second generation chipsets.
3021 	 */
3022 	tmp = I915_READ_CTL(ring);
3023 	if (tmp & RING_WAIT) {
3024 		i915_handle_error(dev, false,
3025 				  "Kicking stuck wait on %s",
3026 				  ring->name);
3027 		I915_WRITE_CTL(ring, tmp);
3028 		return HANGCHECK_KICK;
3029 	}
3030 
3031 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3032 		switch (semaphore_passed(ring)) {
3033 		default:
3034 			return HANGCHECK_HUNG;
3035 		case 1:
3036 			i915_handle_error(dev, false,
3037 					  "Kicking stuck semaphore on %s",
3038 					  ring->name);
3039 			I915_WRITE_CTL(ring, tmp);
3040 			return HANGCHECK_KICK;
3041 		case 0:
3042 			return HANGCHECK_WAIT;
3043 		}
3044 	}
3045 
3046 	return HANGCHECK_HUNG;
3047 }
3048 
3049 /*
3050  * This is called when the chip hasn't reported back with completed
3051  * batchbuffers in a long time. We keep track per ring seqno progress and
3052  * if there are no progress, hangcheck score for that ring is increased.
3053  * Further, acthd is inspected to see if the ring is stuck. On stuck case
3054  * we kick the ring. If we see no progress on three subsequent calls
3055  * we assume chip is wedged and try to fix it by resetting the chip.
3056  */
3057 static void i915_hangcheck_elapsed(struct work_struct *work)
3058 {
3059 	struct drm_i915_private *dev_priv =
3060 		container_of(work, typeof(*dev_priv),
3061 			     gpu_error.hangcheck_work.work);
3062 	struct drm_device *dev = dev_priv->dev;
3063 	struct intel_engine_cs *ring;
3064 	int i;
3065 	int busy_count = 0, rings_hung = 0;
3066 	bool stuck[I915_NUM_RINGS] = { 0 };
3067 #define BUSY 1
3068 #define KICK 5
3069 #define HUNG 20
3070 
3071 	if (!i915.enable_hangcheck)
3072 		return;
3073 
3074 	/*
3075 	 * The hangcheck work is synced during runtime suspend, we don't
3076 	 * require a wakeref. TODO: instead of disabling the asserts make
3077 	 * sure that we hold a reference when this work is running.
3078 	 */
3079 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3080 
3081 	/* As enabling the GPU requires fairly extensive mmio access,
3082 	 * periodically arm the mmio checker to see if we are triggering
3083 	 * any invalid access.
3084 	 */
3085 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3086 
3087 	for_each_ring(ring, dev_priv, i) {
3088 		u64 acthd;
3089 		u32 seqno;
3090 		bool busy = true;
3091 
3092 		semaphore_clear_deadlocks(dev_priv);
3093 
3094 		seqno = ring->get_seqno(ring, false);
3095 		acthd = intel_ring_get_active_head(ring);
3096 
3097 		if (ring->hangcheck.seqno == seqno) {
3098 			if (ring_idle(ring, seqno)) {
3099 				ring->hangcheck.action = HANGCHECK_IDLE;
3100 
3101 				if (waitqueue_active(&ring->irq_queue)) {
3102 					/* Issue a wake-up to catch stuck h/w. */
3103 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3104 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3105 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3106 								  ring->name);
3107 						else
3108 							DRM_INFO("Fake missed irq on %s\n",
3109 								 ring->name);
3110 						wake_up_all(&ring->irq_queue);
3111 					}
3112 					/* Safeguard against driver failure */
3113 					ring->hangcheck.score += BUSY;
3114 				} else
3115 					busy = false;
3116 			} else {
3117 				/* We always increment the hangcheck score
3118 				 * if the ring is busy and still processing
3119 				 * the same request, so that no single request
3120 				 * can run indefinitely (such as a chain of
3121 				 * batches). The only time we do not increment
3122 				 * the hangcheck score on this ring, if this
3123 				 * ring is in a legitimate wait for another
3124 				 * ring. In that case the waiting ring is a
3125 				 * victim and we want to be sure we catch the
3126 				 * right culprit. Then every time we do kick
3127 				 * the ring, add a small increment to the
3128 				 * score so that we can catch a batch that is
3129 				 * being repeatedly kicked and so responsible
3130 				 * for stalling the machine.
3131 				 */
3132 				ring->hangcheck.action = ring_stuck(ring,
3133 								    acthd);
3134 
3135 				switch (ring->hangcheck.action) {
3136 				case HANGCHECK_IDLE:
3137 				case HANGCHECK_WAIT:
3138 				case HANGCHECK_ACTIVE:
3139 					break;
3140 				case HANGCHECK_ACTIVE_LOOP:
3141 					ring->hangcheck.score += BUSY;
3142 					break;
3143 				case HANGCHECK_KICK:
3144 					ring->hangcheck.score += KICK;
3145 					break;
3146 				case HANGCHECK_HUNG:
3147 					ring->hangcheck.score += HUNG;
3148 					stuck[i] = true;
3149 					break;
3150 				}
3151 			}
3152 		} else {
3153 			ring->hangcheck.action = HANGCHECK_ACTIVE;
3154 
3155 			/* Gradually reduce the count so that we catch DoS
3156 			 * attempts across multiple batches.
3157 			 */
3158 			if (ring->hangcheck.score > 0)
3159 				ring->hangcheck.score--;
3160 
3161 			/* Clear head and subunit states on seqno movement */
3162 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3163 
3164 			memset(ring->hangcheck.instdone, 0,
3165 			       sizeof(ring->hangcheck.instdone));
3166 		}
3167 
3168 		ring->hangcheck.seqno = seqno;
3169 		ring->hangcheck.acthd = acthd;
3170 		busy_count += busy;
3171 	}
3172 
3173 	for_each_ring(ring, dev_priv, i) {
3174 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3175 			DRM_INFO("%s on %s\n",
3176 				 stuck[i] ? "stuck" : "no progress",
3177 				 ring->name);
3178 			rings_hung++;
3179 		}
3180 	}
3181 
3182 	if (rings_hung) {
3183 		i915_handle_error(dev, true, "Ring hung");
3184 		goto out;
3185 	}
3186 
3187 	if (busy_count)
3188 		/* Reset timer case chip hangs without another request
3189 		 * being added */
3190 		i915_queue_hangcheck(dev);
3191 
3192 out:
3193 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3194 }
3195 
3196 void i915_queue_hangcheck(struct drm_device *dev)
3197 {
3198 	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3199 
3200 	if (!i915.enable_hangcheck)
3201 		return;
3202 
3203 	/* Don't continually defer the hangcheck so that it is always run at
3204 	 * least once after work has been scheduled on any ring. Otherwise,
3205 	 * we will ignore a hung ring if a second ring is kept busy.
3206 	 */
3207 
3208 	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3209 			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3210 }
3211 
3212 static void ibx_irq_reset(struct drm_device *dev)
3213 {
3214 	struct drm_i915_private *dev_priv = dev->dev_private;
3215 
3216 	if (HAS_PCH_NOP(dev))
3217 		return;
3218 
3219 	GEN5_IRQ_RESET(SDE);
3220 
3221 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3222 		I915_WRITE(SERR_INT, 0xffffffff);
3223 }
3224 
3225 /*
3226  * SDEIER is also touched by the interrupt handler to work around missed PCH
3227  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3228  * instead we unconditionally enable all PCH interrupt sources here, but then
3229  * only unmask them as needed with SDEIMR.
3230  *
3231  * This function needs to be called before interrupts are enabled.
3232  */
3233 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3234 {
3235 	struct drm_i915_private *dev_priv = dev->dev_private;
3236 
3237 	if (HAS_PCH_NOP(dev))
3238 		return;
3239 
3240 	WARN_ON(I915_READ(SDEIER) != 0);
3241 	I915_WRITE(SDEIER, 0xffffffff);
3242 	POSTING_READ(SDEIER);
3243 }
3244 
3245 static void gen5_gt_irq_reset(struct drm_device *dev)
3246 {
3247 	struct drm_i915_private *dev_priv = dev->dev_private;
3248 
3249 	GEN5_IRQ_RESET(GT);
3250 	if (INTEL_INFO(dev)->gen >= 6)
3251 		GEN5_IRQ_RESET(GEN6_PM);
3252 }
3253 
3254 /* drm_dma.h hooks
3255 */
3256 static void ironlake_irq_reset(struct drm_device *dev)
3257 {
3258 	struct drm_i915_private *dev_priv = dev->dev_private;
3259 
3260 	I915_WRITE(HWSTAM, 0xffffffff);
3261 
3262 	GEN5_IRQ_RESET(DE);
3263 	if (IS_GEN7(dev))
3264 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3265 
3266 	gen5_gt_irq_reset(dev);
3267 
3268 	ibx_irq_reset(dev);
3269 }
3270 
3271 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3272 {
3273 	enum i915_pipe pipe;
3274 
3275 	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3276 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3277 
3278 	for_each_pipe(dev_priv, pipe)
3279 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3280 
3281 	GEN5_IRQ_RESET(VLV_);
3282 }
3283 
3284 static void valleyview_irq_preinstall(struct drm_device *dev)
3285 {
3286 	struct drm_i915_private *dev_priv = dev->dev_private;
3287 
3288 	/* VLV magic */
3289 	I915_WRITE(VLV_IMR, 0);
3290 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3291 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3292 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3293 
3294 	gen5_gt_irq_reset(dev);
3295 
3296 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3297 
3298 	vlv_display_irq_reset(dev_priv);
3299 }
3300 
3301 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3302 {
3303 	GEN8_IRQ_RESET_NDX(GT, 0);
3304 	GEN8_IRQ_RESET_NDX(GT, 1);
3305 	GEN8_IRQ_RESET_NDX(GT, 2);
3306 	GEN8_IRQ_RESET_NDX(GT, 3);
3307 }
3308 
3309 static void gen8_irq_reset(struct drm_device *dev)
3310 {
3311 	struct drm_i915_private *dev_priv = dev->dev_private;
3312 	int pipe;
3313 
3314 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3315 	POSTING_READ(GEN8_MASTER_IRQ);
3316 
3317 	gen8_gt_irq_reset(dev_priv);
3318 
3319 	for_each_pipe(dev_priv, pipe)
3320 		if (intel_display_power_is_enabled(dev_priv,
3321 						   POWER_DOMAIN_PIPE(pipe)))
3322 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3323 
3324 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3325 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3326 	GEN5_IRQ_RESET(GEN8_PCU_);
3327 
3328 	if (HAS_PCH_SPLIT(dev))
3329 		ibx_irq_reset(dev);
3330 }
3331 
3332 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3333 				     unsigned int pipe_mask)
3334 {
3335 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3336 	enum i915_pipe pipe;
3337 
3338 	spin_lock_irq(&dev_priv->irq_lock);
3339 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3340 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3341 				  dev_priv->de_irq_mask[pipe],
3342 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3343 	spin_unlock_irq(&dev_priv->irq_lock);
3344 }
3345 
3346 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3347 				     unsigned int pipe_mask)
3348 {
3349 	enum i915_pipe pipe;
3350 
3351 	spin_lock_irq(&dev_priv->irq_lock);
3352 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3353 		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3354 	spin_unlock_irq(&dev_priv->irq_lock);
3355 
3356 	/* make sure we're done processing display irqs */
3357 #if 0
3358 	synchronize_irq(dev_priv->dev->irq);
3359 #endif
3360 }
3361 
3362 static void cherryview_irq_preinstall(struct drm_device *dev)
3363 {
3364 	struct drm_i915_private *dev_priv = dev->dev_private;
3365 
3366 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3367 	POSTING_READ(GEN8_MASTER_IRQ);
3368 
3369 	gen8_gt_irq_reset(dev_priv);
3370 
3371 	GEN5_IRQ_RESET(GEN8_PCU_);
3372 
3373 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3374 
3375 	vlv_display_irq_reset(dev_priv);
3376 }
3377 
3378 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3379 				  const u32 hpd[HPD_NUM_PINS])
3380 {
3381 	struct drm_i915_private *dev_priv = to_i915(dev);
3382 	struct intel_encoder *encoder;
3383 	u32 enabled_irqs = 0;
3384 
3385 	for_each_intel_encoder(dev, encoder)
3386 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3387 			enabled_irqs |= hpd[encoder->hpd_pin];
3388 
3389 	return enabled_irqs;
3390 }
3391 
3392 static void ibx_hpd_irq_setup(struct drm_device *dev)
3393 {
3394 	struct drm_i915_private *dev_priv = dev->dev_private;
3395 	u32 hotplug_irqs, hotplug, enabled_irqs;
3396 
3397 	if (HAS_PCH_IBX(dev)) {
3398 		hotplug_irqs = SDE_HOTPLUG_MASK;
3399 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3400 	} else {
3401 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3402 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3403 	}
3404 
3405 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3406 
3407 	/*
3408 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3409 	 * duration to 2ms (which is the minimum in the Display Port spec).
3410 	 * The pulse duration bits are reserved on LPT+.
3411 	 */
3412 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3413 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3414 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3415 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3416 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3417 	/*
3418 	 * When CPU and PCH are on the same package, port A
3419 	 * HPD must be enabled in both north and south.
3420 	 */
3421 	if (HAS_PCH_LPT_LP(dev))
3422 		hotplug |= PORTA_HOTPLUG_ENABLE;
3423 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3424 }
3425 
3426 static void spt_hpd_irq_setup(struct drm_device *dev)
3427 {
3428 	struct drm_i915_private *dev_priv = dev->dev_private;
3429 	u32 hotplug_irqs, hotplug, enabled_irqs;
3430 
3431 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3432 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3433 
3434 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3435 
3436 	/* Enable digital hotplug on the PCH */
3437 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3438 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3439 		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3440 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3441 
3442 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3443 	hotplug |= PORTE_HOTPLUG_ENABLE;
3444 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3445 }
3446 
3447 static void ilk_hpd_irq_setup(struct drm_device *dev)
3448 {
3449 	struct drm_i915_private *dev_priv = dev->dev_private;
3450 	u32 hotplug_irqs, hotplug, enabled_irqs;
3451 
3452 	if (INTEL_INFO(dev)->gen >= 8) {
3453 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3454 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3455 
3456 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3457 	} else if (INTEL_INFO(dev)->gen >= 7) {
3458 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3459 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3460 
3461 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3462 	} else {
3463 		hotplug_irqs = DE_DP_A_HOTPLUG;
3464 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3465 
3466 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3467 	}
3468 
3469 	/*
3470 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3471 	 * duration to 2ms (which is the minimum in the Display Port spec)
3472 	 * The pulse duration bits are reserved on HSW+.
3473 	 */
3474 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3475 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3476 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3477 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3478 
3479 	ibx_hpd_irq_setup(dev);
3480 }
3481 
3482 static void bxt_hpd_irq_setup(struct drm_device *dev)
3483 {
3484 	struct drm_i915_private *dev_priv = dev->dev_private;
3485 	u32 hotplug_irqs, hotplug, enabled_irqs;
3486 
3487 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3488 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3489 
3490 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3491 
3492 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3493 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3494 		PORTA_HOTPLUG_ENABLE;
3495 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3496 }
3497 
3498 static void ibx_irq_postinstall(struct drm_device *dev)
3499 {
3500 	struct drm_i915_private *dev_priv = dev->dev_private;
3501 	u32 mask;
3502 
3503 	if (HAS_PCH_NOP(dev))
3504 		return;
3505 
3506 	if (HAS_PCH_IBX(dev))
3507 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3508 	else
3509 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3510 
3511 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3512 	I915_WRITE(SDEIMR, ~mask);
3513 }
3514 
3515 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3516 {
3517 	struct drm_i915_private *dev_priv = dev->dev_private;
3518 	u32 pm_irqs, gt_irqs;
3519 
3520 	pm_irqs = gt_irqs = 0;
3521 
3522 	dev_priv->gt_irq_mask = ~0;
3523 	if (HAS_L3_DPF(dev)) {
3524 		/* L3 parity interrupt is always unmasked. */
3525 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3526 		gt_irqs |= GT_PARITY_ERROR(dev);
3527 	}
3528 
3529 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3530 	if (IS_GEN5(dev)) {
3531 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3532 			   ILK_BSD_USER_INTERRUPT;
3533 	} else {
3534 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3535 	}
3536 
3537 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3538 
3539 	if (INTEL_INFO(dev)->gen >= 6) {
3540 		/*
3541 		 * RPS interrupts will get enabled/disabled on demand when RPS
3542 		 * itself is enabled/disabled.
3543 		 */
3544 		if (HAS_VEBOX(dev))
3545 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3546 
3547 		dev_priv->pm_irq_mask = 0xffffffff;
3548 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3549 	}
3550 }
3551 
3552 static int ironlake_irq_postinstall(struct drm_device *dev)
3553 {
3554 	struct drm_i915_private *dev_priv = dev->dev_private;
3555 	u32 display_mask, extra_mask;
3556 
3557 	if (INTEL_INFO(dev)->gen >= 7) {
3558 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3559 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3560 				DE_PLANEB_FLIP_DONE_IVB |
3561 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3562 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3563 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3564 			      DE_DP_A_HOTPLUG_IVB);
3565 	} else {
3566 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3567 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3568 				DE_AUX_CHANNEL_A |
3569 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3570 				DE_POISON);
3571 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3572 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3573 			      DE_DP_A_HOTPLUG);
3574 	}
3575 
3576 	dev_priv->irq_mask = ~display_mask;
3577 
3578 	I915_WRITE(HWSTAM, 0xeffe);
3579 
3580 	ibx_irq_pre_postinstall(dev);
3581 
3582 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3583 
3584 	gen5_gt_irq_postinstall(dev);
3585 
3586 	ibx_irq_postinstall(dev);
3587 
3588 	if (IS_IRONLAKE_M(dev)) {
3589 		/* Enable PCU event interrupts
3590 		 *
3591 		 * spinlocking not required here for correctness since interrupt
3592 		 * setup is guaranteed to run in single-threaded context. But we
3593 		 * need it to make the assert_spin_locked happy. */
3594 		spin_lock_irq(&dev_priv->irq_lock);
3595 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3596 		spin_unlock_irq(&dev_priv->irq_lock);
3597 	}
3598 
3599 	return 0;
3600 }
3601 
3602 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3603 {
3604 	u32 pipestat_mask;
3605 	u32 iir_mask;
3606 	enum i915_pipe pipe;
3607 
3608 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3609 			PIPE_FIFO_UNDERRUN_STATUS;
3610 
3611 	for_each_pipe(dev_priv, pipe)
3612 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3613 	POSTING_READ(PIPESTAT(PIPE_A));
3614 
3615 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3616 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3617 
3618 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3619 	for_each_pipe(dev_priv, pipe)
3620 		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3621 
3622 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3623 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3624 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3625 	if (IS_CHERRYVIEW(dev_priv))
3626 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3627 	dev_priv->irq_mask &= ~iir_mask;
3628 
3629 	I915_WRITE(VLV_IIR, iir_mask);
3630 	I915_WRITE(VLV_IIR, iir_mask);
3631 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3632 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3633 	POSTING_READ(VLV_IMR);
3634 }
3635 
3636 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3637 {
3638 	u32 pipestat_mask;
3639 	u32 iir_mask;
3640 	enum i915_pipe pipe;
3641 
3642 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3643 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3644 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3645 	if (IS_CHERRYVIEW(dev_priv))
3646 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3647 
3648 	dev_priv->irq_mask |= iir_mask;
3649 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3650 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3651 	I915_WRITE(VLV_IIR, iir_mask);
3652 	I915_WRITE(VLV_IIR, iir_mask);
3653 	POSTING_READ(VLV_IIR);
3654 
3655 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3656 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3657 
3658 	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3659 	for_each_pipe(dev_priv, pipe)
3660 		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3661 
3662 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3663 			PIPE_FIFO_UNDERRUN_STATUS;
3664 
3665 	for_each_pipe(dev_priv, pipe)
3666 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3667 	POSTING_READ(PIPESTAT(PIPE_A));
3668 }
3669 
3670 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3671 {
3672 	assert_spin_locked(&dev_priv->irq_lock);
3673 
3674 	if (dev_priv->display_irqs_enabled)
3675 		return;
3676 
3677 	dev_priv->display_irqs_enabled = true;
3678 
3679 	if (intel_irqs_enabled(dev_priv))
3680 		valleyview_display_irqs_install(dev_priv);
3681 }
3682 
3683 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3684 {
3685 	assert_spin_locked(&dev_priv->irq_lock);
3686 
3687 	if (!dev_priv->display_irqs_enabled)
3688 		return;
3689 
3690 	dev_priv->display_irqs_enabled = false;
3691 
3692 	if (intel_irqs_enabled(dev_priv))
3693 		valleyview_display_irqs_uninstall(dev_priv);
3694 }
3695 
3696 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3697 {
3698 	dev_priv->irq_mask = ~0;
3699 
3700 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3701 	POSTING_READ(PORT_HOTPLUG_EN);
3702 
3703 	I915_WRITE(VLV_IIR, 0xffffffff);
3704 	I915_WRITE(VLV_IIR, 0xffffffff);
3705 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3706 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3707 	POSTING_READ(VLV_IMR);
3708 
3709 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3710 	 * just to make the assert_spin_locked check happy. */
3711 	spin_lock_irq(&dev_priv->irq_lock);
3712 	if (dev_priv->display_irqs_enabled)
3713 		valleyview_display_irqs_install(dev_priv);
3714 	spin_unlock_irq(&dev_priv->irq_lock);
3715 }
3716 
3717 static int valleyview_irq_postinstall(struct drm_device *dev)
3718 {
3719 	struct drm_i915_private *dev_priv = dev->dev_private;
3720 
3721 	vlv_display_irq_postinstall(dev_priv);
3722 
3723 	gen5_gt_irq_postinstall(dev);
3724 
3725 	/* ack & enable invalid PTE error interrupts */
3726 #if 0 /* FIXME: add support to irq handler for checking these bits */
3727 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3728 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3729 #endif
3730 
3731 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3732 
3733 	return 0;
3734 }
3735 
3736 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3737 {
3738 	/* These are interrupts we'll toggle with the ring mask register */
3739 	uint32_t gt_interrupts[] = {
3740 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3741 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3742 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3743 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3744 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3745 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3746 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3747 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3748 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3749 		0,
3750 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3751 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3752 		};
3753 
3754 	dev_priv->pm_irq_mask = 0xffffffff;
3755 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3756 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3757 	/*
3758 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3759 	 * is enabled/disabled.
3760 	 */
3761 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3762 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3763 }
3764 
3765 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3766 {
3767 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3768 	uint32_t de_pipe_enables;
3769 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3770 	u32 de_port_enables;
3771 	enum i915_pipe pipe;
3772 
3773 	if (INTEL_INFO(dev_priv)->gen >= 9) {
3774 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3775 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3776 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3777 				  GEN9_AUX_CHANNEL_D;
3778 		if (IS_BROXTON(dev_priv))
3779 			de_port_masked |= BXT_DE_PORT_GMBUS;
3780 	} else {
3781 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3782 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3783 	}
3784 
3785 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3786 					   GEN8_PIPE_FIFO_UNDERRUN;
3787 
3788 	de_port_enables = de_port_masked;
3789 	if (IS_BROXTON(dev_priv))
3790 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3791 	else if (IS_BROADWELL(dev_priv))
3792 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3793 
3794 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3795 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3796 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3797 
3798 	for_each_pipe(dev_priv, pipe)
3799 		if (intel_display_power_is_enabled(dev_priv,
3800 				POWER_DOMAIN_PIPE(pipe)))
3801 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3802 					  dev_priv->de_irq_mask[pipe],
3803 					  de_pipe_enables);
3804 
3805 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3806 }
3807 
3808 static int gen8_irq_postinstall(struct drm_device *dev)
3809 {
3810 	struct drm_i915_private *dev_priv = dev->dev_private;
3811 
3812 	if (HAS_PCH_SPLIT(dev))
3813 		ibx_irq_pre_postinstall(dev);
3814 
3815 	gen8_gt_irq_postinstall(dev_priv);
3816 	gen8_de_irq_postinstall(dev_priv);
3817 
3818 	if (HAS_PCH_SPLIT(dev))
3819 		ibx_irq_postinstall(dev);
3820 
3821 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3822 	POSTING_READ(GEN8_MASTER_IRQ);
3823 
3824 	return 0;
3825 }
3826 
3827 static int cherryview_irq_postinstall(struct drm_device *dev)
3828 {
3829 	struct drm_i915_private *dev_priv = dev->dev_private;
3830 
3831 	vlv_display_irq_postinstall(dev_priv);
3832 
3833 	gen8_gt_irq_postinstall(dev_priv);
3834 
3835 	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3836 	POSTING_READ(GEN8_MASTER_IRQ);
3837 
3838 	return 0;
3839 }
3840 
3841 static void gen8_irq_uninstall(struct drm_device *dev)
3842 {
3843 	struct drm_i915_private *dev_priv = dev->dev_private;
3844 
3845 	if (!dev_priv)
3846 		return;
3847 
3848 	gen8_irq_reset(dev);
3849 }
3850 
3851 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3852 {
3853 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3854 	 * just to make the assert_spin_locked check happy. */
3855 	spin_lock_irq(&dev_priv->irq_lock);
3856 	if (dev_priv->display_irqs_enabled)
3857 		valleyview_display_irqs_uninstall(dev_priv);
3858 	spin_unlock_irq(&dev_priv->irq_lock);
3859 
3860 	vlv_display_irq_reset(dev_priv);
3861 
3862 	dev_priv->irq_mask = ~0;
3863 }
3864 
3865 static void valleyview_irq_uninstall(struct drm_device *dev)
3866 {
3867 	struct drm_i915_private *dev_priv = dev->dev_private;
3868 
3869 	if (!dev_priv)
3870 		return;
3871 
3872 	I915_WRITE(VLV_MASTER_IER, 0);
3873 
3874 	gen5_gt_irq_reset(dev);
3875 
3876 	I915_WRITE(HWSTAM, 0xffffffff);
3877 
3878 	vlv_display_irq_uninstall(dev_priv);
3879 }
3880 
3881 static void cherryview_irq_uninstall(struct drm_device *dev)
3882 {
3883 	struct drm_i915_private *dev_priv = dev->dev_private;
3884 
3885 	if (!dev_priv)
3886 		return;
3887 
3888 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3889 	POSTING_READ(GEN8_MASTER_IRQ);
3890 
3891 	gen8_gt_irq_reset(dev_priv);
3892 
3893 	GEN5_IRQ_RESET(GEN8_PCU_);
3894 
3895 	vlv_display_irq_uninstall(dev_priv);
3896 }
3897 
3898 static void ironlake_irq_uninstall(struct drm_device *dev)
3899 {
3900 	struct drm_i915_private *dev_priv = dev->dev_private;
3901 
3902 	if (!dev_priv)
3903 		return;
3904 
3905 	ironlake_irq_reset(dev);
3906 }
3907 
3908 static void i8xx_irq_preinstall(struct drm_device * dev)
3909 {
3910 	struct drm_i915_private *dev_priv = dev->dev_private;
3911 	int pipe;
3912 
3913 	for_each_pipe(dev_priv, pipe)
3914 		I915_WRITE(PIPESTAT(pipe), 0);
3915 	I915_WRITE16(IMR, 0xffff);
3916 	I915_WRITE16(IER, 0x0);
3917 	POSTING_READ16(IER);
3918 }
3919 
3920 static int i8xx_irq_postinstall(struct drm_device *dev)
3921 {
3922 	struct drm_i915_private *dev_priv = dev->dev_private;
3923 
3924 	I915_WRITE16(EMR,
3925 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3926 
3927 	/* Unmask the interrupts that we always want on. */
3928 	dev_priv->irq_mask =
3929 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3930 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3931 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3932 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3933 	I915_WRITE16(IMR, dev_priv->irq_mask);
3934 
3935 	I915_WRITE16(IER,
3936 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3937 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3938 		     I915_USER_INTERRUPT);
3939 	POSTING_READ16(IER);
3940 
3941 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3942 	 * just to make the assert_spin_locked check happy. */
3943 	spin_lock_irq(&dev_priv->irq_lock);
3944 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3945 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3946 	spin_unlock_irq(&dev_priv->irq_lock);
3947 
3948 	return 0;
3949 }
3950 
3951 /*
3952  * Returns true when a page flip has completed.
3953  */
3954 static bool i8xx_handle_vblank(struct drm_device *dev,
3955 			       int plane, int pipe, u32 iir)
3956 {
3957 	struct drm_i915_private *dev_priv = dev->dev_private;
3958 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3959 
3960 	if (!intel_pipe_handle_vblank(dev, pipe))
3961 		return false;
3962 
3963 	if ((iir & flip_pending) == 0)
3964 		goto check_page_flip;
3965 
3966 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3967 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3968 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3969 	 * the flip is completed (no longer pending). Since this doesn't raise
3970 	 * an interrupt per se, we watch for the change at vblank.
3971 	 */
3972 	if (I915_READ16(ISR) & flip_pending)
3973 		goto check_page_flip;
3974 
3975 	intel_prepare_page_flip(dev, plane);
3976 	intel_finish_page_flip(dev, pipe);
3977 	return true;
3978 
3979 check_page_flip:
3980 	intel_check_page_flip(dev, pipe);
3981 	return false;
3982 }
3983 
3984 static irqreturn_t i8xx_irq_handler(void *arg)
3985 {
3986 	struct drm_device *dev = arg;
3987 	struct drm_i915_private *dev_priv = dev->dev_private;
3988 	u16 iir, new_iir;
3989 	u32 pipe_stats[2];
3990 	int pipe;
3991 	u16 flip_mask =
3992 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3993 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3994 
3995 	if (!intel_irqs_enabled(dev_priv))
3996 		return IRQ_NONE;
3997 
3998 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3999 	disable_rpm_wakeref_asserts(dev_priv);
4000 
4001 	iir = I915_READ16(IIR);
4002 	if (iir == 0)
4003 		goto out;
4004 
4005 	while (iir & ~flip_mask) {
4006 		/* Can't rely on pipestat interrupt bit in iir as it might
4007 		 * have been cleared after the pipestat interrupt was received.
4008 		 * It doesn't set the bit in iir again, but it still produces
4009 		 * interrupts (for non-MSI).
4010 		 */
4011 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4012 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4013 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4014 
4015 		for_each_pipe(dev_priv, pipe) {
4016 			i915_reg_t reg = PIPESTAT(pipe);
4017 			pipe_stats[pipe] = I915_READ(reg);
4018 
4019 			/*
4020 			 * Clear the PIPE*STAT regs before the IIR
4021 			 */
4022 			if (pipe_stats[pipe] & 0x8000ffff)
4023 				I915_WRITE(reg, pipe_stats[pipe]);
4024 		}
4025 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4026 
4027 		I915_WRITE16(IIR, iir & ~flip_mask);
4028 		new_iir = I915_READ16(IIR); /* Flush posted writes */
4029 
4030 		if (iir & I915_USER_INTERRUPT)
4031 			notify_ring(&dev_priv->ring[RCS]);
4032 
4033 		for_each_pipe(dev_priv, pipe) {
4034 			int plane = pipe;
4035 			if (HAS_FBC(dev))
4036 				plane = !plane;
4037 
4038 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4039 			    i8xx_handle_vblank(dev, plane, pipe, iir))
4040 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4041 
4042 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4043 				i9xx_pipe_crc_irq_handler(dev, pipe);
4044 
4045 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4046 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4047 								    pipe);
4048 		}
4049 
4050 		iir = new_iir;
4051 	}
4052 
4053 out:
4054 	enable_rpm_wakeref_asserts(dev_priv);
4055 
4056 }
4057 
4058 static void i8xx_irq_uninstall(struct drm_device * dev)
4059 {
4060 	struct drm_i915_private *dev_priv = dev->dev_private;
4061 	int pipe;
4062 
4063 	for_each_pipe(dev_priv, pipe) {
4064 		/* Clear enable bits; then clear status bits */
4065 		I915_WRITE(PIPESTAT(pipe), 0);
4066 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4067 	}
4068 	I915_WRITE16(IMR, 0xffff);
4069 	I915_WRITE16(IER, 0x0);
4070 	I915_WRITE16(IIR, I915_READ16(IIR));
4071 }
4072 
4073 static void i915_irq_preinstall(struct drm_device * dev)
4074 {
4075 	struct drm_i915_private *dev_priv = dev->dev_private;
4076 	int pipe;
4077 
4078 	if (I915_HAS_HOTPLUG(dev)) {
4079 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4080 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4081 	}
4082 
4083 	I915_WRITE16(HWSTAM, 0xeffe);
4084 	for_each_pipe(dev_priv, pipe)
4085 		I915_WRITE(PIPESTAT(pipe), 0);
4086 	I915_WRITE(IMR, 0xffffffff);
4087 	I915_WRITE(IER, 0x0);
4088 	POSTING_READ(IER);
4089 }
4090 
4091 static int i915_irq_postinstall(struct drm_device *dev)
4092 {
4093 	struct drm_i915_private *dev_priv = dev->dev_private;
4094 	u32 enable_mask;
4095 
4096 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4097 
4098 	/* Unmask the interrupts that we always want on. */
4099 	dev_priv->irq_mask =
4100 		~(I915_ASLE_INTERRUPT |
4101 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4102 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4103 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4104 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4105 
4106 	enable_mask =
4107 		I915_ASLE_INTERRUPT |
4108 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4109 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4110 		I915_USER_INTERRUPT;
4111 
4112 	if (I915_HAS_HOTPLUG(dev)) {
4113 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4114 		POSTING_READ(PORT_HOTPLUG_EN);
4115 
4116 		/* Enable in IER... */
4117 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4118 		/* and unmask in IMR */
4119 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4120 	}
4121 
4122 	I915_WRITE(IMR, dev_priv->irq_mask);
4123 	I915_WRITE(IER, enable_mask);
4124 	POSTING_READ(IER);
4125 
4126 	i915_enable_asle_pipestat(dev);
4127 
4128 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4129 	 * just to make the assert_spin_locked check happy. */
4130 	spin_lock_irq(&dev_priv->irq_lock);
4131 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4132 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4133 	spin_unlock_irq(&dev_priv->irq_lock);
4134 
4135 	return 0;
4136 }
4137 
4138 /*
4139  * Returns true when a page flip has completed.
4140  */
4141 static bool i915_handle_vblank(struct drm_device *dev,
4142 			       int plane, int pipe, u32 iir)
4143 {
4144 	struct drm_i915_private *dev_priv = dev->dev_private;
4145 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4146 
4147 	if (!intel_pipe_handle_vblank(dev, pipe))
4148 		return false;
4149 
4150 	if ((iir & flip_pending) == 0)
4151 		goto check_page_flip;
4152 
4153 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
4154 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4155 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4156 	 * the flip is completed (no longer pending). Since this doesn't raise
4157 	 * an interrupt per se, we watch for the change at vblank.
4158 	 */
4159 	if (I915_READ(ISR) & flip_pending)
4160 		goto check_page_flip;
4161 
4162 	intel_prepare_page_flip(dev, plane);
4163 	intel_finish_page_flip(dev, pipe);
4164 	return true;
4165 
4166 check_page_flip:
4167 	intel_check_page_flip(dev, pipe);
4168 	return false;
4169 }
4170 
4171 static irqreturn_t i915_irq_handler(void *arg)
4172 {
4173 	struct drm_device *dev = arg;
4174 	struct drm_i915_private *dev_priv = dev->dev_private;
4175 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4176 	u32 flip_mask =
4177 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4178 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4179 	int pipe;
4180 
4181 	if (!intel_irqs_enabled(dev_priv))
4182 		return IRQ_NONE;
4183 
4184 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4185 	disable_rpm_wakeref_asserts(dev_priv);
4186 
4187 	iir = I915_READ(IIR);
4188 	do {
4189 		bool irq_received = (iir & ~flip_mask) != 0;
4190 		bool blc_event = false;
4191 
4192 		/* Can't rely on pipestat interrupt bit in iir as it might
4193 		 * have been cleared after the pipestat interrupt was received.
4194 		 * It doesn't set the bit in iir again, but it still produces
4195 		 * interrupts (for non-MSI).
4196 		 */
4197 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4198 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4199 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4200 
4201 		for_each_pipe(dev_priv, pipe) {
4202 			i915_reg_t reg = PIPESTAT(pipe);
4203 			pipe_stats[pipe] = I915_READ(reg);
4204 
4205 			/* Clear the PIPE*STAT regs before the IIR */
4206 			if (pipe_stats[pipe] & 0x8000ffff) {
4207 				I915_WRITE(reg, pipe_stats[pipe]);
4208 				irq_received = true;
4209 			}
4210 		}
4211 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4212 
4213 		if (!irq_received)
4214 			break;
4215 
4216 		/* Consume port.  Then clear IIR or we'll miss events */
4217 		if (I915_HAS_HOTPLUG(dev) &&
4218 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4219 			i9xx_hpd_irq_handler(dev);
4220 
4221 		I915_WRITE(IIR, iir & ~flip_mask);
4222 		new_iir = I915_READ(IIR); /* Flush posted writes */
4223 
4224 		if (iir & I915_USER_INTERRUPT)
4225 			notify_ring(&dev_priv->ring[RCS]);
4226 
4227 		for_each_pipe(dev_priv, pipe) {
4228 			int plane = pipe;
4229 			if (HAS_FBC(dev))
4230 				plane = !plane;
4231 
4232 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4233 			    i915_handle_vblank(dev, plane, pipe, iir))
4234 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4235 
4236 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4237 				blc_event = true;
4238 
4239 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4240 				i9xx_pipe_crc_irq_handler(dev, pipe);
4241 
4242 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4243 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4244 								    pipe);
4245 		}
4246 
4247 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4248 			intel_opregion_asle_intr(dev);
4249 
4250 		/* With MSI, interrupts are only generated when iir
4251 		 * transitions from zero to nonzero.  If another bit got
4252 		 * set while we were handling the existing iir bits, then
4253 		 * we would never get another interrupt.
4254 		 *
4255 		 * This is fine on non-MSI as well, as if we hit this path
4256 		 * we avoid exiting the interrupt handler only to generate
4257 		 * another one.
4258 		 *
4259 		 * Note that for MSI this could cause a stray interrupt report
4260 		 * if an interrupt landed in the time between writing IIR and
4261 		 * the posting read.  This should be rare enough to never
4262 		 * trigger the 99% of 100,000 interrupts test for disabling
4263 		 * stray interrupts.
4264 		 */
4265 		iir = new_iir;
4266 	} while (iir & ~flip_mask);
4267 
4268 	enable_rpm_wakeref_asserts(dev_priv);
4269 
4270 }
4271 
4272 static void i915_irq_uninstall(struct drm_device * dev)
4273 {
4274 	struct drm_i915_private *dev_priv = dev->dev_private;
4275 	int pipe;
4276 
4277 	if (I915_HAS_HOTPLUG(dev)) {
4278 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4279 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4280 	}
4281 
4282 	I915_WRITE16(HWSTAM, 0xffff);
4283 	for_each_pipe(dev_priv, pipe) {
4284 		/* Clear enable bits; then clear status bits */
4285 		I915_WRITE(PIPESTAT(pipe), 0);
4286 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4287 	}
4288 	I915_WRITE(IMR, 0xffffffff);
4289 	I915_WRITE(IER, 0x0);
4290 
4291 	I915_WRITE(IIR, I915_READ(IIR));
4292 }
4293 
4294 static void i965_irq_preinstall(struct drm_device * dev)
4295 {
4296 	struct drm_i915_private *dev_priv = dev->dev_private;
4297 	int pipe;
4298 
4299 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4300 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4301 
4302 	I915_WRITE(HWSTAM, 0xeffe);
4303 	for_each_pipe(dev_priv, pipe)
4304 		I915_WRITE(PIPESTAT(pipe), 0);
4305 	I915_WRITE(IMR, 0xffffffff);
4306 	I915_WRITE(IER, 0x0);
4307 	POSTING_READ(IER);
4308 }
4309 
4310 static int i965_irq_postinstall(struct drm_device *dev)
4311 {
4312 	struct drm_i915_private *dev_priv = dev->dev_private;
4313 	u32 enable_mask;
4314 	u32 error_mask;
4315 
4316 	/* Unmask the interrupts that we always want on. */
4317 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4318 			       I915_DISPLAY_PORT_INTERRUPT |
4319 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4320 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4321 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4322 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4323 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4324 
4325 	enable_mask = ~dev_priv->irq_mask;
4326 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4327 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4328 	enable_mask |= I915_USER_INTERRUPT;
4329 
4330 	if (IS_G4X(dev))
4331 		enable_mask |= I915_BSD_USER_INTERRUPT;
4332 
4333 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4334 	 * just to make the assert_spin_locked check happy. */
4335 	spin_lock_irq(&dev_priv->irq_lock);
4336 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4337 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4338 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4339 	spin_unlock_irq(&dev_priv->irq_lock);
4340 
4341 	/*
4342 	 * Enable some error detection, note the instruction error mask
4343 	 * bit is reserved, so we leave it masked.
4344 	 */
4345 	if (IS_G4X(dev)) {
4346 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4347 			       GM45_ERROR_MEM_PRIV |
4348 			       GM45_ERROR_CP_PRIV |
4349 			       I915_ERROR_MEMORY_REFRESH);
4350 	} else {
4351 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4352 			       I915_ERROR_MEMORY_REFRESH);
4353 	}
4354 	I915_WRITE(EMR, error_mask);
4355 
4356 	I915_WRITE(IMR, dev_priv->irq_mask);
4357 	I915_WRITE(IER, enable_mask);
4358 	POSTING_READ(IER);
4359 
4360 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4361 	POSTING_READ(PORT_HOTPLUG_EN);
4362 
4363 	i915_enable_asle_pipestat(dev);
4364 
4365 	return 0;
4366 }
4367 
4368 static void i915_hpd_irq_setup(struct drm_device *dev)
4369 {
4370 	struct drm_i915_private *dev_priv = dev->dev_private;
4371 	u32 hotplug_en;
4372 
4373 	assert_spin_locked(&dev_priv->irq_lock);
4374 
4375 	/* Note HDMI and DP share hotplug bits */
4376 	/* enable bits are the same for all generations */
4377 	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4378 	/* Programming the CRT detection parameters tends
4379 	   to generate a spurious hotplug event about three
4380 	   seconds later.  So just do it once.
4381 	*/
4382 	if (IS_G4X(dev))
4383 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4384 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4385 
4386 	/* Ignore TV since it's buggy */
4387 	i915_hotplug_interrupt_update_locked(dev_priv,
4388 					     HOTPLUG_INT_EN_MASK |
4389 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4390 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4391 					     hotplug_en);
4392 }
4393 
4394 static irqreturn_t i965_irq_handler(void *arg)
4395 {
4396 	struct drm_device *dev = arg;
4397 	struct drm_i915_private *dev_priv = dev->dev_private;
4398 	u32 iir, new_iir;
4399 	u32 pipe_stats[I915_MAX_PIPES];
4400 	int pipe;
4401 	u32 flip_mask =
4402 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4403 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4404 
4405 	if (!intel_irqs_enabled(dev_priv))
4406 		return IRQ_NONE;
4407 
4408 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4409 	disable_rpm_wakeref_asserts(dev_priv);
4410 
4411 	iir = I915_READ(IIR);
4412 
4413 	for (;;) {
4414 		bool irq_received = (iir & ~flip_mask) != 0;
4415 		bool blc_event = false;
4416 
4417 		/* Can't rely on pipestat interrupt bit in iir as it might
4418 		 * have been cleared after the pipestat interrupt was received.
4419 		 * It doesn't set the bit in iir again, but it still produces
4420 		 * interrupts (for non-MSI).
4421 		 */
4422 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4423 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4424 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4425 
4426 		for_each_pipe(dev_priv, pipe) {
4427 			i915_reg_t reg = PIPESTAT(pipe);
4428 			pipe_stats[pipe] = I915_READ(reg);
4429 
4430 			/*
4431 			 * Clear the PIPE*STAT regs before the IIR
4432 			 */
4433 			if (pipe_stats[pipe] & 0x8000ffff) {
4434 				I915_WRITE(reg, pipe_stats[pipe]);
4435 				irq_received = true;
4436 			}
4437 		}
4438 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4439 
4440 		if (!irq_received)
4441 			break;
4442 
4443 
4444 		/* Consume port.  Then clear IIR or we'll miss events */
4445 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4446 			i9xx_hpd_irq_handler(dev);
4447 
4448 		I915_WRITE(IIR, iir & ~flip_mask);
4449 		new_iir = I915_READ(IIR); /* Flush posted writes */
4450 
4451 		if (iir & I915_USER_INTERRUPT)
4452 			notify_ring(&dev_priv->ring[RCS]);
4453 		if (iir & I915_BSD_USER_INTERRUPT)
4454 			notify_ring(&dev_priv->ring[VCS]);
4455 
4456 		for_each_pipe(dev_priv, pipe) {
4457 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4458 			    i915_handle_vblank(dev, pipe, pipe, iir))
4459 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4460 
4461 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4462 				blc_event = true;
4463 
4464 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4465 				i9xx_pipe_crc_irq_handler(dev, pipe);
4466 
4467 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4468 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4469 		}
4470 
4471 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4472 			intel_opregion_asle_intr(dev);
4473 
4474 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4475 			gmbus_irq_handler(dev);
4476 
4477 		/* With MSI, interrupts are only generated when iir
4478 		 * transitions from zero to nonzero.  If another bit got
4479 		 * set while we were handling the existing iir bits, then
4480 		 * we would never get another interrupt.
4481 		 *
4482 		 * This is fine on non-MSI as well, as if we hit this path
4483 		 * we avoid exiting the interrupt handler only to generate
4484 		 * another one.
4485 		 *
4486 		 * Note that for MSI this could cause a stray interrupt report
4487 		 * if an interrupt landed in the time between writing IIR and
4488 		 * the posting read.  This should be rare enough to never
4489 		 * trigger the 99% of 100,000 interrupts test for disabling
4490 		 * stray interrupts.
4491 		 */
4492 		iir = new_iir;
4493 	}
4494 
4495 	enable_rpm_wakeref_asserts(dev_priv);
4496 
4497 }
4498 
4499 static void i965_irq_uninstall(struct drm_device * dev)
4500 {
4501 	struct drm_i915_private *dev_priv = dev->dev_private;
4502 	int pipe;
4503 
4504 	if (!dev_priv)
4505 		return;
4506 
4507 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4508 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4509 
4510 	I915_WRITE(HWSTAM, 0xffffffff);
4511 	for_each_pipe(dev_priv, pipe)
4512 		I915_WRITE(PIPESTAT(pipe), 0);
4513 	I915_WRITE(IMR, 0xffffffff);
4514 	I915_WRITE(IER, 0x0);
4515 
4516 	for_each_pipe(dev_priv, pipe)
4517 		I915_WRITE(PIPESTAT(pipe),
4518 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4519 	I915_WRITE(IIR, I915_READ(IIR));
4520 }
4521 
4522 /**
4523  * intel_irq_init - initializes irq support
4524  * @dev_priv: i915 device instance
4525  *
4526  * This function initializes all the irq support including work items, timers
4527  * and all the vtables. It does not setup the interrupt itself though.
4528  */
4529 void intel_irq_init(struct drm_i915_private *dev_priv)
4530 {
4531 	struct drm_device *dev = dev_priv->dev;
4532 
4533 	intel_hpd_init_work(dev_priv);
4534 
4535 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4536 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4537 
4538 	/* Let's track the enabled rps events */
4539 	if (IS_VALLEYVIEW(dev_priv))
4540 		/* WaGsvRC0ResidencyMethod:vlv */
4541 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4542 	else
4543 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4544 
4545 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4546 			  i915_hangcheck_elapsed);
4547 
4548 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4549 
4550 	if (IS_GEN2(dev_priv)) {
4551 		dev->max_vblank_count = 0;
4552 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4553 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4554 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4555 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4556 	} else {
4557 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4558 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4559 	}
4560 
4561 	/*
4562 	 * Opt out of the vblank disable timer on everything except gen2.
4563 	 * Gen2 doesn't have a hardware frame counter and so depends on
4564 	 * vblank interrupts to produce sane vblank seuquence numbers.
4565 	 */
4566 	if (!IS_GEN2(dev_priv))
4567 		dev->vblank_disable_immediate = true;
4568 
4569 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4570 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4571 
4572 	if (IS_CHERRYVIEW(dev_priv)) {
4573 		dev->driver->irq_handler = cherryview_irq_handler;
4574 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4575 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4576 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4577 		dev->driver->enable_vblank = valleyview_enable_vblank;
4578 		dev->driver->disable_vblank = valleyview_disable_vblank;
4579 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4580 	} else if (IS_VALLEYVIEW(dev_priv)) {
4581 		dev->driver->irq_handler = valleyview_irq_handler;
4582 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4583 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4584 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4585 		dev->driver->enable_vblank = valleyview_enable_vblank;
4586 		dev->driver->disable_vblank = valleyview_disable_vblank;
4587 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4588 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4589 		dev->driver->irq_handler = gen8_irq_handler;
4590 		dev->driver->irq_preinstall = gen8_irq_reset;
4591 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4592 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4593 		dev->driver->enable_vblank = gen8_enable_vblank;
4594 		dev->driver->disable_vblank = gen8_disable_vblank;
4595 		if (IS_BROXTON(dev))
4596 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4597 		else if (HAS_PCH_SPT(dev))
4598 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4599 		else
4600 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4601 	} else if (HAS_PCH_SPLIT(dev)) {
4602 		dev->driver->irq_handler = ironlake_irq_handler;
4603 		dev->driver->irq_preinstall = ironlake_irq_reset;
4604 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4605 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4606 		dev->driver->enable_vblank = ironlake_enable_vblank;
4607 		dev->driver->disable_vblank = ironlake_disable_vblank;
4608 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4609 	} else {
4610 		if (INTEL_INFO(dev_priv)->gen == 2) {
4611 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4612 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4613 			dev->driver->irq_handler = i8xx_irq_handler;
4614 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4615 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4616 			dev->driver->irq_preinstall = i915_irq_preinstall;
4617 			dev->driver->irq_postinstall = i915_irq_postinstall;
4618 			dev->driver->irq_uninstall = i915_irq_uninstall;
4619 			dev->driver->irq_handler = i915_irq_handler;
4620 		} else {
4621 			dev->driver->irq_preinstall = i965_irq_preinstall;
4622 			dev->driver->irq_postinstall = i965_irq_postinstall;
4623 			dev->driver->irq_uninstall = i965_irq_uninstall;
4624 			dev->driver->irq_handler = i965_irq_handler;
4625 		}
4626 		if (I915_HAS_HOTPLUG(dev_priv))
4627 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4628 		dev->driver->enable_vblank = i915_enable_vblank;
4629 		dev->driver->disable_vblank = i915_disable_vblank;
4630 	}
4631 }
4632 
4633 /**
4634  * intel_irq_install - enables the hardware interrupt
4635  * @dev_priv: i915 device instance
4636  *
4637  * This function enables the hardware interrupt handling, but leaves the hotplug
4638  * handling still disabled. It is called after intel_irq_init().
4639  *
4640  * In the driver load and resume code we need working interrupts in a few places
4641  * but don't want to deal with the hassle of concurrent probe and hotplug
4642  * workers. Hence the split into this two-stage approach.
4643  */
4644 int intel_irq_install(struct drm_i915_private *dev_priv)
4645 {
4646 	/*
4647 	 * We enable some interrupt sources in our postinstall hooks, so mark
4648 	 * interrupts as enabled _before_ actually enabling them to avoid
4649 	 * special cases in our ordering checks.
4650 	 */
4651 	dev_priv->pm.irqs_enabled = true;
4652 
4653 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4654 }
4655 
4656 /**
4657  * intel_irq_uninstall - finilizes all irq handling
4658  * @dev_priv: i915 device instance
4659  *
4660  * This stops interrupt and hotplug handling and unregisters and frees all
4661  * resources acquired in the init functions.
4662  */
4663 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4664 {
4665 	drm_irq_uninstall(dev_priv->dev);
4666 	intel_hpd_cancel_work(dev_priv);
4667 	dev_priv->pm.irqs_enabled = false;
4668 }
4669 
4670 /**
4671  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4672  * @dev_priv: i915 device instance
4673  *
4674  * This function is used to disable interrupts at runtime, both in the runtime
4675  * pm and the system suspend/resume code.
4676  */
4677 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4678 {
4679 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4680 	dev_priv->pm.irqs_enabled = false;
4681 #if 0
4682 	synchronize_irq(dev_priv->dev->irq);
4683 #endif
4684 }
4685 
4686 /**
4687  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4688  * @dev_priv: i915 device instance
4689  *
4690  * This function is used to enable interrupts at runtime, both in the runtime
4691  * pm and the system suspend/resume code.
4692  */
4693 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4694 {
4695 	dev_priv->pm.irqs_enabled = true;
4696 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4697 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4698 }
4699