/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef __NetBSD__
#include <sys/cdefs.h>
#endif

#include <linux/printk.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/circ_buf.h>
#endif
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pm.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pm.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

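/*
 * ivb_can_enable_err_int - check whether DE_ERR_INT_IVB may be unmasked.
 * The error interrupt is shared by all pipes, so it may only be enabled
 * when no pipe has CPU FIFO underrun reporting disabled.
 */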
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum i915_pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

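/*
 * cpt_can_enable_serr_int - check whether SDE_ERROR_CPT may be unmasked.
 * Like the CPU error interrupt, the south error interrupt is shared by
 * all PCH transcoders, so it may only be enabled when none of them has
 * FIFO underrun reporting disabled.
 */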
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

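/*
 * Clear a pending FIFO underrun in PIPESTAT: keep the interrupt enable
 * bits (the high half of the register) and write back only the underrun
 * status bit, which is cleared by writing 1 to it.
 */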
static void i9xx_clear_fifo_underrun(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
	POSTING_READ(reg);
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

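/*
 * On BDW each pipe has its own DE interrupt mask register, so the
 * underrun interrupt can be toggled per pipe without the shared-bit
 * bookkeeping that IVB/HSW need above.
 */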
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pm.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pm.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pm.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					     enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool ret;

	assert_spin_locked(&dev_priv->irq_lock);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (enable && (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)))
		i9xx_clear_fifo_underrun(dev, pipe);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	return ret;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

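/*
 * Enable interrupts in a pipe's PIPESTAT register.  The enable bits live
 * in the high half of the register, the status bits in the low half;
 * any status already pending for the newly enabled bits is cleared on
 * the way in.  Caller must hold irq_lock.
 */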
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
			 status_mask & ~PIPESTAT_INT_STATUS_MASK))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

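/*
 * On VLV the enable bits are not a plain <<16 shift of the status bits:
 * the sprite flip-done enable bits sit at different positions, and the
 * FIFO underrun bit must not be propagated into the enable half.
 */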
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet, on pipe B the
	 * same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				"pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#ifdef __NetBSD__
#define	__raw_i915_read32(dev_priv, reg) bus_space_read_4((dev_priv)->regs_bst, (dev_priv)->regs_bsh, (reg))
#else
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#endif

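/*
 * Sample the live vblank status straight from the hardware ISR, using
 * the raw accessor: this runs with uncore.lock held in the timing
 * critical scanout position query below.
 */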
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;
	int reg;

	if (INTEL_INFO(dev)->gen >= 8) {
		status = GEN8_PIPE_VBLANK;
		reg = GEN8_DE_PIPE_ISR(pipe);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		status = DE_PIPE_VBLANK_IVB(pipe);
		reg = DEISR;
	} else {
		status = DE_PIPE_VBLANK(pipe);
		reg = DEISR;
	}

	return __raw_i915_read32(dev_priv, reg) & status;
}

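/*
 * i915_get_crtc_scanoutpos - return the current scanout position
 *
 * Returns DRM_SCANOUTPOS_* flags; *vpos ends up relative to vblank end
 * (negative while in vblank), and the optional stime/etime timestamps
 * bracket the register query as tightly as possible.
 */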
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_DDI(dev)) {
			/*
			 * On HSW HDMI outputs there seems to be a 2 line
			 * difference, whereas eDP has the normal 1 line
			 * difference that earlier platforms have. External
			 * DP is unknown. For now just check for the 2 line
			 * difference case on all output types on HSW+.
			 *
			 * This might misinterpret the scanline counter being
			 * one line too far along on eDP, but that's less
			 * dangerous than the alternative since that would lead
			 * the vblank timestamp code astray when it sees a
			 * scanline count before vblank_start during a vblank
			 * interrupt.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && (position == vbl_start - 2 ||
					position == vbl_start - 1)) ||
			    (!in_vbl && (position == vbl_end - 2 ||
					 position == vbl_end - 1)))
				position = (position + 2) % vtotal;
		} else if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
			      int *max_error,
			      struct timeval *vblank_time,
			      unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

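/*
 * Re-run detection on a connector and report whether its status actually
 * changed.  Caller must hold mode_config.mutex.
 */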
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/*
	 * If there were no outputs to poll, poll was disabled, therefore
	 * make sure it's enabled when disabling HPD on some connectors.
	 */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}

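/*
 * Handle an ILK MEMINT_EVAL_CHG event: compare the hardware busyness
 * averages against the min/max thresholds and step the DRPS delay one
 * notch up or down accordingly.
 */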
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

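/*
 * Wake up anyone waiting for a request to complete on this ring and
 * kick off the hangcheck timer.
 */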
static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
#ifdef __NetBSD__
	struct drm_i915_private *dev_priv = dev->dev_private;
#endif

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

#ifdef __NetBSD__
    {
	unsigned long flags;
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	/*
	 * XXX Set a flag under the lock or push the lock out to callers.
	 */
	DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
    }
#else
	wake_up_all(&ring->irq_queue);
#endif
	i915_queue_hangcheck(dev);
}

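/*
 * Bottom half for GEN6+ RPS interrupts: pick a new frequency from the
 * up/down threshold events latched in rps.pm_iir, clamp it to the
 * current softlimits and program it.
 */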
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely that the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
#ifndef __NetBSD__		/* XXX kobject uevent...? */
	char *parity_event[6];
#endif
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

#ifndef __NetBSD__		/* XXX kobject uevent...? */
		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
#endif

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

#ifndef __NetBSD__		/* XXX kobject uevent...? */
		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
#endif
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		spin_lock(&dev_priv->irq_lock);
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
		spin_unlock(&dev_priv->irq_lock);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

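/*
 * GEN8 funnels GT interrupts through per-group IIR registers selected
 * by master_ctl; dispatch the user interrupts to the rings and ack each
 * group's IIR afterwards.
 */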
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

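/*
 * intel_hpd_irq_handler - main hotplug irq dispatch
 *
 * Record which HPD pins fired and apply the storm bookkeeping: a pin
 * that fires more than HPD_STORM_THRESHOLD times within
 * HPD_STORM_DETECT_PERIOD ms is marked disabled.  The heavy lifting is
 * punted to the hotplug work item.
 */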
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
			  hotplug_trigger);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

#ifdef __NetBSD__
	spin_lock(&dev_priv->gmbus_wait_lock);
	/* XXX Set a flag here...  */
	DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
	    &dev_priv->gmbus_wait_lock);
	spin_unlock(&dev_priv->gmbus_wait_lock);
#else
	wake_up_all(&dev_priv->gmbus_wait_queue);
#endif
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

#ifdef __NetBSD__
	spin_lock(&dev_priv->gmbus_wait_lock);
	/* XXX Set a flag here...  */
	DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
	    &dev_priv->gmbus_wait_lock);
	spin_unlock(&dev_priv->gmbus_wait_lock);
#else
	wake_up_all(&dev_priv->gmbus_wait_queue);
#endif
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			spin_lock(&dev_priv->irq_lock);
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
			spin_unlock(&dev_priv->irq_lock);
		}
	}
}

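/*
 * Gather and ack the PIPESTAT events for each pipe under irq_lock, then
 * dispatch vblanks, page flips, CRC results and underruns with the lock
 * dropped.
 */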
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
		mask = 0;
		if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
			mask |= PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			drm_handle_vblank(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		valleyview_pipestat_irq_handler(dev, iir);

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

1723 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1724 {
1725 	struct drm_i915_private *dev_priv = dev->dev_private;
1726 	int pipe;
1727 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1728 
1729 	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1730 
1731 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1732 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1733 			       SDE_AUDIO_POWER_SHIFT);
1734 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1735 				 port_name(port));
1736 	}
1737 
1738 	if (pch_iir & SDE_AUX_MASK)
1739 		dp_aux_irq_handler(dev);
1740 
1741 	if (pch_iir & SDE_GMBUS)
1742 		gmbus_irq_handler(dev);
1743 
1744 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1745 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1746 
1747 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1748 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1749 
1750 	if (pch_iir & SDE_POISON)
1751 		DRM_ERROR("PCH poison interrupt\n");
1752 
1753 	if (pch_iir & SDE_FDI_MASK)
1754 		for_each_pipe(pipe)
1755 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1756 					 pipe_name(pipe),
1757 					 I915_READ(FDI_RX_IIR(pipe)));
1758 
1759 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1760 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1761 
1762 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1763 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1764 
1765 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1766 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1767 							  false))
1768 			DRM_ERROR("PCH transcoder A FIFO underrun\n");
1769 
1770 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1771 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1772 							  false))
1773 			DRM_ERROR("PCH transcoder B FIFO underrun\n");
1774 }
1775 
1776 static void ivb_err_int_handler(struct drm_device *dev)
1777 {
1778 	struct drm_i915_private *dev_priv = dev->dev_private;
1779 	u32 err_int = I915_READ(GEN7_ERR_INT);
1780 	enum i915_pipe pipe;
1781 
1782 	if (err_int & ERR_INT_POISON)
1783 		DRM_ERROR("Poison interrupt\n");
1784 
1785 	for_each_pipe(pipe) {
1786 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
1787 			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
1788 								  false))
1789 				DRM_ERROR("Pipe %c FIFO underrun\n",
1790 					  pipe_name(pipe));
1791 		}
1792 
1793 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1794 			if (IS_IVYBRIDGE(dev))
1795 				ivb_pipe_crc_irq_handler(dev, pipe);
1796 			else
1797 				hsw_pipe_crc_irq_handler(dev, pipe);
1798 		}
1799 	}
1800 
1801 	I915_WRITE(GEN7_ERR_INT, err_int);
1802 }
1803 
1804 static void cpt_serr_int_handler(struct drm_device *dev)
1805 {
1806 	struct drm_i915_private *dev_priv = dev->dev_private;
1807 	u32 serr_int = I915_READ(SERR_INT);
1808 
1809 	if (serr_int & SERR_INT_POISON)
1810 		DRM_ERROR("PCH poison interrupt\n");
1811 
1812 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1813 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1814 							  false))
1815 			DRM_ERROR("PCH transcoder A FIFO underrun\n");
1816 
1817 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1818 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1819 							  false))
1820 			DRM_ERROR("PCH transcoder B FIFO underrun\n");
1821 
1822 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1823 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1824 							  false))
1825 			DRM_ERROR("PCH transcoder C FIFO underrun\n");
1826 
1827 	I915_WRITE(SERR_INT, serr_int);
1828 }
1829 
1830 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1831 {
1832 	struct drm_i915_private *dev_priv = dev->dev_private;
1833 	int pipe;
1834 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1835 
1836 	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1837 
1838 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1839 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1840 			       SDE_AUDIO_POWER_SHIFT_CPT);
1841 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1842 				 port_name(port));
1843 	}
1844 
1845 	if (pch_iir & SDE_AUX_MASK_CPT)
1846 		dp_aux_irq_handler(dev);
1847 
1848 	if (pch_iir & SDE_GMBUS_CPT)
1849 		gmbus_irq_handler(dev);
1850 
1851 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1852 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1853 
1854 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1855 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1856 
1857 	if (pch_iir & SDE_FDI_MASK_CPT)
1858 		for_each_pipe(pipe)
1859 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1860 					 pipe_name(pipe),
1861 					 I915_READ(FDI_RX_IIR(pipe)));
1862 
1863 	if (pch_iir & SDE_ERROR_CPT)
1864 		cpt_serr_int_handler(dev);
1865 }
1866 
1867 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
1868 {
1869 	struct drm_i915_private *dev_priv = dev->dev_private;
1870 	enum i915_pipe pipe;
1871 
1872 	if (de_iir & DE_AUX_CHANNEL_A)
1873 		dp_aux_irq_handler(dev);
1874 
1875 	if (de_iir & DE_GSE)
1876 		intel_opregion_asle_intr(dev);
1877 
1878 	if (de_iir & DE_POISON)
1879 		DRM_ERROR("Poison interrupt\n");
1880 
1881 	for_each_pipe(pipe) {
1882 		if (de_iir & DE_PIPE_VBLANK(pipe))
1883 			drm_handle_vblank(dev, pipe);
1884 
1885 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1886 			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
1887 				DRM_ERROR("Pipe %c FIFO underrun\n",
1888 					  pipe_name(pipe));
1889 
1890 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
1891 			i9xx_pipe_crc_irq_handler(dev, pipe);
1892 
1893 		/* plane/pipes map 1:1 on ilk+ */
1894 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
1895 			intel_prepare_page_flip(dev, pipe);
1896 			intel_finish_page_flip_plane(dev, pipe);
1897 		}
1898 	}
1899 
1900 	/* check event from PCH */
1901 	if (de_iir & DE_PCH_EVENT) {
1902 		u32 pch_iir = I915_READ(SDEIIR);
1903 
1904 		if (HAS_PCH_CPT(dev))
1905 			cpt_irq_handler(dev, pch_iir);
1906 		else
1907 			ibx_irq_handler(dev, pch_iir);
1908 
1909 		/* should clear PCH hotplug event before clear CPU irq */
1910 		I915_WRITE(SDEIIR, pch_iir);
1911 	}
1912 
1913 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
1914 		ironlake_rps_change_irq_handler(dev);
1915 }
1916 
1917 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
1918 {
1919 	struct drm_i915_private *dev_priv = dev->dev_private;
1920 	enum i915_pipe pipe;
1921 
1922 	if (de_iir & DE_ERR_INT_IVB)
1923 		ivb_err_int_handler(dev);
1924 
1925 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
1926 		dp_aux_irq_handler(dev);
1927 
1928 	if (de_iir & DE_GSE_IVB)
1929 		intel_opregion_asle_intr(dev);
1930 
1931 	for_each_pipe(pipe) {
1932 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
1933 			drm_handle_vblank(dev, pipe);
1934 
1935 		/* plane/pipes map 1:1 on ilk+ */
1936 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
1937 			intel_prepare_page_flip(dev, pipe);
1938 			intel_finish_page_flip_plane(dev, pipe);
1939 		}
1940 	}
1941 
1942 	/* check event from PCH */
1943 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1944 		u32 pch_iir = I915_READ(SDEIIR);
1945 
1946 		cpt_irq_handler(dev, pch_iir);
1947 
1948 		/* clear PCH hotplug event before clear CPU irq */
1949 		I915_WRITE(SDEIIR, pch_iir);
1950 	}
1951 }
1952 
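/*
 * ILK/SNB/IVB top-level handler.  Unlike VLV this makes a single pass:
 * master interrupts are gated off via DEIER (and SDEIER, see the
 * comment below) while the GT, display and PM IIRs are serviced, then
 * re-enabled so that anything which arrived meanwhile re-raises the
 * interrupt.
 */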
1953 static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
1954 {
1955 	struct drm_device *dev = (struct drm_device *) arg;
1956 	struct drm_i915_private *dev_priv = dev->dev_private;
1957 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
1958 	irqreturn_t ret = IRQ_NONE;
1959 
1960 	/* We get interrupts on unclaimed registers, so check for this before we
1961 	 * do any I915_{READ,WRITE}. */
1962 	intel_uncore_check_errors(dev);
1963 
1964 	/* disable master interrupt before clearing iir  */
1965 	de_ier = I915_READ(DEIER);
1966 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1967 	POSTING_READ(DEIER);
1968 
1969 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1970 	 * interrupts will be stored on its back queue, and then we'll be
1971 	 * able to process them after we restore SDEIER (as soon as we restore
1972 	 * it, we'll get an interrupt if SDEIIR still has something to process
1973 	 * due to its back queue). */
1974 	if (!HAS_PCH_NOP(dev)) {
1975 		sde_ier = I915_READ(SDEIER);
1976 		I915_WRITE(SDEIER, 0);
1977 		POSTING_READ(SDEIER);
1978 	}
1979 
1980 	gt_iir = I915_READ(GTIIR);
1981 	if (gt_iir) {
1982 		if (INTEL_INFO(dev)->gen >= 6)
1983 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1984 		else
1985 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1986 		I915_WRITE(GTIIR, gt_iir);
1987 		ret = IRQ_HANDLED;
1988 	}
1989 
1990 	de_iir = I915_READ(DEIIR);
1991 	if (de_iir) {
1992 		if (INTEL_INFO(dev)->gen >= 7)
1993 			ivb_display_irq_handler(dev, de_iir);
1994 		else
1995 			ilk_display_irq_handler(dev, de_iir);
1996 		I915_WRITE(DEIIR, de_iir);
1997 		ret = IRQ_HANDLED;
1998 	}
1999 
2000 	if (INTEL_INFO(dev)->gen >= 6) {
2001 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2002 		if (pm_iir) {
2003 			gen6_rps_irq_handler(dev_priv, pm_iir);
2004 			I915_WRITE(GEN6_PMIIR, pm_iir);
2005 			ret = IRQ_HANDLED;
2006 		}
2007 	}
2008 
2009 	I915_WRITE(DEIER, de_ier);
2010 	POSTING_READ(DEIER);
2011 	if (!HAS_PCH_NOP(dev)) {
2012 		I915_WRITE(SDEIER, sde_ier);
2013 		POSTING_READ(SDEIER);
2014 	}
2015 
2016 	return ret;
2017 }
2018 
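/*
 * BDW moved to a two-level interrupt scheme: GEN8_MASTER_IRQ carries
 * one summary bit per domain (GT, DE pipes, DE port/misc, PCH), and
 * only the IIRs whose summary bit is set are read below.  The master
 * control bit is cleared for the duration of the handler and restored
 * at the end.
 */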
2019 static irqreturn_t gen8_irq_handler(DRM_IRQ_ARGS)
2020 {
2021 	struct drm_device *dev = arg;
2022 	struct drm_i915_private *dev_priv = dev->dev_private;
2023 	u32 master_ctl;
2024 	irqreturn_t ret = IRQ_NONE;
2025 	uint32_t tmp = 0;
2026 	enum i915_pipe pipe;
2027 
2028 	master_ctl = I915_READ(GEN8_MASTER_IRQ);
2029 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2030 	if (!master_ctl)
2031 		return IRQ_NONE;
2032 
2033 	I915_WRITE(GEN8_MASTER_IRQ, 0);
2034 	POSTING_READ(GEN8_MASTER_IRQ);
2035 
2036 	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2037 
2038 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2039 		tmp = I915_READ(GEN8_DE_MISC_IIR);
2040 		if (tmp & GEN8_DE_MISC_GSE)
2041 			intel_opregion_asle_intr(dev);
2042 		else if (tmp)
2043 			DRM_ERROR("Unexpected DE Misc interrupt\n");
2044 		else
2045 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2046 
2047 		if (tmp) {
2048 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2049 			ret = IRQ_HANDLED;
2050 		}
2051 	}
2052 
2053 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2054 		tmp = I915_READ(GEN8_DE_PORT_IIR);
2055 		if (tmp & GEN8_AUX_CHANNEL_A)
2056 			dp_aux_irq_handler(dev);
2057 		else if (tmp)
2058 			DRM_ERROR("Unexpected DE Port interrupt\n");
2059 		else
2060 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2061 
2062 		if (tmp) {
2063 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2064 			ret = IRQ_HANDLED;
2065 		}
2066 	}
2067 
2068 	for_each_pipe(pipe) {
2069 		uint32_t pipe_iir;
2070 
2071 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2072 			continue;
2073 
2074 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2075 		if (pipe_iir & GEN8_PIPE_VBLANK)
2076 			drm_handle_vblank(dev, pipe);
2077 
2078 		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
2079 			intel_prepare_page_flip(dev, pipe);
2080 			intel_finish_page_flip_plane(dev, pipe);
2081 		}
2082 
2083 		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2084 			hsw_pipe_crc_irq_handler(dev, pipe);
2085 
2086 		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2087 			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2088 								  false))
2089 				DRM_ERROR("Pipe %c FIFO underrun\n",
2090 					  pipe_name(pipe));
2091 		}
2092 
2093 		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2094 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2095 				  pipe_name(pipe),
2096 				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2097 		}
2098 
2099 		if (pipe_iir) {
2100 			ret = IRQ_HANDLED;
2101 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2102 		} else
2103 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2104 	}
2105 
2106 	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2107 		/*
2108 		 * FIXME(BDW): Assume for now that the new interrupt handling
2109 		 * scheme also closed the SDE interrupt handling race we've seen
2110 		 * on older pch-split platforms. But this needs testing.
2111 		 */
2112 		u32 pch_iir = I915_READ(SDEIIR);
2113 
2114 		cpt_irq_handler(dev, pch_iir);
2115 
2116 		if (pch_iir) {
2117 			I915_WRITE(SDEIIR, pch_iir);
2118 			ret = IRQ_HANDLED;
2119 		}
2120 	}
2121 
2122 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2123 	POSTING_READ(GEN8_MASTER_IRQ);
2124 
2125 	return ret;
2126 }
2127 
2128 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2129 			       bool reset_completed)
2130 {
2131 	struct intel_ring_buffer *ring;
2132 	int i;
2133 
2134 	/*
2135 	 * Notify all waiters for GPU completion events that reset state has
2136 	 * been changed, and that they need to restart their wait after
2137 	 * checking for potential errors (and bail out to drop locks if there is
2138 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2139 	 */
2140 
2141 	assert_spin_locked(&dev_priv->irq_lock);
2142 #ifdef __NetBSD__
2143 	for_each_ring(ring, dev_priv, i)
2144 		DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
2145 
2146 	spin_lock(&dev_priv->pending_flip_lock);
2147 	DRM_SPIN_WAKEUP_ALL(&dev_priv->pending_flip_queue,
2148 	    &dev_priv->pending_flip_lock);
2149 	spin_unlock(&dev_priv->pending_flip_lock);
2150 
2151 	if (reset_completed) {
2152 		spin_lock(&dev_priv->gpu_error.reset_lock);
2153 		DRM_SPIN_WAKEUP_ALL(&dev_priv->gpu_error.reset_queue,
2154 		    &dev_priv->gpu_error.reset_lock);
2155 		spin_unlock(&dev_priv->gpu_error.reset_lock);
2156 	}
2157 #else
2158 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2159 	for_each_ring(ring, dev_priv, i)
2160 		wake_up_all(&ring->irq_queue);
2161 
2162 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2163 	wake_up_all(&dev_priv->pending_flip_queue);
2164 
2165 	/*
2166 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2167 	 * reset state is cleared.
2168 	 */
2169 	if (reset_completed)
2170 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2171 #endif
2172 }
2173 
2174 /**
2175  * i915_error_work_func - do process context error handling work
2176  * @work: work struct
2177  *
2178  * Fire an error uevent so userspace can see that a hang or error
2179  * was detected.
2180  */
2181 static void i915_error_work_func(struct work_struct *work)
2182 {
2183 	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2184 						    work);
2185 	struct drm_i915_private *dev_priv =
2186 		container_of(error, struct drm_i915_private, gpu_error);
2187 	struct drm_device *dev = dev_priv->dev;
2188 #ifndef __NetBSD__
2189 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2190 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2191 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2192 #endif
2193 	int ret;
2194 
2195 #ifndef __NetBSD__
2196 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2197 #endif
2198 
2199 	/*
2200 	 * Note that there's only one work item which does gpu resets, so we
2201 	 * need not worry about concurrent gpu resets potentially incrementing
2202 	 * error->reset_counter twice. We only need to take care of another
2203 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2204 	 * quick check for that is good enough: schedule_work ensures the
2205 	 * correct ordering between hang detection and this work item, and since
2206 	 * the reset in-progress bit is only ever set by code outside of this
2207 	 * work we don't need to worry about any other races.
2208 	 */
2209 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2210 		DRM_DEBUG_DRIVER("resetting chip\n");
2211 #ifndef __NetBSD__
2212 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2213 				   reset_event);
2214 #endif
2215 
2216 		/*
2217 		 * All state reset _must_ be completed before we update the
2218 		 * reset counter, for otherwise waiters might miss the reset
2219 		 * pending state and not properly drop locks, resulting in
2220 		 * deadlocks with the reset work.
2221 		 */
2222 		ret = i915_reset(dev);
2223 
2224 		intel_display_handle_reset(dev);
2225 
2226 		if (ret == 0) {
2227 			/*
2228 			 * After all the gem state is reset, increment the reset
2229 			 * counter and wake up everyone waiting for the reset to
2230 			 * complete.
2231 			 *
2232 			 * Since unlock operations are a one-sided barrier only,
2233 			 * we need to insert a barrier here to order any seqno
2234 			 * updates before the counter
2235 			 * increment.
2236 			 */
2237 			smp_mb__before_atomic_inc();
2238 			atomic_inc(&dev_priv->gpu_error.reset_counter);
2239 
2240 #ifndef __NetBSD__
2241 			kobject_uevent_env(&dev->primary->kdev->kobj,
2242 					   KOBJ_CHANGE, reset_done_event);
2243 #endif
2244 		} else {
2245 			atomic_set_mask(I915_WEDGED, &error->reset_counter);
2246 		}
2247 
2248 		/*
2249 		 * Note: The wake_up also serves as a memory barrier so that
2250 		 * waiters see the updated value of the reset counter atomic_t.
2251 		 */
2252 		spin_lock(&dev_priv->irq_lock);
2253 		i915_error_wake_up(dev_priv, true);
2254 		spin_unlock(&dev_priv->irq_lock);
2255 	}
2256 }
2257 
2258 static void i915_report_and_clear_eir(struct drm_device *dev)
2259 {
2260 	struct drm_i915_private *dev_priv = dev->dev_private;
2261 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2262 	u32 eir = I915_READ(EIR);
2263 	int pipe, i;
2264 
2265 	if (!eir)
2266 		return;
2267 
2268 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2269 
2270 	i915_get_extra_instdone(dev, instdone);
2271 
2272 	if (IS_G4X(dev)) {
2273 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2274 			u32 ipeir = I915_READ(IPEIR_I965);
2275 
2276 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2277 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2278 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2279 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2280 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2281 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2282 			I915_WRITE(IPEIR_I965, ipeir);
2283 			POSTING_READ(IPEIR_I965);
2284 		}
2285 		if (eir & GM45_ERROR_PAGE_TABLE) {
2286 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2287 			pr_err("page table error\n");
2288 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2289 			I915_WRITE(PGTBL_ER, pgtbl_err);
2290 			POSTING_READ(PGTBL_ER);
2291 		}
2292 	}
2293 
2294 	if (!IS_GEN2(dev)) {
2295 		if (eir & I915_ERROR_PAGE_TABLE) {
2296 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2297 			pr_err("page table error\n");
2298 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2299 			I915_WRITE(PGTBL_ER, pgtbl_err);
2300 			POSTING_READ(PGTBL_ER);
2301 		}
2302 	}
2303 
2304 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2305 		pr_err("memory refresh error:\n");
2306 		for_each_pipe(pipe)
2307 			pr_err("pipe %c stat: 0x%08x\n",
2308 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2309 		/* pipestat has already been acked */
2310 	}
2311 	if (eir & I915_ERROR_INSTRUCTION) {
2312 		pr_err("instruction error\n");
2313 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2314 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2315 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2316 		if (INTEL_INFO(dev)->gen < 4) {
2317 			u32 ipeir = I915_READ(IPEIR);
2318 
2319 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2320 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2321 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2322 			I915_WRITE(IPEIR, ipeir);
2323 			POSTING_READ(IPEIR);
2324 		} else {
2325 			u32 ipeir = I915_READ(IPEIR_I965);
2326 
2327 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2328 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2329 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2330 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2331 			I915_WRITE(IPEIR_I965, ipeir);
2332 			POSTING_READ(IPEIR_I965);
2333 		}
2334 	}
2335 
2336 	I915_WRITE(EIR, eir);
2337 	POSTING_READ(EIR);
2338 	eir = I915_READ(EIR);
2339 	if (eir) {
2340 		/*
2341 		 * some errors might have become stuck,
2342 		 * mask them.
2343 		 */
2344 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2345 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2346 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2347 	}
2348 }
2349 
2350 /**
2351  * i915_handle_error - handle an error interrupt
2352  * @dev: drm device
2353  *
2354  * Do some basic checking of register state at error interrupt time and
2355  * dump it to the syslog.  Also call i915_capture_error_state() to make
2356  * sure we get a record and make it available in debugfs.  Fire a uevent
2357  * so userspace knows something bad happened (should trigger collection
2358  * of a ring dump etc.).
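 *
 * In this NetBSD port the caller must hold dev_priv->irq_lock (note the
 * assert_spin_locked below).  For example, hangcheck reports a wedged
 * GPU with:
 *
 *	i915_handle_error(dev, true, "Ring hung");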
2359  */
2360 void i915_handle_error(struct drm_device *dev, bool wedged,
2361 		       const char *fmt, ...)
2362 {
2363 	struct drm_i915_private *dev_priv = dev->dev_private;
2364 	va_list args;
2365 	char error_msg[80];
2366 
2367 	assert_spin_locked(&dev_priv->irq_lock);
2368 
2369 	va_start(args, fmt);
2370 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2371 	va_end(args);
2372 
2373 	i915_capture_error_state(dev, wedged, error_msg);
2374 	i915_report_and_clear_eir(dev);
2375 
2376 	if (wedged) {
2377 		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2378 				&dev_priv->gpu_error.reset_counter);
2379 
2380 		/*
2381 		 * Wakeup waiting processes so that the reset work function
2382 		 * i915_error_work_func doesn't deadlock trying to grab various
2383 		 * locks. By bumping the reset counter first, the woken
2384 		 * processes will see a reset in progress and back off,
2385 		 * releasing their locks and then wait for the reset completion.
2386 		 * We must do this for _all_ gpu waiters that might hold locks
2387 		 * that the reset work needs to acquire.
2388 		 *
2389 		 * Note: The wake_up serves as the required memory barrier to
2390 		 * ensure that the waiters see the updated value of the reset
2391 		 * counter atomic_t.
2392 		 */
2393 		i915_error_wake_up(dev_priv, false);
2394 	}
2395 
2396 	/*
2397 	 * Our reset work can grab modeset locks (since it needs to reset the
2398 	 * state of outstanding pageflips). Hence it must not be run on our own
2399 	 * dev_priv->wq work queue, for otherwise the flush_work in the pageflip
2400 	 * code will deadlock.
2401 	 */
2402 	schedule_work(&dev_priv->gpu_error.work);
2403 }
2404 
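/*
 * Heuristic page-flip stall detector, compiled but apparently not wired
 * up in this port (note the __always_unused): if the display base
 * register already points at the new object while the flip is still
 * marked pending, assume the flip-done interrupt was missed.
 */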
2405 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2406 {
2407 	struct drm_i915_private *dev_priv = dev->dev_private;
2408 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2409 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2410 	struct drm_i915_gem_object *obj;
2411 	struct intel_unpin_work *work;
2412 	unsigned long flags;
2413 	bool stall_detected;
2414 
2415 	/* Ignore early vblank irqs */
2416 	if (intel_crtc == NULL)
2417 		return;
2418 
2419 	spin_lock_irqsave(&dev->event_lock, flags);
2420 	work = intel_crtc->unpin_work;
2421 
2422 	if (work == NULL ||
2423 	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2424 	    !work->enable_stall_check) {
2425 		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
2426 		spin_unlock_irqrestore(&dev->event_lock, flags);
2427 		return;
2428 	}
2429 
2430 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2431 	obj = work->pending_flip_obj;
2432 	if (INTEL_INFO(dev)->gen >= 4) {
2433 		int dspsurf = DSPSURF(intel_crtc->plane);
2434 		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2435 					i915_gem_obj_ggtt_offset(obj);
2436 	} else {
2437 		int dspaddr = DSPADDR(intel_crtc->plane);
2438 		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2439 							crtc->y * crtc->primary->fb->pitches[0] +
2440 							crtc->x * crtc->primary->fb->bits_per_pixel/8);
2441 	}
2442 
2443 	spin_unlock_irqrestore(&dev->event_lock, flags);
2444 
2445 	if (stall_detected) {
2446 		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2447 		intel_prepare_page_flip(dev, intel_crtc->plane);
2448 	}
2449 }
2450 
2451 /* Called from drm generic code, passed 'crtc' which
2452  * we use as a pipe index
2453  */
2454 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2455 {
2456 	struct drm_i915_private *dev_priv = dev->dev_private;
2457 	unsigned long irqflags;
2458 
2459 	if (!i915_pipe_enabled(dev, pipe))
2460 		return -EINVAL;
2461 
2462 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2463 	if (INTEL_INFO(dev)->gen >= 4)
2464 		i915_enable_pipestat(dev_priv, pipe,
2465 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2466 	else
2467 		i915_enable_pipestat(dev_priv, pipe,
2468 				     PIPE_VBLANK_INTERRUPT_STATUS);
2469 
2470 	/* maintain vblank delivery even in deep C-states */
2471 	if (INTEL_INFO(dev)->gen == 3)
2472 		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2473 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2474 
2475 	return 0;
2476 }
2477 
2478 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2479 {
2480 	struct drm_i915_private *dev_priv = dev->dev_private;
2481 	unsigned long irqflags;
2482 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2483 						     DE_PIPE_VBLANK(pipe);
2484 
2485 	if (!i915_pipe_enabled(dev, pipe))
2486 		return -EINVAL;
2487 
2488 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2489 	ironlake_enable_display_irq(dev_priv, bit);
2490 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2491 
2492 	return 0;
2493 }
2494 
2495 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2496 {
2497 	struct drm_i915_private *dev_priv = dev->dev_private;
2498 	unsigned long irqflags;
2499 
2500 	if (!i915_pipe_enabled(dev, pipe))
2501 		return -EINVAL;
2502 
2503 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2504 	i915_enable_pipestat(dev_priv, pipe,
2505 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2506 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2507 
2508 	return 0;
2509 }
2510 
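/*
 * On GEN8 vblank delivery is controlled through the per-pipe DE IMR
 * rather than PIPESTAT or DEIMR as on earlier generations: enabling
 * just clears GEN8_PIPE_VBLANK in the cached mask and writes it out.
 */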
2511 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2512 {
2513 	struct drm_i915_private *dev_priv = dev->dev_private;
2514 	unsigned long irqflags;
2515 
2516 	if (!i915_pipe_enabled(dev, pipe))
2517 		return -EINVAL;
2518 
2519 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2520 	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2521 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2522 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2523 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2524 	return 0;
2525 }
2526 
2527 /* Called from drm generic code, passed 'crtc' which
2528  * we use as a pipe index
2529  */
2530 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2531 {
2532 	struct drm_i915_private *dev_priv = dev->dev_private;
2533 	unsigned long irqflags;
2534 
2535 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2536 	if (INTEL_INFO(dev)->gen == 3)
2537 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2538 
2539 	i915_disable_pipestat(dev_priv, pipe,
2540 			      PIPE_VBLANK_INTERRUPT_STATUS |
2541 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2542 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2543 }
2544 
2545 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2546 {
2547 	struct drm_i915_private *dev_priv = dev->dev_private;
2548 	unsigned long irqflags;
2549 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2550 						     DE_PIPE_VBLANK(pipe);
2551 
2552 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2553 	ironlake_disable_display_irq(dev_priv, bit);
2554 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2555 }
2556 
2557 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2558 {
2559 	struct drm_i915_private *dev_priv = dev->dev_private;
2560 	unsigned long irqflags;
2561 
2562 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2563 	i915_disable_pipestat(dev_priv, pipe,
2564 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2565 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2566 }
2567 
2568 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2569 {
2570 	struct drm_i915_private *dev_priv = dev->dev_private;
2571 	unsigned long irqflags;
2572 
2573 	if (!i915_pipe_enabled(dev, pipe))
2574 		return;
2575 
2576 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2577 	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2578 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2579 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2580 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2581 }
2582 
2583 static u32
2584 ring_last_seqno(struct intel_ring_buffer *ring)
2585 {
2586 	return list_entry(ring->request_list.prev,
2587 			  struct drm_i915_gem_request, list)->seqno;
2588 }
2589 
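/*
 * A ring is idle if it has no outstanding requests, or if the most
 * recently submitted request has already been retired (its seqno has
 * passed).
 */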
2590 static bool
2591 ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2592 {
2593 	return (list_empty(&ring->request_list) ||
2594 		i915_seqno_passed(seqno, ring_last_seqno(ring)));
2595 }
2596 
2597 static struct intel_ring_buffer *
2598 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2599 {
2600 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2601 	u32 cmd, ipehr, head;
2602 	int i;
2603 
2604 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2605 	if ((ipehr & ~(0x3 << 16)) !=
2606 	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2607 		return NULL;
2608 
2609 	/*
2610 	 * HEAD is likely pointing to the dword after the actual command,
2611 	 * so scan backwards until we find the MBOX. But limit it to just 3
2612 	 * dwords. Note that we don't care about ACTHD here since that might
2613 	 * point at a batch, and semaphores are always emitted into the
2614 	 * ringbuffer itself.
2615 	 */
2616 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2617 
2618 	for (i = 4; i; --i) {
2619 		/*
2620 		 * Be paranoid and presume the hw has gone off into the wild -
2621 		 * our ring is smaller than what the hardware (and hence
2622 		 * HEAD_ADDR) allows. Also handles wrap-around.
2623 		 */
2624 		head &= ring->size - 1;
2625 
2626 		/* This here seems to blow up */
2627 #ifdef __NetBSD__
2628 		cmd = bus_space_read_4(ring->bst, ring->bsh, head);
2629 #else
2630 		cmd = ioread32(ring->virtual_start + head);
2631 #endif
2632 		if (cmd == ipehr)
2633 			break;
2634 
2635 		head -= 4;
2636 	}
2637 
2638 	if (!i)
2639 		return NULL;
2640 
2641 #ifdef __NetBSD__
2642 	*seqno = bus_space_read_4(ring->bst, ring->bsh, head + 4) + 1;
2643 #else
2644 	*seqno = ioread32(ring->virtual_start + head + 4) + 1;
2645 #endif
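	/*
	 * Decode which ring this semaphore waits on: bit 17 of the
	 * MI_SEMAPHORE_MBOX command (the register-select field masked
	 * off in the compare above) plus our own ring id picks the
	 * signalling ring -- three rings total, hence the modulo.
	 */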
2646 	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2647 }
2648 
2649 static int semaphore_passed(struct intel_ring_buffer *ring)
2650 {
2651 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2652 	struct intel_ring_buffer *signaller;
2653 	u32 seqno, ctl;
2654 
2655 	ring->hangcheck.deadlock = true;
2656 
2657 	signaller = semaphore_waits_for(ring, &seqno);
2658 	if (signaller == NULL || signaller->hangcheck.deadlock)
2659 		return -1;
2660 
2661 	/* cursory check for an unkickable deadlock */
2662 	ctl = I915_READ_CTL(signaller);
2663 	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2664 		return -1;
2665 
2666 	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2667 }
2668 
2669 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2670 {
2671 	struct intel_ring_buffer *ring;
2672 	int i;
2673 
2674 	for_each_ring(ring, dev_priv, i)
2675 		ring->hangcheck.deadlock = false;
2676 }
2677 
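/*
 * Classify why a busy ring has made no seqno progress.  ACTHD still
 * moving means the batch is merely slow (ACTIVE); a ring spinning in
 * WAIT_FOR_EVENT or on a semaphore can sometimes be kicked loose by
 * rewriting RING_CTL (KICK); anything else is reported as HUNG.
 */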
2678 static enum intel_ring_hangcheck_action
2679 ring_stuck(struct intel_ring_buffer *ring, u64 acthd)
2680 {
2681 	struct drm_device *dev = ring->dev;
2682 	struct drm_i915_private *dev_priv = dev->dev_private;
2683 	u32 tmp;
2684 
2685 	if (ring->hangcheck.acthd != acthd)
2686 		return HANGCHECK_ACTIVE;
2687 
2688 	if (IS_GEN2(dev))
2689 		return HANGCHECK_HUNG;
2690 
2691 	/* Is the chip hanging on a WAIT_FOR_EVENT?
2692 	 * If so we can simply poke the RB_WAIT bit
2693 	 * and break the hang. This should work on
2694 	 * all but the second generation chipsets.
2695 	 */
2696 	tmp = I915_READ_CTL(ring);
2697 	if (tmp & RING_WAIT) {
2698 		i915_handle_error(dev, false,
2699 				  "Kicking stuck wait on %s",
2700 				  ring->name);
2701 		I915_WRITE_CTL(ring, tmp);
2702 		return HANGCHECK_KICK;
2703 	}
2704 
2705 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2706 		switch (semaphore_passed(ring)) {
2707 		default:
2708 			return HANGCHECK_HUNG;
2709 		case 1:
2710 			i915_handle_error(dev, false,
2711 					  "Kicking stuck semaphore on %s",
2712 					  ring->name);
2713 			I915_WRITE_CTL(ring, tmp);
2714 			return HANGCHECK_KICK;
2715 		case 0:
2716 			return HANGCHECK_WAIT;
2717 		}
2718 	}
2719 
2720 	return HANGCHECK_HUNG;
2721 }
2722 
2723 /**
2724  * This is called when the chip hasn't reported back with completed
2725  * batchbuffers in a long time. We keep track of per-ring seqno progress;
2726  * if there is no progress, the hangcheck score for that ring is increased.
2727  * Further, acthd is inspected to see if the ring is stuck. If it is, we
2728  * kick the ring. If we see no progress on three subsequent calls we assume
2729  * the chip is wedged and try to fix it by resetting the chip.
2730  */
2731 static void i915_hangcheck_elapsed(unsigned long data)
2732 {
2733 	struct drm_device *dev = (struct drm_device *)data;
2734 	struct drm_i915_private *dev_priv = dev->dev_private;
2735 	struct intel_ring_buffer *ring;
2736 	int i;
2737 	int busy_count = 0, rings_hung = 0;
2738 	bool stuck[I915_NUM_RINGS] = { 0 };
2739 #define BUSY 1
2740 #define KICK 5
2741 #define HUNG 20
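
/*
 * Score weights: a ring that is busy but progressing decays by one per
 * check, while a stuck ring accumulates BUSY, KICK or HUNG per check
 * depending on how badly it is stuck; a ring whose score reaches
 * HANGCHECK_SCORE_RING_HUNG (checked below) is declared hung.
 */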
2742 
2743 	if (!i915.enable_hangcheck)
2744 		return;
2745 
2746 	spin_lock(&dev_priv->irq_lock);
2747 
2748 	for_each_ring(ring, dev_priv, i) {
2749 		u64 acthd;
2750 		u32 seqno;
2751 		bool busy = true;
2752 
2753 		semaphore_clear_deadlocks(dev_priv);
2754 
2755 		seqno = ring->get_seqno(ring, false);
2756 		acthd = intel_ring_get_active_head(ring);
2757 
2758 		if (ring->hangcheck.seqno == seqno) {
2759 			if (ring_idle(ring, seqno)) {
2760 				ring->hangcheck.action = HANGCHECK_IDLE;
2761 #ifdef __NetBSD__
2762 				if (DRM_SPIN_WAITERS_P(&ring->irq_queue,
2763 					&dev_priv->irq_lock)) {
2764 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2765 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2766 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2767 								  ring->name);
2768 						else
2769 							DRM_INFO("Fake missed irq on %s\n",
2770 								 ring->name);
2771 						DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock);
2772 					}
2773 					ring->hangcheck.score += BUSY;
2774 				} else {
2775 					busy = false;
2776 				}
2777 #else
2778 				if (waitqueue_active(&ring->irq_queue)) {
2779 					/* Issue a wake-up to catch stuck h/w. */
2780 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2781 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2782 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2783 								  ring->name);
2784 						else
2785 							DRM_INFO("Fake missed irq on %s\n",
2786 								 ring->name);
2787 						wake_up_all(&ring->irq_queue);
2788 					}
2789 					/* Safeguard against driver failure */
2790 					ring->hangcheck.score += BUSY;
2791 				} else
2792 					busy = false;
2793 #endif
2794 			} else {
2795 				/* We always increment the hangcheck score
2796 				 * if the ring is busy and still processing
2797 				 * the same request, so that no single request
2798 				 * can run indefinitely (such as a chain of
2799 				 * batches). The only time we do not increment
2800 				 * the hangcheck score on this ring, if this
2801 				 * ring is in a legitimate wait for another
2802 				 * ring. In that case the waiting ring is a
2803 				 * victim and we want to be sure we catch the
2804 				 * right culprit. Then every time we do kick
2805 				 * the ring, add a small increment to the
2806 				 * score so that we can catch a batch that is
2807 				 * being repeatedly kicked and so responsible
2808 				 * for stalling the machine.
2809 				 */
2810 				ring->hangcheck.action = ring_stuck(ring,
2811 								    acthd);
2812 
2813 				switch (ring->hangcheck.action) {
2814 				case HANGCHECK_IDLE:
2815 				case HANGCHECK_WAIT:
2816 					break;
2817 				case HANGCHECK_ACTIVE:
2818 					ring->hangcheck.score += BUSY;
2819 					break;
2820 				case HANGCHECK_KICK:
2821 					ring->hangcheck.score += KICK;
2822 					break;
2823 				case HANGCHECK_HUNG:
2824 					ring->hangcheck.score += HUNG;
2825 					stuck[i] = true;
2826 					break;
2827 				}
2828 			}
2829 		} else {
2830 			ring->hangcheck.action = HANGCHECK_ACTIVE;
2831 
2832 			/* Gradually reduce the count so that we catch DoS
2833 			 * attempts across multiple batches.
2834 			 */
2835 			if (ring->hangcheck.score > 0)
2836 				ring->hangcheck.score--;
2837 		}
2838 
2839 		ring->hangcheck.seqno = seqno;
2840 		ring->hangcheck.acthd = acthd;
2841 		busy_count += busy;
2842 	}
2843 
2844 	for_each_ring(ring, dev_priv, i) {
2845 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2846 			DRM_INFO("%s on %s\n",
2847 				 stuck[i] ? "stuck" : "no progress",
2848 				 ring->name);
2849 			rings_hung++;
2850 		}
2851 	}
2852 
2853 	if (rings_hung) {
2854 		i915_handle_error(dev, true, "Ring hung");
2855 		spin_unlock(&dev_priv->irq_lock);
2856 		return;
2857 	}
2858 
2859 	spin_unlock(&dev_priv->irq_lock);
2860 
2861 	if (busy_count)
2862 		/* Reset the timer in case the chip hangs without another
2863 		 * request being added */
2864 		i915_queue_hangcheck(dev);
2865 }
2866 
2867 void i915_queue_hangcheck(struct drm_device *dev)
2868 {
2869 	struct drm_i915_private *dev_priv = dev->dev_private;
2870 	if (!i915.enable_hangcheck)
2871 		return;
2872 
2873 	mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2874 		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2875 }
2876 
2877 static void ibx_irq_preinstall(struct drm_device *dev)
2878 {
2879 	struct drm_i915_private *dev_priv = dev->dev_private;
2880 
2881 	if (HAS_PCH_NOP(dev))
2882 		return;
2883 
2884 	/* south display irq */
2885 	I915_WRITE(SDEIMR, 0xffffffff);
2886 	/*
2887 	 * SDEIER is also touched by the interrupt handler to work around missed
2888 	 * PCH interrupts. Hence we can't update it after the interrupt handler
2889 	 * is enabled - instead we unconditionally enable all PCH interrupt
2890 	 * sources here, but then only unmask them as needed with SDEIMR.
2891 	 */
2892 	I915_WRITE(SDEIER, 0xffffffff);
2893 	POSTING_READ(SDEIER);
2894 }
2895 
2896 static void gen5_gt_irq_preinstall(struct drm_device *dev)
2897 {
2898 	struct drm_i915_private *dev_priv = dev->dev_private;
2899 
2900 	/* and GT */
2901 	I915_WRITE(GTIMR, 0xffffffff);
2902 	I915_WRITE(GTIER, 0x0);
2903 	POSTING_READ(GTIER);
2904 
2905 	if (INTEL_INFO(dev)->gen >= 6) {
2906 		/* and PM */
2907 		I915_WRITE(GEN6_PMIMR, 0xffffffff);
2908 		I915_WRITE(GEN6_PMIER, 0x0);
2909 		POSTING_READ(GEN6_PMIER);
2910 	}
2911 }
2912 
2913 /* drm_dma.h hooks
2914  */
2915 static void ironlake_irq_preinstall(struct drm_device *dev)
2916 {
2917 	struct drm_i915_private *dev_priv = dev->dev_private;
2918 
2919 	I915_WRITE(HWSTAM, 0xeffe);
2920 
2921 	I915_WRITE(DEIMR, 0xffffffff);
2922 	I915_WRITE(DEIER, 0x0);
2923 	POSTING_READ(DEIER);
2924 
2925 	gen5_gt_irq_preinstall(dev);
2926 
2927 	ibx_irq_preinstall(dev);
2928 }
2929 
2930 static void valleyview_irq_preinstall(struct drm_device *dev)
2931 {
2932 	struct drm_i915_private *dev_priv = dev->dev_private;
2933 	int pipe;
2934 
2935 	/* VLV magic */
2936 	I915_WRITE(VLV_IMR, 0);
2937 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2938 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2939 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2940 
2941 	/* and GT */
2942 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2943 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2944 
2945 	gen5_gt_irq_preinstall(dev);
2946 
2947 	I915_WRITE(DPINVGTT, 0xff);
2948 
2949 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2950 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2951 	for_each_pipe(pipe)
2952 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2953 	I915_WRITE(VLV_IIR, 0xffffffff);
2954 	I915_WRITE(VLV_IMR, 0xffffffff);
2955 	I915_WRITE(VLV_IER, 0x0);
2956 	POSTING_READ(VLV_IER);
2957 }
2958 
2959 static void gen8_irq_preinstall(struct drm_device *dev)
2960 {
2961 	struct drm_i915_private *dev_priv = dev->dev_private;
2962 	int pipe;
2963 
2964 	I915_WRITE(GEN8_MASTER_IRQ, 0);
2965 	POSTING_READ(GEN8_MASTER_IRQ);
2966 
2967 	/* IIR can theoretically queue up two events. Be paranoid */
2968 #define GEN8_IRQ_INIT_NDX(type, which) do { \
2969 		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
2970 		POSTING_READ(GEN8_##type##_IMR(which)); \
2971 		I915_WRITE(GEN8_##type##_IER(which), 0); \
2972 		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2973 		POSTING_READ(GEN8_##type##_IIR(which)); \
2974 		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
2975 	} while (0)
2976 
2977 #define GEN8_IRQ_INIT(type) do { \
2978 		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
2979 		POSTING_READ(GEN8_##type##_IMR); \
2980 		I915_WRITE(GEN8_##type##_IER, 0); \
2981 		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2982 		POSTING_READ(GEN8_##type##_IIR); \
2983 		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
2984 	} while (0)
2985 
2986 	GEN8_IRQ_INIT_NDX(GT, 0);
2987 	GEN8_IRQ_INIT_NDX(GT, 1);
2988 	GEN8_IRQ_INIT_NDX(GT, 2);
2989 	GEN8_IRQ_INIT_NDX(GT, 3);
2990 
2991 	for_each_pipe(pipe) {
2992 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe);
2993 	}
2994 
2995 	GEN8_IRQ_INIT(DE_PORT);
2996 	GEN8_IRQ_INIT(DE_MISC);
2997 	GEN8_IRQ_INIT(PCU);
2998 #undef GEN8_IRQ_INIT
2999 #undef GEN8_IRQ_INIT_NDX
3000 
3001 	POSTING_READ(GEN8_PCU_IIR);
3002 
3003 	ibx_irq_preinstall(dev);
3004 }
3005 
3006 static void ibx_hpd_irq_setup(struct drm_device *dev)
3007 {
3008 	struct drm_i915_private *dev_priv = dev->dev_private;
3009 	struct drm_mode_config *mode_config = &dev->mode_config;
3010 	struct intel_encoder *intel_encoder;
3011 	u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3012 
3013 	if (HAS_PCH_IBX(dev)) {
3014 		hotplug_irqs = SDE_HOTPLUG_MASK;
3015 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3016 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3017 				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3018 	} else {
3019 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3020 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3021 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3022 				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3023 	}
3024 
3025 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3026 
3027 	/*
3028 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3029 	 * duration to 2ms (which is the minimum in the Display Port spec)
3030 	 *
3031 	 * This register is the same on all known PCH chips.
3032 	 */
3033 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3034 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3035 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3036 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3037 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3038 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3039 }
3040 
3041 static void ibx_irq_postinstall(struct drm_device *dev)
3042 {
3043 	struct drm_i915_private *dev_priv = dev->dev_private;
3044 	u32 mask;
3045 
3046 	if (HAS_PCH_NOP(dev))
3047 		return;
3048 
3049 	if (HAS_PCH_IBX(dev)) {
3050 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3051 	} else {
3052 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3053 
3054 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
3055 	}
3056 
3057 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
3058 	I915_WRITE(SDEIMR, ~mask);
3059 }
3060 
3061 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3062 {
3063 	struct drm_i915_private *dev_priv = dev->dev_private;
3064 	u32 pm_irqs, gt_irqs;
3065 
3066 	pm_irqs = gt_irqs = 0;
3067 
3068 	dev_priv->gt_irq_mask = ~0;
3069 	if (HAS_L3_DPF(dev)) {
3070 		/* L3 parity interrupt is always unmasked. */
3071 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3072 		gt_irqs |= GT_PARITY_ERROR(dev);
3073 	}
3074 
3075 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3076 	if (IS_GEN5(dev)) {
3077 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3078 			   ILK_BSD_USER_INTERRUPT;
3079 	} else {
3080 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3081 	}
3082 
3083 	I915_WRITE(GTIIR, I915_READ(GTIIR));
3084 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
3085 	I915_WRITE(GTIER, gt_irqs);
3086 	POSTING_READ(GTIER);
3087 
3088 	if (INTEL_INFO(dev)->gen >= 6) {
3089 		pm_irqs |= dev_priv->pm_rps_events;
3090 
3091 		if (HAS_VEBOX(dev))
3092 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3093 
3094 		dev_priv->pm_irq_mask = 0xffffffff;
3095 		I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
3096 		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
3097 		I915_WRITE(GEN6_PMIER, pm_irqs);
3098 		POSTING_READ(GEN6_PMIER);
3099 	}
3100 }
3101 
3102 static int ironlake_irq_postinstall(struct drm_device *dev)
3103 {
3104 	unsigned long irqflags;
3105 	struct drm_i915_private *dev_priv = dev->dev_private;
3106 	u32 display_mask, extra_mask;
3107 
3108 	if (INTEL_INFO(dev)->gen >= 7) {
3109 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3110 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3111 				DE_PLANEB_FLIP_DONE_IVB |
3112 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3113 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3114 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3115 
3116 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
3117 	} else {
3118 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3119 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3120 				DE_AUX_CHANNEL_A |
3121 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3122 				DE_POISON);
3123 		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3124 				DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3125 	}
3126 
3127 	dev_priv->irq_mask = ~display_mask;
3128 
3129 	/* should always be able to generate irqs */
3130 	I915_WRITE(DEIIR, I915_READ(DEIIR));
3131 	I915_WRITE(DEIMR, dev_priv->irq_mask);
3132 	I915_WRITE(DEIER, display_mask | extra_mask);
3133 	POSTING_READ(DEIER);
3134 
3135 	gen5_gt_irq_postinstall(dev);
3136 
3137 	ibx_irq_postinstall(dev);
3138 
3139 	if (IS_IRONLAKE_M(dev)) {
3140 		/* Enable PCU event interrupts
3141 		 *
3142 		 * spinlocking not required here for correctness since interrupt
3143 		 * setup is guaranteed to run in single-threaded context. But we
3144 		 * need it to make the assert_spin_locked happy. */
3145 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3146 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3147 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3148 	}
3149 
3150 	return 0;
3151 }
3152 
3153 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3154 {
3155 	u32 pipestat_mask;
3156 	u32 iir_mask;
3157 
3158 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3159 			PIPE_FIFO_UNDERRUN_STATUS;
3160 
3161 	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3162 	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3163 	POSTING_READ(PIPESTAT(PIPE_A));
3164 
3165 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3166 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3167 
3168 	i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3169 					       PIPE_GMBUS_INTERRUPT_STATUS);
3170 	i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3171 
3172 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3173 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3174 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3175 	dev_priv->irq_mask &= ~iir_mask;
3176 
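	/*
	 * VLV_IIR is written twice because IIR can latch a second
	 * pending event behind the one being cleared (cf. the
	 * "theoretically queue up two events" note in
	 * gen8_irq_preinstall).
	 */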
3177 	I915_WRITE(VLV_IIR, iir_mask);
3178 	I915_WRITE(VLV_IIR, iir_mask);
3179 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3180 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3181 	POSTING_READ(VLV_IER);
3182 }
3183 
3184 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3185 {
3186 	u32 pipestat_mask;
3187 	u32 iir_mask;
3188 
3189 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3190 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3191 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3192 
3193 	dev_priv->irq_mask |= iir_mask;
3194 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3195 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3196 	I915_WRITE(VLV_IIR, iir_mask);
3197 	I915_WRITE(VLV_IIR, iir_mask);
3198 	POSTING_READ(VLV_IIR);
3199 
3200 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3201 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3202 
3203 	i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3204 					        PIPE_GMBUS_INTERRUPT_STATUS);
3205 	i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3206 
3207 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3208 			PIPE_FIFO_UNDERRUN_STATUS;
3209 	I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3210 	I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3211 	POSTING_READ(PIPESTAT(PIPE_A));
3212 }
3213 
3214 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3215 {
3216 	assert_spin_locked(&dev_priv->irq_lock);
3217 
3218 	if (dev_priv->display_irqs_enabled)
3219 		return;
3220 
3221 	dev_priv->display_irqs_enabled = true;
3222 
3223 	if (dev_priv->dev->irq_enabled)
3224 		valleyview_display_irqs_install(dev_priv);
3225 }
3226 
3227 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3228 {
3229 	assert_spin_locked(&dev_priv->irq_lock);
3230 
3231 	if (!dev_priv->display_irqs_enabled)
3232 		return;
3233 
3234 	dev_priv->display_irqs_enabled = false;
3235 
3236 	if (dev_priv->dev->irq_enabled)
3237 		valleyview_display_irqs_uninstall(dev_priv);
3238 }
3239 
3240 static int valleyview_irq_postinstall(struct drm_device *dev)
3241 {
3242 	struct drm_i915_private *dev_priv = dev->dev_private;
3243 	unsigned long irqflags;
3244 
3245 	dev_priv->irq_mask = ~0;
3246 
3247 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3248 	POSTING_READ(PORT_HOTPLUG_EN);
3249 
3250 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3251 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3252 	I915_WRITE(VLV_IIR, 0xffffffff);
3253 	POSTING_READ(VLV_IER);
3254 
3255 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3256 	 * just to make the assert_spin_locked check happy. */
3257 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3258 	if (dev_priv->display_irqs_enabled)
3259 		valleyview_display_irqs_install(dev_priv);
3260 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3261 
3262 	I915_WRITE(VLV_IIR, 0xffffffff);
3263 	I915_WRITE(VLV_IIR, 0xffffffff);
3264 
3265 	gen5_gt_irq_postinstall(dev);
3266 
3267 	/* ack & enable invalid PTE error interrupts */
3268 #if 0 /* FIXME: add support to irq handler for checking these bits */
3269 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3270 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3271 #endif
3272 
3273 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3274 
3275 	return 0;
3276 }
3277 
3278 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3279 {
3280 	int i;
3281 
3282 	/* These are interrupts we'll toggle with the ring mask register */
3283 	uint32_t gt_interrupts[] = {
3284 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3285 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3286 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3287 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3288 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3289 		0,
3290 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3291 		};
3292 
3293 	for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) {
3294 		u32 tmp = I915_READ(GEN8_GT_IIR(i));
3295 		if (tmp)
3296 			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
3297 				  i, tmp);
3298 		I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]);
3299 		I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]);
3300 	}
3301 	POSTING_READ(GEN8_GT_IER(0));
3302 }
3303 
3304 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3305 {
3306 	struct drm_device *dev = dev_priv->dev;
3307 	uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
3308 		GEN8_PIPE_CDCLK_CRC_DONE |
3309 		GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3310 	uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3311 		GEN8_PIPE_FIFO_UNDERRUN;
3312 	int pipe;
3313 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3314 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3315 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3316 
3317 	for_each_pipe(pipe) {
3318 		u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe));
3319 		if (tmp)
3320 			DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n",
3321 				  pipe, tmp);
3322 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3323 		I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables);
3324 	}
3325 	POSTING_READ(GEN8_DE_PIPE_ISR(0));
3326 
3327 	I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A);
3328 	I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A);
3329 	POSTING_READ(GEN8_DE_PORT_IER);
3330 }
3331 
3332 static int gen8_irq_postinstall(struct drm_device *dev)
3333 {
3334 	struct drm_i915_private *dev_priv = dev->dev_private;
3335 
3336 	gen8_gt_irq_postinstall(dev_priv);
3337 	gen8_de_irq_postinstall(dev_priv);
3338 
3339 	ibx_irq_postinstall(dev);
3340 
3341 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3342 	POSTING_READ(GEN8_MASTER_IRQ);
3343 
3344 	return 0;
3345 }
3346 
3347 static void gen8_irq_uninstall(struct drm_device *dev)
3348 {
3349 	struct drm_i915_private *dev_priv = dev->dev_private;
3350 	int pipe;
3351 
3352 	if (!dev_priv)
3353 		return;
3354 
3355 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3356 
3357 #define GEN8_IRQ_FINI_NDX(type, which) do { \
3358 		I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3359 		I915_WRITE(GEN8_##type##_IER(which), 0); \
3360 		I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3361 	} while (0)
3362 
3363 #define GEN8_IRQ_FINI(type) do { \
3364 		I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3365 		I915_WRITE(GEN8_##type##_IER, 0); \
3366 		I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3367 	} while (0)
3368 
3369 	GEN8_IRQ_FINI_NDX(GT, 0);
3370 	GEN8_IRQ_FINI_NDX(GT, 1);
3371 	GEN8_IRQ_FINI_NDX(GT, 2);
3372 	GEN8_IRQ_FINI_NDX(GT, 3);
3373 
3374 	for_each_pipe(pipe) {
3375 		GEN8_IRQ_FINI_NDX(DE_PIPE, pipe);
3376 	}
3377 
3378 	GEN8_IRQ_FINI(DE_PORT);
3379 	GEN8_IRQ_FINI(DE_MISC);
3380 	GEN8_IRQ_FINI(PCU);
3381 #undef GEN8_IRQ_FINI
3382 #undef GEN8_IRQ_FINI_NDX
3383 
3384 	POSTING_READ(GEN8_PCU_IIR);
3385 }
3386 
3387 static void valleyview_irq_uninstall(struct drm_device *dev)
3388 {
3389 	struct drm_i915_private *dev_priv = dev->dev_private;
3390 	unsigned long irqflags;
3391 	int pipe;
3392 
3393 	if (!dev_priv)
3394 		return;
3395 
3396 	intel_hpd_irq_uninstall(dev_priv);
3397 
3398 	for_each_pipe(pipe)
3399 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3400 
3401 	I915_WRITE(HWSTAM, 0xffffffff);
3402 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3403 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3404 
3405 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3406 	if (dev_priv->display_irqs_enabled)
3407 		valleyview_display_irqs_uninstall(dev_priv);
3408 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3409 
3410 	dev_priv->irq_mask = 0;
3411 
3412 	I915_WRITE(VLV_IIR, 0xffffffff);
3413 	I915_WRITE(VLV_IMR, 0xffffffff);
3414 	I915_WRITE(VLV_IER, 0x0);
3415 	POSTING_READ(VLV_IER);
3416 }
3417 
3418 static void ironlake_irq_uninstall(struct drm_device *dev)
3419 {
3420 	struct drm_i915_private *dev_priv = dev->dev_private;
3421 
3422 	if (!dev_priv)
3423 		return;
3424 
3425 	intel_hpd_irq_uninstall(dev_priv);
3426 
3427 	I915_WRITE(HWSTAM, 0xffffffff);
3428 
3429 	I915_WRITE(DEIMR, 0xffffffff);
3430 	I915_WRITE(DEIER, 0x0);
3431 	I915_WRITE(DEIIR, I915_READ(DEIIR));
3432 	if (IS_GEN7(dev))
3433 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
3434 
3435 	I915_WRITE(GTIMR, 0xffffffff);
3436 	I915_WRITE(GTIER, 0x0);
3437 	I915_WRITE(GTIIR, I915_READ(GTIIR));
3438 
3439 	if (HAS_PCH_NOP(dev))
3440 		return;
3441 
3442 	I915_WRITE(SDEIMR, 0xffffffff);
3443 	I915_WRITE(SDEIER, 0x0);
3444 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
3445 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3446 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
3447 }
3448 
3449 static void i8xx_irq_preinstall(struct drm_device *dev)
3450 {
3451 	struct drm_i915_private *dev_priv = dev->dev_private;
3452 	int pipe;
3453 
3454 	for_each_pipe(pipe)
3455 		I915_WRITE(PIPESTAT(pipe), 0);
3456 	I915_WRITE16(IMR, 0xffff);
3457 	I915_WRITE16(IER, 0x0);
3458 	POSTING_READ16(IER);
3459 }
3460 
3461 static int i8xx_irq_postinstall(struct drm_device *dev)
3462 {
3463 	struct drm_i915_private *dev_priv = dev->dev_private;
3464 	unsigned long irqflags;
3465 
3466 	I915_WRITE16(EMR,
3467 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3468 
3469 	/* Unmask the interrupts that we always want on. */
3470 	dev_priv->irq_mask =
3471 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3472 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3473 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3474 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3475 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3476 	I915_WRITE16(IMR, dev_priv->irq_mask);
3477 
3478 	I915_WRITE16(IER,
3479 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3480 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3481 		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3482 		     I915_USER_INTERRUPT);
3483 	POSTING_READ16(IER);
3484 
3485 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3486 	 * just to make the assert_spin_locked check happy. */
3487 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3488 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3489 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3490 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3491 
3492 	return 0;
3493 }
3494 
3495 /*
3496  * Returns true when a page flip has completed.
3497  */
3498 static bool i8xx_handle_vblank(struct drm_device *dev,
3499 			       int plane, int pipe, u32 iir)
3500 {
3501 	struct drm_i915_private *dev_priv = dev->dev_private;
3502 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3503 
3504 	if (!drm_handle_vblank(dev, pipe))
3505 		return false;
3506 
3507 	if ((iir & flip_pending) == 0)
3508 		return false;
3509 
3510 	intel_prepare_page_flip(dev, plane);
3511 
3512 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3513 	 * to '0' on the following vblank, i.e. IIR still has the PendingFlip
3514 	 * bit asserted after the MI_DISPLAY_FLIP while ISR has it deasserted,
3515 	 * hence the flip is complete (no longer pending). Since this doesn't
3516 	 * raise an interrupt per se, we watch for the change at vblank.
3517 	 */
3518 	if (I915_READ16(ISR) & flip_pending)
3519 		return false;
3520 
3521 	intel_finish_page_flip(dev, pipe);
3522 
3523 	return true;
3524 }
3525 
3526 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
3527 {
3528 	struct drm_device *dev = (struct drm_device *) arg;
3529 	struct drm_i915_private *dev_priv = dev->dev_private;
3530 	u16 iir, new_iir;
3531 	u32 pipe_stats[2];
3532 	unsigned long irqflags;
3533 	int pipe;
3534 	u16 flip_mask =
3535 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3536 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3537 
3538 	iir = I915_READ16(IIR);
3539 	if (iir == 0)
3540 		return IRQ_NONE;
3541 
3542 	while (iir & ~flip_mask) {
3543 		/* Can't rely on pipestat interrupt bit in iir as it might
3544 		 * have been cleared after the pipestat interrupt was received.
3545 		 * It doesn't set the bit in iir again, but it still produces
3546 		 * interrupts (for non-MSI).
3547 		 */
3548 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3549 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3550 			i915_handle_error(dev, false,
3551 					  "Command parser error, iir 0x%08x",
3552 					  iir);
3553 
3554 		for_each_pipe(pipe) {
3555 			int reg = PIPESTAT(pipe);
3556 			pipe_stats[pipe] = I915_READ(reg);
3557 
3558 			/*
3559 			 * Clear the PIPE*STAT regs before the IIR
3560 			 */
3561 			if (pipe_stats[pipe] & 0x8000ffff)
3562 				I915_WRITE(reg, pipe_stats[pipe]);
3563 		}
3564 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3565 
3566 		I915_WRITE16(IIR, iir & ~flip_mask);
3567 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3568 
3569 		i915_update_dri1_breadcrumb(dev);
3570 
3571 		if (iir & I915_USER_INTERRUPT)
3572 			notify_ring(dev, &dev_priv->ring[RCS]);
3573 
3574 		for_each_pipe(pipe) {
3575 			int plane = pipe;
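			/* Gen2/3 parts with FBC have the plane/pipe
			 * assignment swapped (plane A scans out pipe B
			 * and vice versa), hence the inversion here.
			 */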
3576 			if (HAS_FBC(dev))
3577 				plane = !plane;
3578 
3579 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3580 			    i8xx_handle_vblank(dev, plane, pipe, iir))
3581 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3582 
3583 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3584 				i9xx_pipe_crc_irq_handler(dev, pipe);
3585 
3586 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3587 			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
3588 				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
3589 		}
3590 
3591 		iir = new_iir;
3592 	}
3593 
3594 	return IRQ_HANDLED;
3595 }
3596 
3597 static void i8xx_irq_uninstall(struct drm_device *dev)
3598 {
3599 	struct drm_i915_private *dev_priv = dev->dev_private;
3600 	int pipe;
3601 
3602 	for_each_pipe(pipe) {
3603 		/* Clear enable bits; then clear status bits */
3604 		I915_WRITE(PIPESTAT(pipe), 0);
3605 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3606 	}
3607 	I915_WRITE16(IMR, 0xffff);
3608 	I915_WRITE16(IER, 0x0);
3609 	I915_WRITE16(IIR, I915_READ16(IIR));
3610 }
3611 
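/* Quiesce the gen3 interrupt sources: clear hotplug and pipe status,
 * mask HWSTAM and IMR, and disable IER before the handler is hooked up.
 */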
3612 static void i915_irq_preinstall(struct drm_device *dev)
3613 {
3614 	struct drm_i915_private *dev_priv = dev->dev_private;
3615 	int pipe;
3616 
3617 	if (I915_HAS_HOTPLUG(dev)) {
3618 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3619 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3620 	}
3621 
3622 	I915_WRITE16(HWSTAM, 0xeffe);
3623 	for_each_pipe(pipe)
3624 		I915_WRITE(PIPESTAT(pipe), 0);
3625 	I915_WRITE(IMR, 0xffffffff);
3626 	I915_WRITE(IER, 0x0);
3627 	POSTING_READ(IER);
3628 }
3629 
3630 static int i915_irq_postinstall(struct drm_device *dev)
3631 {
3632 	struct drm_i915_private *dev_priv = dev->dev_private;
3633 	u32 enable_mask;
3634 	unsigned long irqflags;
3635 
3636 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3637 
3638 	/* Unmask the interrupts that we always want on. */
3639 	dev_priv->irq_mask =
3640 		~(I915_ASLE_INTERRUPT |
3641 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3642 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3643 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3644 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3645 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3646 
3647 	enable_mask =
3648 		I915_ASLE_INTERRUPT |
3649 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3650 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3651 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3652 		I915_USER_INTERRUPT;
3653 
3654 	if (I915_HAS_HOTPLUG(dev)) {
3655 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3656 		POSTING_READ(PORT_HOTPLUG_EN);
3657 
3658 		/* Enable in IER... */
3659 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3660 		/* and unmask in IMR */
3661 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3662 	}
3663 
3664 	I915_WRITE(IMR, dev_priv->irq_mask);
3665 	I915_WRITE(IER, enable_mask);
3666 	POSTING_READ(IER);
3667 
3668 	i915_enable_asle_pipestat(dev);
3669 
3670 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3671 	 * just to make the assert_spin_locked check happy. */
3672 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3673 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3674 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3675 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3676 
3677 	return 0;
3678 }
3679 
3680 /*
3681  * Returns true when a page flip has completed.
3682  */
3683 static bool i915_handle_vblank(struct drm_device *dev,
3684 			       int plane, int pipe, u32 iir)
3685 {
3686 	struct drm_i915_private *dev_priv = dev->dev_private;
3687 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3688 
3689 	if (!drm_handle_vblank(dev, pipe))
3690 		return false;
3691 
3692 	if ((iir & flip_pending) == 0)
3693 		return false;
3694 
3695 	intel_prepare_page_flip(dev, plane);
3696 
3697 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3698 	 * to '0' on the following vblank, i.e. IIR still has the PendingFlip
3699 	 * bit asserted after the MI_DISPLAY_FLIP while ISR has it deasserted,
3700 	 * hence the flip is complete (no longer pending). Since this doesn't
3701 	 * raise an interrupt per se, we watch for the change at vblank.
3702 	 */
3703 	if (I915_READ(ISR) & flip_pending)
3704 		return false;
3705 
3706 	intel_finish_page_flip(dev, pipe);
3707 
3708 	return true;
3709 }
3710 
3711 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
3712 {
3713 	struct drm_device *dev = (struct drm_device *) arg;
3714 	struct drm_i915_private *dev_priv = dev->dev_private;
3715 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3716 	unsigned long irqflags;
3717 	u32 flip_mask =
3718 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3719 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3720 	int pipe, ret = IRQ_NONE;
3721 
3722 	iir = I915_READ(IIR);
3723 	do {
3724 		bool irq_received = (iir & ~flip_mask) != 0;
3725 		bool blc_event = false;
3726 
3727 		/* Can't rely on pipestat interrupt bit in iir as it might
3728 		 * have been cleared after the pipestat interrupt was received.
3729 		 * It doesn't set the bit in iir again, but it still produces
3730 		 * interrupts (for non-MSI).
3731 		 */
3732 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3733 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3734 			i915_handle_error(dev, false,
3735 					  "Command parser error, iir 0x%08x",
3736 					  iir);
3737 
3738 		for_each_pipe(pipe) {
3739 			int reg = PIPESTAT(pipe);
3740 			pipe_stats[pipe] = I915_READ(reg);
3741 
3742 			/* Clear the PIPE*STAT regs before the IIR */
3743 			if (pipe_stats[pipe] & 0x8000ffff) {
3744 				I915_WRITE(reg, pipe_stats[pipe]);
3745 				irq_received = true;
3746 			}
3747 		}
3748 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3749 
3750 		if (!irq_received)
3751 			break;
3752 
3753 		/* Consume port.  Then clear IIR or we'll miss events */
3754 		if ((I915_HAS_HOTPLUG(dev)) &&
3755 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3756 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3757 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3758 
3759 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3760 
3761 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3762 			POSTING_READ(PORT_HOTPLUG_STAT);
3763 		}
3764 
3765 		I915_WRITE(IIR, iir & ~flip_mask);
3766 		new_iir = I915_READ(IIR); /* Flush posted writes */
3767 
3768 		if (iir & I915_USER_INTERRUPT)
3769 			notify_ring(dev, &dev_priv->ring[RCS]);
3770 
3771 		for_each_pipe(pipe) {
3772 			int plane = pipe;
3773 			if (HAS_FBC(dev))
3774 				plane = !plane;
3775 
3776 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3777 			    i915_handle_vblank(dev, plane, pipe, iir))
3778 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3779 
3780 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3781 				blc_event = true;
3782 
3783 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3784 				i9xx_pipe_crc_irq_handler(dev, pipe);
3785 
3786 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
3787 			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
3788 				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
3789 		}
3790 
3791 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3792 			intel_opregion_asle_intr(dev);
3793 
3794 		/* With MSI, interrupts are only generated when iir
3795 		 * transitions from zero to nonzero.  If another bit got
3796 		 * set while we were handling the existing iir bits, then
3797 		 * we would never get another interrupt.
3798 		 *
3799 		 * This is fine on non-MSI as well, as if we hit this path
3800 		 * we avoid exiting the interrupt handler only to generate
3801 		 * another one.
3802 		 *
3803 		 * Note that for MSI this could cause a stray interrupt report
3804 		 * if an interrupt landed in the time between writing IIR and
3805 		 * the posting read.  This should be rare enough to never
3806 		 * trigger the 99% of 100,000 interrupts test for disabling
3807 		 * stray interrupts.
3808 		 */
3809 		ret = IRQ_HANDLED;
3810 		iir = new_iir;
3811 	} while (iir & ~flip_mask);
3812 
3813 	i915_update_dri1_breadcrumb(dev);
3814 
3815 	return ret;
3816 }
3817 
3818 static void i915_irq_uninstall(struct drm_device *dev)
3819 {
3820 	struct drm_i915_private *dev_priv = dev->dev_private;
3821 	int pipe;
3822 
3823 	intel_hpd_irq_uninstall(dev_priv);
3824 
3825 	if (I915_HAS_HOTPLUG(dev)) {
3826 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3827 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3828 	}
3829 
3830 	I915_WRITE16(HWSTAM, 0xffff);
3831 	for_each_pipe(pipe) {
3832 		/* Clear enable bits; then clear status bits */
3833 		I915_WRITE(PIPESTAT(pipe), 0);
3834 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3835 	}
3836 	I915_WRITE(IMR, 0xffffffff);
3837 	I915_WRITE(IER, 0x0);
3838 
3839 	I915_WRITE(IIR, I915_READ(IIR));
3840 }
3841 
3842 static void i965_irq_preinstall(struct drm_device *dev)
3843 {
3844 	struct drm_i915_private *dev_priv = dev->dev_private;
3845 	int pipe;
3846 
3847 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3848 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3849 
3850 	I915_WRITE(HWSTAM, 0xeffe);
3851 	for_each_pipe(pipe)
3852 		I915_WRITE(PIPESTAT(pipe), 0);
3853 	I915_WRITE(IMR, 0xffffffff);
3854 	I915_WRITE(IER, 0x0);
3855 	POSTING_READ(IER);
3856 }
3857 
3858 static int i965_irq_postinstall(struct drm_device *dev)
3859 {
3860 	struct drm_i915_private *dev_priv = dev->dev_private;
3861 	u32 enable_mask;
3862 	u32 error_mask;
3863 	unsigned long irqflags;
3864 
3865 	/* Unmask the interrupts that we always want on. */
3866 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3867 			       I915_DISPLAY_PORT_INTERRUPT |
3868 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3869 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3870 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3871 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3872 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3873 
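	/* The flip-pending bits are unmasked in IMR above so they latch
	 * in IIR, but they are left out of IER: flip completion is
	 * detected from IIR/ISR at vblank time rather than by raising an
	 * interrupt of its own.
	 */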
3874 	enable_mask = ~dev_priv->irq_mask;
3875 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3876 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3877 	enable_mask |= I915_USER_INTERRUPT;
3878 
3879 	if (IS_G4X(dev))
3880 		enable_mask |= I915_BSD_USER_INTERRUPT;
3881 
3882 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3883 	 * just to make the assert_spin_locked check happy. */
3884 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3885 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3886 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3887 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3888 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3889 
3890 	/*
3891 	 * Enable some error detection; note that the instruction error mask
3892 	 * bit is reserved, so we leave it masked.
3893 	 */
3894 	if (IS_G4X(dev)) {
3895 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3896 			       GM45_ERROR_MEM_PRIV |
3897 			       GM45_ERROR_CP_PRIV |
3898 			       I915_ERROR_MEMORY_REFRESH);
3899 	} else {
3900 		error_mask = ~(I915_ERROR_PAGE_TABLE |
3901 			       I915_ERROR_MEMORY_REFRESH);
3902 	}
3903 	I915_WRITE(EMR, error_mask);
3904 
3905 	I915_WRITE(IMR, dev_priv->irq_mask);
3906 	I915_WRITE(IER, enable_mask);
3907 	POSTING_READ(IER);
3908 
3909 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3910 	POSTING_READ(PORT_HOTPLUG_EN);
3911 
3912 	i915_enable_asle_pipestat(dev);
3913 
3914 	return 0;
3915 }
3916 
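/* Program the hotplug detection logic for the pins that currently have
 * hotplug enabled.  The caller must hold dev_priv->irq_lock (see the
 * assert_spin_locked below).
 */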
3917 static void i915_hpd_irq_setup(struct drm_device *dev)
3918 {
3919 	struct drm_i915_private *dev_priv = dev->dev_private;
3920 	struct drm_mode_config *mode_config = &dev->mode_config;
3921 	struct intel_encoder *intel_encoder;
3922 	u32 hotplug_en;
3923 
3924 	assert_spin_locked(&dev_priv->irq_lock);
3925 
3926 	if (I915_HAS_HOTPLUG(dev)) {
3927 		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3928 		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3929 		/* Note HDMI and DP share hotplug bits, and the
3930 		 * enable bits are the same for all generations. */
3931 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3932 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3933 				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3934 		/* Programming the CRT detection parameters tends to
3935 		 * generate a spurious hotplug event about three
3936 		 * seconds later, so just do it once.
3937 		 */
3938 		if (IS_G4X(dev))
3939 			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3940 		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3941 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3942 
3943 		/* Ignore TV since it's buggy */
3944 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3945 	}
3946 }
3947 
3948 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
3949 {
3950 	struct drm_device *dev = (struct drm_device *) arg;
3951 	struct drm_i915_private *dev_priv = dev->dev_private;
3952 	u32 iir, new_iir;
3953 	u32 pipe_stats[I915_MAX_PIPES];
3954 	unsigned long irqflags;
3955 	int ret = IRQ_NONE, pipe;
3956 	u32 flip_mask =
3957 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3958 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3959 
3960 	iir = I915_READ(IIR);
3961 
3962 	for (;;) {
3963 		bool irq_received = (iir & ~flip_mask) != 0;
3964 		bool blc_event = false;
3965 
3966 		/* Can't rely on pipestat interrupt bit in iir as it might
3967 		 * have been cleared after the pipestat interrupt was received.
3968 		 * It doesn't set the bit in iir again, but it still produces
3969 		 * interrupts (for non-MSI).
3970 		 */
3971 		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3972 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3973 			i915_handle_error(dev, false,
3974 					  "Command parser error, iir 0x%08x",
3975 					  iir);
3976 
3977 		for_each_pipe(pipe) {
3978 			int reg = PIPESTAT(pipe);
3979 			pipe_stats[pipe] = I915_READ(reg);
3980 
3981 			/*
3982 			 * Clear the PIPE*STAT regs before the IIR
3983 			 */
3984 			if (pipe_stats[pipe] & 0x8000ffff) {
3985 				I915_WRITE(reg, pipe_stats[pipe]);
3986 				irq_received = true;
3987 			}
3988 		}
3989 		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3990 
3991 		if (!irq_received)
3992 			break;
3993 
3994 		ret = IRQ_HANDLED;
3995 
3996 		/* Consume port.  Then clear IIR or we'll miss events */
3997 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3998 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3999 			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
4000 								  HOTPLUG_INT_STATUS_G4X :
4001 								  HOTPLUG_INT_STATUS_I915);
4002 
4003 			intel_hpd_irq_handler(dev, hotplug_trigger,
4004 					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);
4005 
4006 			if (IS_G4X(dev) &&
4007 			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
4008 				dp_aux_irq_handler(dev);
4009 
4010 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
4011 			POSTING_READ(PORT_HOTPLUG_STAT);
4012 		}
4013 
4014 		I915_WRITE(IIR, iir & ~flip_mask);
4015 		new_iir = I915_READ(IIR); /* Flush posted writes */
4016 
4017 		if (iir & I915_USER_INTERRUPT)
4018 			notify_ring(dev, &dev_priv->ring[RCS]);
4019 		if (iir & I915_BSD_USER_INTERRUPT)
4020 			notify_ring(dev, &dev_priv->ring[VCS]);
4021 
4022 		for_each_pipe(pipe) {
4023 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4024 			    i915_handle_vblank(dev, pipe, pipe, iir))
4025 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4026 
4027 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4028 				blc_event = true;
4029 
4030 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4031 				i9xx_pipe_crc_irq_handler(dev, pipe);
4032 
4033 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4034 			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4035 				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4036 		}
4037 
4038 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4039 			intel_opregion_asle_intr(dev);
4040 
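		/* GMBUS events are reported through pipe A's PIPESTAT
		 * register (enabled in i965_irq_postinstall), hence the
		 * pipe_stats[0] check.
		 */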
4041 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4042 			gmbus_irq_handler(dev);
4043 
4044 		/* With MSI, interrupts are only generated when iir
4045 		 * transitions from zero to nonzero.  If another bit got
4046 		 * set while we were handling the existing iir bits, then
4047 		 * we would never get another interrupt.
4048 		 *
4049 		 * This is fine on non-MSI as well, as if we hit this path
4050 		 * we avoid exiting the interrupt handler only to generate
4051 		 * another one.
4052 		 *
4053 		 * Note that for MSI this could cause a stray interrupt report
4054 		 * if an interrupt landed in the time between writing IIR and
4055 		 * the posting read.  This should be rare enough to never
4056 		 * trigger the 99% of 100,000 interrupts test for disabling
4057 		 * stray interrupts.
4058 		 */
4059 		iir = new_iir;
4060 	}
4061 
4062 	i915_update_dri1_breadcrumb(dev);
4063 
4064 	return ret;
4065 }
4066 
4067 static void i965_irq_uninstall(struct drm_device * dev)
4068 {
4069 	struct drm_i915_private *dev_priv = dev->dev_private;
4070 	int pipe;
4071 
4072 	if (!dev_priv)
4073 		return;
4074 
4075 	intel_hpd_irq_uninstall(dev_priv);
4076 
4077 	I915_WRITE(PORT_HOTPLUG_EN, 0);
4078 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4079 
4080 	I915_WRITE(HWSTAM, 0xffffffff);
4081 	for_each_pipe(pipe)
4082 		I915_WRITE(PIPESTAT(pipe), 0);
4083 	I915_WRITE(IMR, 0xffffffff);
4084 	I915_WRITE(IER, 0x0);
4085 
4086 	for_each_pipe(pipe)
4087 		I915_WRITE(PIPESTAT(pipe),
4088 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4089 	I915_WRITE(IIR, I915_READ(IIR));
4090 }
4091 
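/* Timer callback that re-arms hotplug pins which were disabled after an
 * interrupt storm, switching the affected connectors back from polling
 * to HPD-driven detection.
 */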
4092 static void intel_hpd_irq_reenable(unsigned long data)
4093 {
4094 	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
4095 	struct drm_device *dev = dev_priv->dev;
4096 	struct drm_mode_config *mode_config = &dev->mode_config;
4097 	unsigned long irqflags;
4098 	int i;
4099 
4100 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4101 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4102 		struct drm_connector *connector;
4103 
4104 		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4105 			continue;
4106 
4107 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4108 
4109 		list_for_each_entry(connector, &mode_config->connector_list, head) {
4110 			struct intel_connector *intel_connector = to_intel_connector(connector);
4111 
4112 			if (intel_connector->encoder->hpd_pin == i) {
4113 				if (connector->polled != intel_connector->polled)
4114 					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4115 							 drm_get_connector_name(connector));
4116 				connector->polled = intel_connector->polled;
4117 				if (!connector->polled)
4118 					connector->polled = DRM_CONNECTOR_POLL_HPD;
4119 			}
4120 		}
4121 	}
4122 	if (dev_priv->display.hpd_irq_setup)
4123 		dev_priv->display.hpd_irq_setup(dev);
4124 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4125 }
4126 
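/* One-time interrupt setup: initialize the deferred-work handlers and
 * timers, then pick the IRQ entry points and vblank hooks that match
 * the hardware generation.
 */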
4127 void intel_irq_init(struct drm_device *dev)
4128 {
4129 	struct drm_i915_private *dev_priv = dev->dev_private;
4130 
4131 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4132 	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4133 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4134 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4135 
4136 	/* Let's track the enabled rps events */
4137 	dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4138 
4139 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4140 		    i915_hangcheck_elapsed,
4141 		    (unsigned long) dev);
4142 	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
4143 		    (unsigned long) dev_priv);
4144 
4145 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4146 
4147 	if (IS_GEN2(dev)) {
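		/* Gen2 doesn't have a hardware frame counter. */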
4148 		dev->max_vblank_count = 0;
4149 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4150 	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
4151 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4152 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4153 	} else {
4154 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4155 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4156 	}
4157 
4158 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4159 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4160 		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4161 	}
4162 
4163 	if (IS_VALLEYVIEW(dev)) {
4164 		dev->driver->irq_handler = valleyview_irq_handler;
4165 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4166 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4167 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4168 		dev->driver->enable_vblank = valleyview_enable_vblank;
4169 		dev->driver->disable_vblank = valleyview_disable_vblank;
4170 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4171 	} else if (IS_GEN8(dev)) {
4172 		dev->driver->irq_handler = gen8_irq_handler;
4173 		dev->driver->irq_preinstall = gen8_irq_preinstall;
4174 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4175 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4176 		dev->driver->enable_vblank = gen8_enable_vblank;
4177 		dev->driver->disable_vblank = gen8_disable_vblank;
4178 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4179 	} else if (HAS_PCH_SPLIT(dev)) {
4180 		dev->driver->irq_handler = ironlake_irq_handler;
4181 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
4182 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4183 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4184 		dev->driver->enable_vblank = ironlake_enable_vblank;
4185 		dev->driver->disable_vblank = ironlake_disable_vblank;
4186 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4187 	} else {
4188 		if (INTEL_INFO(dev)->gen == 2) {
4189 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4190 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4191 			dev->driver->irq_handler = i8xx_irq_handler;
4192 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4193 		} else if (INTEL_INFO(dev)->gen == 3) {
4194 			dev->driver->irq_preinstall = i915_irq_preinstall;
4195 			dev->driver->irq_postinstall = i915_irq_postinstall;
4196 			dev->driver->irq_uninstall = i915_irq_uninstall;
4197 			dev->driver->irq_handler = i915_irq_handler;
4198 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4199 		} else {
4200 			dev->driver->irq_preinstall = i965_irq_preinstall;
4201 			dev->driver->irq_postinstall = i965_irq_postinstall;
4202 			dev->driver->irq_uninstall = i965_irq_uninstall;
4203 			dev->driver->irq_handler = i965_irq_handler;
4204 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4205 		}
4206 		dev->driver->enable_vblank = i915_enable_vblank;
4207 		dev->driver->disable_vblank = i915_disable_vblank;
4208 	}
4209 }
4210 
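/* Reset the per-pin hotplug state tracking and (re)program the
 * hardware's hotplug detection, restoring each connector's polling
 * mode and preferring HPD-driven detection where a pin is available.
 */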
4211 void intel_hpd_init(struct drm_device *dev)
4212 {
4213 	struct drm_i915_private *dev_priv = dev->dev_private;
4214 	struct drm_mode_config *mode_config = &dev->mode_config;
4215 	struct drm_connector *connector;
4216 	unsigned long irqflags;
4217 	int i;
4218 
4219 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4220 		dev_priv->hpd_stats[i].hpd_cnt = 0;
4221 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4222 	}
4223 	list_for_each_entry(connector, &mode_config->connector_list, head) {
4224 		struct intel_connector *intel_connector = to_intel_connector(connector);
4225 		connector->polled = intel_connector->polled;
4226 		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4227 			connector->polled = DRM_CONNECTOR_POLL_HPD;
4228 	}
4229 
4230 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4231 	 * just to make the assert_spin_locked checks happy. */
4232 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4233 	if (dev_priv->display.hpd_irq_setup)
4234 		dev_priv->display.hpd_irq_setup(dev);
4235 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4236 }
4237 
4238 /* Disable interrupts so we can allow runtime PM. */
4239 void hsw_runtime_pm_disable_interrupts(struct drm_device *dev)
4240 {
4241 	struct drm_i915_private *dev_priv = dev->dev_private;
4242 	unsigned long irqflags;
4243 
4244 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4245 
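	/* Save the current interrupt mask/enable state so that
	 * hsw_runtime_pm_restore_interrupts() can reinstate it on resume.
	 */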
4246 	dev_priv->pm.regsave.deimr = I915_READ(DEIMR);
4247 	dev_priv->pm.regsave.sdeimr = I915_READ(SDEIMR);
4248 	dev_priv->pm.regsave.gtimr = I915_READ(GTIMR);
4249 	dev_priv->pm.regsave.gtier = I915_READ(GTIER);
4250 	dev_priv->pm.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
4251 
4252 	ironlake_disable_display_irq(dev_priv, 0xffffffff);
4253 	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
4254 	ilk_disable_gt_irq(dev_priv, 0xffffffff);
4255 	snb_disable_pm_irq(dev_priv, 0xffffffff);
4256 
4257 	dev_priv->pm.irqs_disabled = true;
4258 
4259 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4260 }
4261 
4262 /* Restore interrupts so we can recover from runtime PM. */
4263 void hsw_runtime_pm_restore_interrupts(struct drm_device *dev)
4264 {
4265 	struct drm_i915_private *dev_priv = dev->dev_private;
4266 	unsigned long irqflags;
4267 	uint32_t val;
4268 
4269 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4270 
4271 	val = I915_READ(DEIMR);
4272 	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);
4273 
4274 	val = I915_READ(SDEIMR);
4275 	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);
4276 
4277 	val = I915_READ(GTIMR);
4278 	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);
4279 
4280 	val = I915_READ(GEN6_PMIMR);
4281 	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);
4282 
4283 	dev_priv->pm.irqs_disabled = false;
4284 
4285 	ironlake_enable_display_irq(dev_priv, ~dev_priv->pm.regsave.deimr);
4286 	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pm.regsave.sdeimr);
4287 	ilk_enable_gt_irq(dev_priv, ~dev_priv->pm.regsave.gtimr);
4288 	snb_enable_pm_irq(dev_priv, ~dev_priv->pm.regsave.gen6_pmimr);
4289 	I915_WRITE(GTIER, dev_priv->pm.regsave.gtier);
4290 
4291 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4292 }
4293