xref: /dflybsd-src/sys/dev/drm/i915/i915_irq.c (revision f3f3eadbf9de7a55ef1ff8cb23a68641403906ea)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "intel_drv.h"
33 
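/*
 * Per-platform lookup tables from HPD pin (enum hpd_pin) to the matching
 * hotplug interrupt trigger/enable/status bits; the hotplug IRQ handlers
 * below use these to decode which pins fired.
 */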
34 static const u32 hpd_ibx[] = {
35 	[HPD_CRT] = SDE_CRT_HOTPLUG,
36 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
37 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
38 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
39 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
40 };
41 
42 static const u32 hpd_cpt[] = {
43 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
44 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
45 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
46 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
47 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
48 };
49 
50 static const u32 hpd_mask_i915[] = {
51 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
52 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
53 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
54 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
55 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
56 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
57 };
58 
59 static const u32 hpd_status_gen4[] = {
60 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
61 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
62 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
63 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
64 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
65 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
66 };
67 
68 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
69 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
70 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
71 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
72 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
73 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
74 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
75 };
76 
77 /* For display hotplug interrupt */
78 static void
79 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
80 {
81 	if ((dev_priv->irq_mask & mask) != 0) {
82 		dev_priv->irq_mask &= ~mask;
83 		I915_WRITE(DEIMR, dev_priv->irq_mask);
84 		POSTING_READ(DEIMR);
85 	}
86 }
87 
88 static void
89 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
90 {
91 	if ((dev_priv->irq_mask & mask) != mask) {
92 		dev_priv->irq_mask |= mask;
93 		I915_WRITE(DEIMR, dev_priv->irq_mask);
94 		POSTING_READ(DEIMR);
95 	}
96 }
97 
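/*
 * The error interrupt enable is shared by all pipes/transcoders on IVB/HSW
 * and CPT/PPT, so it may only be (re)enabled when no pipe (or transcoder)
 * still has FIFO underrun reporting disabled.
 */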
98 static bool ivb_can_enable_err_int(struct drm_device *dev)
99 {
100 	struct drm_i915_private *dev_priv = dev->dev_private;
101 	struct intel_crtc *crtc;
102 	enum i915_pipe pipe;
103 
104 	for_each_pipe(pipe) {
105 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
106 
107 		if (crtc->cpu_fifo_underrun_disabled)
108 			return false;
109 	}
110 
111 	return true;
112 }
113 
114 static bool cpt_can_enable_serr_int(struct drm_device *dev)
115 {
116 	struct drm_i915_private *dev_priv = dev->dev_private;
117 	enum i915_pipe pipe;
118 	struct intel_crtc *crtc;
119 
120 	for_each_pipe(pipe) {
121 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
122 
123 		if (crtc->pch_fifo_underrun_disabled)
124 			return false;
125 	}
126 
127 	return true;
128 }
129 
130 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
131 						 enum i915_pipe pipe, bool enable)
132 {
133 	struct drm_i915_private *dev_priv = dev->dev_private;
134 	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
135 					  DE_PIPEB_FIFO_UNDERRUN;
136 
137 	if (enable)
138 		ironlake_enable_display_irq(dev_priv, bit);
139 	else
140 		ironlake_disable_display_irq(dev_priv, bit);
141 }
142 
143 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
144 						  bool enable)
145 {
146 	struct drm_i915_private *dev_priv = dev->dev_private;
147 
148 	if (enable) {
149 		if (!ivb_can_enable_err_int(dev))
150 			return;
151 
152 		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
153 					 ERR_INT_FIFO_UNDERRUN_B |
154 					 ERR_INT_FIFO_UNDERRUN_C);
155 
156 		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
157 	} else {
158 		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
159 	}
160 }
161 
162 static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
163 					    bool enable)
164 {
165 	struct drm_device *dev = crtc->base.dev;
166 	struct drm_i915_private *dev_priv = dev->dev_private;
167 	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
168 						SDE_TRANSB_FIFO_UNDER;
169 
170 	if (enable)
171 		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
172 	else
173 		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
174 
175 	POSTING_READ(SDEIMR);
176 }
177 
178 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
179 					    enum transcoder pch_transcoder,
180 					    bool enable)
181 {
182 	struct drm_i915_private *dev_priv = dev->dev_private;
183 
184 	if (enable) {
185 		if (!cpt_can_enable_serr_int(dev))
186 			return;
187 
188 		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
189 				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
190 				     SERR_INT_TRANS_C_FIFO_UNDERRUN);
191 
192 		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
193 	} else {
194 		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
195 	}
196 
197 	POSTING_READ(SDEIMR);
198 }
199 
200 /**
201  * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
202  * @dev: drm device
203  * @pipe: pipe
204  * @enable: true if we want to report FIFO underrun errors, false otherwise
205  *
206  * This function disables or enables CPU FIFO underrun reporting for a specific
207  * pipe. Note that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
208  * reporting for one pipe may also disable all other CPU error interrupts for
209  * the other pipes, because there is just one interrupt mask/enable
210  * bit for all the pipes.
211  *
212  * Returns the previous state of underrun reporting.
213  */
214 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
215 					   enum i915_pipe pipe, bool enable)
216 {
217 	struct drm_i915_private *dev_priv = dev->dev_private;
218 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
219 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
220 	bool ret;
221 
222 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
223 
224 	ret = !intel_crtc->cpu_fifo_underrun_disabled;
225 
226 	if (enable == ret)
227 		goto done;
228 
229 	intel_crtc->cpu_fifo_underrun_disabled = !enable;
230 
231 	if (IS_GEN5(dev) || IS_GEN6(dev))
232 		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
233 	else if (IS_GEN7(dev))
234 		ivybridge_set_fifo_underrun_reporting(dev, enable);
235 
236 done:
237 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
238 	return ret;
239 }
240 
241 /**
242  * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
243  * @dev: drm device
244  * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
245  * @enable: true if we want to report FIFO underrun errors, false otherwise
246  *
247  * This function disables or enables PCH FIFO underrun reporting for a specific
248  * PCH transcoder. Note that on some PCHs (e.g. CPT/PPT), disabling FIFO
249  * underrun reporting for one transcoder may also disable all other PCH
250  * error interrupts for the other transcoders, because there is just
251  * one interrupt mask/enable bit for all the transcoders.
252  *
253  * Returns the previous state of underrun reporting.
254  */
255 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
256 					   enum transcoder pch_transcoder,
257 					   bool enable)
258 {
259 	struct drm_i915_private *dev_priv = dev->dev_private;
260 	enum i915_pipe p;
261 	struct drm_crtc *crtc;
262 	struct intel_crtc *intel_crtc;
263 	bool ret;
264 
265 	if (HAS_PCH_LPT(dev)) {
266 		crtc = NULL;
267 		for_each_pipe(p) {
268 			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
269 			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
270 				crtc = c;
271 				break;
272 			}
273 		}
274 		if (!crtc) {
275 			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
276 			return false;
277 		}
278 	} else {
279 		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
280 	}
281 	intel_crtc = to_intel_crtc(crtc);
282 
283 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
284 
285 	ret = !intel_crtc->pch_fifo_underrun_disabled;
286 
287 	if (enable == ret)
288 		goto done;
289 
290 	intel_crtc->pch_fifo_underrun_disabled = !enable;
291 
292 	if (HAS_PCH_IBX(dev))
293 		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
294 	else
295 		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
296 
297 done:
298 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
299 	return ret;
300 }
301 
302 
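/*
 * PIPESTAT keeps interrupt enable bits in its upper 16 bits and the matching
 * (write-1-to-clear) status bits in the lower 16 bits, hence the 0x7fff0000
 * read mask and the "mask >> 16" status clear below.
 */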
303 void
304 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
305 {
306 	u32 reg = PIPESTAT(pipe);
307 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
308 
309 	if ((pipestat & mask) == mask)
310 		return;
311 
312 	/* Enable the interrupt, clear any pending status */
313 	pipestat |= mask | (mask >> 16);
314 	I915_WRITE(reg, pipestat);
315 	POSTING_READ(reg);
316 }
317 
318 void
319 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
320 {
321 	u32 reg = PIPESTAT(pipe);
322 	u32 pipestat = I915_READ(reg) & 0x7fff0000;
323 
324 	if ((pipestat & mask) == 0)
325 		return;
326 
327 	pipestat &= ~mask;
328 	I915_WRITE(reg, pipestat);
329 	POSTING_READ(reg);
330 }
331 
332 /**
333  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
334  */
335 static void i915_enable_asle_pipestat(struct drm_device *dev)
336 {
337 	drm_i915_private_t *dev_priv = dev->dev_private;
338 
339 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
340 		return;
341 
342 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
343 
344 	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
345 	if (INTEL_INFO(dev)->gen >= 4)
346 		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
347 
348 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
349 }
350 
351 /**
352  * i915_pipe_enabled - check if a pipe is enabled
353  * @dev: DRM device
354  * @pipe: pipe to check
355  *
356  * Reading certain registers when the pipe is disabled can hang the chip.
357  * Use this routine to make sure the PLL is running and the pipe is active
358  * before reading such registers if unsure.
359  */
360 static int
361 i915_pipe_enabled(struct drm_device *dev, int pipe)
362 {
363 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
364 
365 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
366 		/* Locking is horribly broken here, but whatever. */
367 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
368 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
369 
370 		return intel_crtc->active;
371 	} else {
372 		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
373 	}
374 }
375 
376 /* Called from drm generic code, passed a 'crtc', which
377  * we use as a pipe index
378  */
379 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
380 {
381 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
382 	unsigned long high_frame;
383 	unsigned long low_frame;
384 	u32 high1, high2, low;
385 
386 	if (!i915_pipe_enabled(dev, pipe)) {
387 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
388 				"pipe %c\n", pipe_name(pipe));
389 		return 0;
390 	}
391 
392 	high_frame = PIPEFRAME(pipe);
393 	low_frame = PIPEFRAMEPIXEL(pipe);
394 
395 	/*
396 	 * High & low register fields aren't synchronized, so make sure
397 	 * we get a low value that's stable across two reads of the high
398 	 * register.
399 	 */
400 	do {
401 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
402 		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
403 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
404 	} while (high1 != high2);
405 
406 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
407 	low >>= PIPE_FRAME_LOW_SHIFT;
408 	return (high1 << 8) | low;
409 }
410 
411 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
412 {
413 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
414 	int reg = PIPE_FRMCOUNT_GM45(pipe);
415 
416 	if (!i915_pipe_enabled(dev, pipe)) {
417 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
418 				 "pipe %c\n", pipe_name(pipe));
419 		return 0;
420 	}
421 
422 	return I915_READ(reg);
423 }
424 
425 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
426 			     int *vpos, int *hpos)
427 {
428 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
429 	u32 vbl = 0, position = 0;
430 	int vbl_start, vbl_end, htotal, vtotal;
431 	bool in_vbl = true;
432 	int ret = 0;
433 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
434 								      pipe);
435 
436 	if (!i915_pipe_enabled(dev, pipe)) {
437 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
438 				 "pipe %c\n", pipe_name(pipe));
439 		return 0;
440 	}
441 
442 	/* Get vtotal. */
443 	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
444 
445 	if (INTEL_INFO(dev)->gen >= 4) {
446 		/* No obvious pixelcount register. Only query vertical
447 		 * scanout position from Display scan line register.
448 		 */
449 		position = I915_READ(PIPEDSL(pipe));
450 
451 		/* Decode into vertical scanout position. Don't have
452 		 * horizontal scanout position.
453 		 */
454 		*vpos = position & 0x1fff;
455 		*hpos = 0;
456 	} else {
457 		/* Have access to pixelcount since start of frame.
458 		 * We can split this into vertical and horizontal
459 		 * scanout position.
460 		 */
461 		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
462 
463 		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
464 		*vpos = position / htotal;
465 		*hpos = position - (*vpos * htotal);
466 	}
467 
468 	/* Query vblank area. */
469 	vbl = I915_READ(VBLANK(cpu_transcoder));
470 
471 	/* Test position against vblank region. */
472 	vbl_start = vbl & 0x1fff;
473 	vbl_end = (vbl >> 16) & 0x1fff;
474 
475 	if ((*vpos < vbl_start) || (*vpos > vbl_end))
476 		in_vbl = false;
477 
478 	/* Inside "upper part" of vblank area? Apply corrective offset: */
479 	if (in_vbl && (*vpos >= vbl_start))
480 		*vpos = *vpos - vtotal;
481 
482 	/* Readouts valid? */
483 	if (vbl > 0)
484 		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
485 
486 	/* In vblank? */
487 	if (in_vbl)
488 		ret |= DRM_SCANOUTPOS_INVBL;
489 
490 	return ret;
491 }
492 
493 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
494 			      int *max_error,
495 			      struct timeval *vblank_time,
496 			      unsigned flags)
497 {
498 	struct drm_crtc *crtc;
499 
500 	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
501 		DRM_ERROR("Invalid crtc %d\n", pipe);
502 		return -EINVAL;
503 	}
504 
505 	/* Get drm_crtc to timestamp: */
506 	crtc = intel_get_crtc_for_pipe(dev, pipe);
507 	if (crtc == NULL) {
508 		DRM_ERROR("Invalid crtc %d\n", pipe);
509 		return -EINVAL;
510 	}
511 
512 	if (!crtc->enabled) {
513 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
514 		return -EBUSY;
515 	}
516 
517 	/* Helper routine in DRM core does all the work: */
518 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
519 						     vblank_time, flags,
520 						     crtc);
521 }
522 
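/* Re-run detection on one connector; returns true if its status changed. */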
523 static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
524 {
525 	enum drm_connector_status old_status;
526 
527 	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
528 	old_status = connector->status;
529 
530 	connector->status = connector->funcs->detect(connector, false);
531 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
532 		      connector->base.id,
533 		      drm_get_connector_name(connector),
534 		      old_status, connector->status);
535 	return (old_status != connector->status);
536 }
537 
538 /*
539  * Handle hotplug events outside the interrupt handler proper.
540  */
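/* Delay (ms) before HPD interrupts are re-enabled after an interrupt storm. */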
541 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
542 
543 static void i915_hotplug_work_func(struct work_struct *work)
544 {
545 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
546 						    hotplug_work);
547 	struct drm_device *dev = dev_priv->dev;
548 	struct drm_mode_config *mode_config = &dev->mode_config;
549 	struct intel_connector *intel_connector;
550 	struct intel_encoder *intel_encoder;
551 	struct drm_connector *connector;
552 	bool hpd_disabled = false;
553 	bool changed = false;
554 	u32 hpd_event_bits;
555 
556 	/* HPD irq before everything is fully set up. */
557 	if (!dev_priv->enable_hotplug_processing)
558 		return;
559 
560 	mutex_lock(&mode_config->mutex);
561 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
562 
563 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
564 
565 	hpd_event_bits = dev_priv->hpd_event_bits;
566 	dev_priv->hpd_event_bits = 0;
567 	list_for_each_entry(connector, &mode_config->connector_list, head) {
568 		intel_connector = to_intel_connector(connector);
569 		intel_encoder = intel_connector->encoder;
570 		if (intel_encoder->hpd_pin > HPD_NONE &&
571 		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
572 		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
573 			DRM_INFO("HPD interrupt storm detected on connector %s: "
574 				 "switching from hotplug detection to polling\n",
575 				drm_get_connector_name(connector));
576 			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
577 			connector->polled = DRM_CONNECTOR_POLL_CONNECT
578 				| DRM_CONNECTOR_POLL_DISCONNECT;
579 			hpd_disabled = true;
580 		}
581 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
582 			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
583 				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
584 		}
585 	}
586 	 /* If there were previously no outputs to poll, polling was disabled;
587 	  * make sure it is enabled again now that HPD has been disabled on
588 	  * some connectors and they need to be polled. */
589 	if (hpd_disabled) {
590 		drm_kms_helper_poll_enable(dev);
591 		mod_timer(&dev_priv->hotplug_reenable_timer,
592 			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
593 	}
594 
595 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
596 
597 	list_for_each_entry(connector, &mode_config->connector_list, head) {
598 		intel_connector = to_intel_connector(connector);
599 		intel_encoder = intel_connector->encoder;
600 		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
601 			if (intel_encoder->hot_plug)
602 				intel_encoder->hot_plug(intel_encoder);
603 			if (intel_hpd_irq_event(dev, connector))
604 				changed = true;
605 		}
606 	}
607 	mutex_unlock(&mode_config->mutex);
608 
609 	if (changed)
610 		drm_kms_helper_hotplug_event(dev);
611 }
612 
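/*
 * Ironlake DRPS: compare the hardware busy-up/busy-down averages against the
 * thresholds and step the current delay one notch up or down, clamped to the
 * [max_delay, min_delay] range (a lower delay value means a higher frequency).
 */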
613 static void ironlake_handle_rps_change(struct drm_device *dev)
614 {
615 	drm_i915_private_t *dev_priv = dev->dev_private;
616 	u32 busy_up, busy_down, max_avg, min_avg;
617 	u8 new_delay;
618 
619 	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
620 
621 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
622 
623 	new_delay = dev_priv->ips.cur_delay;
624 
625 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
626 	busy_up = I915_READ(RCPREVBSYTUPAVG);
627 	busy_down = I915_READ(RCPREVBSYTDNAVG);
628 	max_avg = I915_READ(RCBMAXAVG);
629 	min_avg = I915_READ(RCBMINAVG);
630 
631 	/* Handle RCS change request from hw */
632 	if (busy_up > max_avg) {
633 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
634 			new_delay = dev_priv->ips.cur_delay - 1;
635 		if (new_delay < dev_priv->ips.max_delay)
636 			new_delay = dev_priv->ips.max_delay;
637 	} else if (busy_down < min_avg) {
638 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
639 			new_delay = dev_priv->ips.cur_delay + 1;
640 		if (new_delay > dev_priv->ips.min_delay)
641 			new_delay = dev_priv->ips.min_delay;
642 	}
643 
644 	if (ironlake_set_drps(dev, new_delay))
645 		dev_priv->ips.cur_delay = new_delay;
646 
647 	lockmgr(&mchdev_lock, LK_RELEASE);
648 
649 	return;
650 }
651 
652 static void notify_ring(struct drm_device *dev,
653 			struct intel_ring_buffer *ring)
654 {
655 	struct drm_i915_private *dev_priv = dev->dev_private;
656 
657 	if (ring->obj == NULL)
658 		return;
659 
660 	wake_up_all(&ring->irq_queue);
661 	if (i915_enable_hangcheck) {
662 		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
663 			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
664 	}
665 }
666 
667 static void gen6_pm_rps_work(struct work_struct *work)
668 {
669 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
670 						    rps.work);
671 	u32 pm_iir, pm_imr;
672 	u8 new_delay;
673 
674 	lockmgr(&dev_priv->rps.lock, LK_EXCLUSIVE);
675 	pm_iir = dev_priv->rps.pm_iir;
676 	dev_priv->rps.pm_iir = 0;
677 	pm_imr = I915_READ(GEN6_PMIMR);
678 	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
679 	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
680 	lockmgr(&dev_priv->rps.lock, LK_RELEASE);
681 
682 	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
683 		return;
684 
685 	mutex_lock(&dev_priv->rps.hw_lock);
686 
687 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
688 		new_delay = dev_priv->rps.cur_delay + 1;
689 
690 		/*
691 		 * For better performance, jump directly
692 		 * to RPe if we're below it.
693 		 */
694 		if (IS_VALLEYVIEW(dev_priv->dev) &&
695 		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
696 			new_delay = dev_priv->rps.rpe_delay;
697 	} else
698 		new_delay = dev_priv->rps.cur_delay - 1;
699 
700 	/* sysfs frequency interfaces may have snuck in while servicing the
701 	 * interrupt
702 	 */
703 	if (new_delay >= dev_priv->rps.min_delay &&
704 	    new_delay <= dev_priv->rps.max_delay) {
705 		if (IS_VALLEYVIEW(dev_priv->dev))
706 			valleyview_set_rps(dev_priv->dev, new_delay);
707 		else
708 			gen6_set_rps(dev_priv->dev, new_delay);
709 	}
710 
711 	if (IS_VALLEYVIEW(dev_priv->dev)) {
712 		/*
713 		 * On VLV, when we enter RC6 we may not be at the minimum
714 		 * voltage level, so arm a timer to check.  It should only
715 		 * fire when there's activity or once after we've entered
716 		 * RC6, and then won't be re-armed until the next RPS interrupt.
717 		 */
718 		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
719 				 msecs_to_jiffies(100));
720 	}
721 
722 	mutex_unlock(&dev_priv->rps.hw_lock);
723 }
724 
725 
726 /**
727  * ivybridge_parity_work - Workqueue called when a parity error interrupt
728  * occurred.
729  * @work: workqueue struct
730  *
731  * Doesn't actually do anything except notify userspace. As a consequence of
732  * this event, userspace should try to remap the bad rows, since statistically
733  * the same row is likely to go bad again.
734  */
735 static void ivybridge_parity_work(struct work_struct *work)
736 {
737 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
738 						    l3_parity.error_work);
739 	u32 error_status, row, bank, subbank;
740 	char *parity_event[5];
741 	uint32_t misccpctl;
742 
743 	/* We must turn off DOP level clock gating to access the L3 registers.
744 	 * In order to prevent a get/put style interface, acquire struct mutex
745 	 * any time we access those registers.
746 	 */
747 	mutex_lock(&dev_priv->dev->struct_mutex);
748 
749 	misccpctl = I915_READ(GEN7_MISCCPCTL);
750 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
751 	POSTING_READ(GEN7_MISCCPCTL);
752 
753 	error_status = I915_READ(GEN7_L3CDERRST1);
754 	row = GEN7_PARITY_ERROR_ROW(error_status);
755 	bank = GEN7_PARITY_ERROR_BANK(error_status);
756 	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
757 
758 	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
759 				    GEN7_L3CDERRST1_ENABLE);
760 	POSTING_READ(GEN7_L3CDERRST1);
761 
762 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
763 
764 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
765 	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
766 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
767 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
768 
769 	mutex_unlock(&dev_priv->dev->struct_mutex);
770 
771 	parity_event[0] = "L3_PARITY_ERROR=1";
772 	parity_event[4] = NULL;
773 
774 	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
775 		  row, bank, subbank);
776 }
777 
778 static void ivybridge_handle_parity_error(struct drm_device *dev)
779 {
780 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
781 
782 	if (!HAS_L3_GPU_CACHE(dev))
783 		return;
784 
785 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
786 	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
787 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
788 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
789 
790 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
791 }
792 
793 static void snb_gt_irq_handler(struct drm_device *dev,
794 			       struct drm_i915_private *dev_priv,
795 			       u32 gt_iir)
796 {
797 
798 	if (gt_iir &
799 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
800 		notify_ring(dev, &dev_priv->ring[RCS]);
801 	if (gt_iir & GT_BSD_USER_INTERRUPT)
802 		notify_ring(dev, &dev_priv->ring[VCS]);
803 	if (gt_iir & GT_BLT_USER_INTERRUPT)
804 		notify_ring(dev, &dev_priv->ring[BCS]);
805 
806 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
807 		      GT_BSD_CS_ERROR_INTERRUPT |
808 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
809 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
810 		i915_handle_error(dev, false);
811 	}
812 
813 	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
814 		ivybridge_handle_parity_error(dev);
815 }
816 
817 /* Legacy way of handling PM interrupts */
818 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
819 				u32 pm_iir)
820 {
821 
822 	/*
823 	 * IIR bits should never already be set because IMR should
824 	 * prevent an interrupt from being shown in IIR. The warning
825 	 * indicates a case where we've unsafely cleared
826 	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
827 	 * type is not a problem, it points to a flaw in the logic.
828 	 *
829 	 * The mask bit in IMR is cleared by dev_priv->rps.work.
830 	 */
831 
832 	lockmgr(&dev_priv->rps.lock, LK_EXCLUSIVE);
833 	dev_priv->rps.pm_iir |= pm_iir;
834 	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
835 	POSTING_READ(GEN6_PMIMR);
836 	lockmgr(&dev_priv->rps.lock, LK_RELEASE);
837 
838 	queue_work(dev_priv->wq, &dev_priv->rps.work);
839 }
840 
841 #define HPD_STORM_DETECT_PERIOD 1000
842 #define HPD_STORM_THRESHOLD 5
843 
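/*
 * Shared HPD decode: note which pins triggered and count events per pin within
 * HPD_STORM_DETECT_PERIOD ms. A pin exceeding HPD_STORM_THRESHOLD is treated
 * as an interrupt storm and marked disabled so the hotplug work handler can
 * fall back to polling it.
 */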
844 static inline void intel_hpd_irq_handler(struct drm_device *dev,
845 					 u32 hotplug_trigger,
846 					 const u32 *hpd)
847 {
848 	drm_i915_private_t *dev_priv = dev->dev_private;
849 	int i;
850 	bool storm_detected = false;
851 
852 	if (!hotplug_trigger)
853 		return;
854 
855 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
856 
857 	for (i = 1; i < HPD_NUM_PINS; i++) {
858 
859 		if (!(hpd[i] & hotplug_trigger) ||
860 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
861 			continue;
862 
863 		dev_priv->hpd_event_bits |= (1 << i);
864 		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
865 				   dev_priv->hpd_stats[i].hpd_last_jiffies
866 				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
867 			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
868 			dev_priv->hpd_stats[i].hpd_cnt = 0;
869 		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
870 			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
871 			dev_priv->hpd_event_bits &= ~(1 << i);
872 			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
873 			storm_detected = true;
874 		} else {
875 			dev_priv->hpd_stats[i].hpd_cnt++;
876 		}
877 	}
878 
879 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
880 
881 	if (storm_detected)
882 		dev_priv->display.hpd_irq_setup(dev);
883 
884 	queue_work(dev_priv->wq,
885 		   &dev_priv->hotplug_work);
886 }
887 
888 static void gmbus_irq_handler(struct drm_device *dev)
889 {
890 	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
891 
892 	wake_up_all(&dev_priv->gmbus_wait_queue);
893 }
894 
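/* DP AUX completions wait on the same queue as GMBUS here, so wake it too. */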
895 static void dp_aux_irq_handler(struct drm_device *dev)
896 {
897 	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
898 
899 	wake_up_all(&dev_priv->gmbus_wait_queue);
900 }
901 
902 /* Unlike gen6_queue_rps_work() from which this function is originally derived,
903  * we must be able to deal with other PM interrupts. This is complicated because
904  * of the way in which we use the masks to defer the RPS work (which for
905  * posterity is necessary because of forcewake).
906  */
907 static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
908 			       u32 pm_iir)
909 {
910 	lockmgr(&dev_priv->rps.lock, LK_EXCLUSIVE);
911 	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
912 	if (dev_priv->rps.pm_iir) {
913 		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
914 		/* never want to mask useful interrupts. (also posting read) */
915 		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
916 		/* TODO: if queue_work is slow, move it out of the spinlock */
917 		queue_work(dev_priv->wq, &dev_priv->rps.work);
918 	}
919 	lockmgr(&dev_priv->rps.lock, LK_RELEASE);
920 
921 	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
922 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
923 			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
924 
925 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
926 			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
927 			i915_handle_error(dev_priv->dev, false);
928 		}
929 	}
930 }
931 
932 static irqreturn_t valleyview_irq_handler(void *arg)
933 {
934 	struct drm_device *dev = (struct drm_device *) arg;
935 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
936 	u32 iir, gt_iir, pm_iir;
937 	int pipe;
938 	u32 pipe_stats[I915_MAX_PIPES];
939 
940 	atomic_inc(&dev_priv->irq_received);
941 
942 	while (true) {
943 		iir = I915_READ(VLV_IIR);
944 		gt_iir = I915_READ(GTIIR);
945 		pm_iir = I915_READ(GEN6_PMIIR);
946 
947 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
948 			goto out;
949 
950 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
951 
952 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
953 		for_each_pipe(pipe) {
954 			int reg = PIPESTAT(pipe);
955 			pipe_stats[pipe] = I915_READ(reg);
956 
957 			/*
958 			 * Clear the PIPE*STAT regs before the IIR
959 			 */
960 			if (pipe_stats[pipe] & 0x8000ffff) {
961 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
962 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
963 							 pipe_name(pipe));
964 				I915_WRITE(reg, pipe_stats[pipe]);
965 			}
966 		}
967 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
968 
969 		for_each_pipe(pipe) {
970 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
971 				drm_handle_vblank(dev, pipe);
972 
973 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
974 				intel_prepare_page_flip(dev, pipe);
975 				intel_finish_page_flip(dev, pipe);
976 			}
977 		}
978 
979 		/* Consume port.  Then clear IIR or we'll miss events */
980 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
981 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
982 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
983 
984 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
985 					 hotplug_status);
986 
987 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
988 
989 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
990 			I915_READ(PORT_HOTPLUG_STAT);
991 		}
992 
993 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
994 			gmbus_irq_handler(dev);
995 
996 		if (pm_iir & GEN6_PM_RPS_EVENTS)
997 			gen6_queue_rps_work(dev_priv, pm_iir);
998 
999 		I915_WRITE(GTIIR, gt_iir);
1000 		I915_WRITE(GEN6_PMIIR, pm_iir);
1001 		I915_WRITE(VLV_IIR, iir);
1002 	}
1003 
1004 out:
1005 	return;
1006 }
1007 
1008 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1009 {
1010 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1011 	int pipe;
1012 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1013 
1014 	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1015 
1016 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1017 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1018 			       SDE_AUDIO_POWER_SHIFT);
1019 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1020 				 port_name(port));
1021 	}
1022 
1023 	if (pch_iir & SDE_AUX_MASK)
1024 		dp_aux_irq_handler(dev);
1025 
1026 	if (pch_iir & SDE_GMBUS)
1027 		gmbus_irq_handler(dev);
1028 
1029 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1030 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1031 
1032 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1033 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1034 
1035 	if (pch_iir & SDE_POISON)
1036 		DRM_ERROR("PCH poison interrupt\n");
1037 
1038 	if (pch_iir & SDE_FDI_MASK)
1039 		for_each_pipe(pipe)
1040 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1041 					 pipe_name(pipe),
1042 					 I915_READ(FDI_RX_IIR(pipe)));
1043 
1044 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1045 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1046 
1047 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1048 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1049 
1050 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1051 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1052 							  false))
1053 			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1054 
1055 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1056 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1057 							  false))
1058 			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1059 }
1060 
1061 static void ivb_err_int_handler(struct drm_device *dev)
1062 {
1063 	struct drm_i915_private *dev_priv = dev->dev_private;
1064 	u32 err_int = I915_READ(GEN7_ERR_INT);
1065 
1066 	if (err_int & ERR_INT_POISON)
1067 		DRM_ERROR("Poison interrupt\n");
1068 
1069 	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
1070 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1071 			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1072 
1073 	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
1074 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1075 			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1076 
1077 	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
1078 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
1079 			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
1080 
1081 	I915_WRITE(GEN7_ERR_INT, err_int);
1082 }
1083 
1084 static void cpt_serr_int_handler(struct drm_device *dev)
1085 {
1086 	struct drm_i915_private *dev_priv = dev->dev_private;
1087 	u32 serr_int = I915_READ(SERR_INT);
1088 
1089 	if (serr_int & SERR_INT_POISON)
1090 		DRM_ERROR("PCH poison interrupt\n");
1091 
1092 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1093 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
1094 							  false))
1095 			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");
1096 
1097 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1098 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
1099 							  false))
1100 			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
1101 
1102 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1103 		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
1104 							  false))
1105 			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");
1106 
1107 	I915_WRITE(SERR_INT, serr_int);
1108 }
1109 
1110 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1111 {
1112 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1113 	int pipe;
1114 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1115 
1116 	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1117 
1118 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1119 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1120 			       SDE_AUDIO_POWER_SHIFT_CPT);
1121 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1122 				 port_name(port));
1123 	}
1124 
1125 	if (pch_iir & SDE_AUX_MASK_CPT)
1126 		dp_aux_irq_handler(dev);
1127 
1128 	if (pch_iir & SDE_GMBUS_CPT)
1129 		gmbus_irq_handler(dev);
1130 
1131 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1132 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1133 
1134 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1135 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1136 
1137 	if (pch_iir & SDE_FDI_MASK_CPT)
1138 		for_each_pipe(pipe)
1139 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1140 					 pipe_name(pipe),
1141 					 I915_READ(FDI_RX_IIR(pipe)));
1142 
1143 	if (pch_iir & SDE_ERROR_CPT)
1144 		cpt_serr_int_handler(dev);
1145 }
1146 
1147 static irqreturn_t ivybridge_irq_handler(void *arg)
1148 {
1149 	struct drm_device *dev = (struct drm_device *) arg;
1150 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1151 	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
1152 	int i;
1153 
1154 	atomic_inc(&dev_priv->irq_received);
1155 
1156 	/* We get interrupts on unclaimed registers, so check for this before we
1157 	 * do any I915_{READ,WRITE}. */
1158 	if (IS_HASWELL(dev) &&
1159 	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
1160 		DRM_ERROR("Unclaimed register before interrupt\n");
1161 		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
1162 	}
1163 
1164 	/* disable master interrupt before clearing iir  */
1165 	de_ier = I915_READ(DEIER);
1166 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1167 
1168 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1169 	 * interrupts will be stored on its back queue, and then we'll be
1170 	 * able to process them after we restore SDEIER (as soon as we restore
1171 	 * it, we'll get an interrupt if SDEIIR still has something to process
1172 	 * due to its back queue). */
1173 	if (!HAS_PCH_NOP(dev)) {
1174 		sde_ier = I915_READ(SDEIER);
1175 		I915_WRITE(SDEIER, 0);
1176 		POSTING_READ(SDEIER);
1177 	}
1178 
1179 	/* On Haswell, also mask ERR_INT because we don't want to risk
1180 	 * generating "unclaimed register" interrupts from inside the interrupt
1181 	 * handler. */
1182 	if (IS_HASWELL(dev)) {
1183 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1184 		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
1185 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1186 	}
1187 
1188 	gt_iir = I915_READ(GTIIR);
1189 	if (gt_iir) {
1190 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
1191 		I915_WRITE(GTIIR, gt_iir);
1192 	}
1193 
1194 	de_iir = I915_READ(DEIIR);
1195 	if (de_iir) {
1196 		if (de_iir & DE_ERR_INT_IVB)
1197 			ivb_err_int_handler(dev);
1198 
1199 		if (de_iir & DE_AUX_CHANNEL_A_IVB)
1200 			dp_aux_irq_handler(dev);
1201 
1202 		if (de_iir & DE_GSE_IVB)
1203 			intel_opregion_asle_intr(dev);
1204 
1205 		for (i = 0; i < 3; i++) {
1206 			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
1207 				drm_handle_vblank(dev, i);
1208 			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
1209 				intel_prepare_page_flip(dev, i);
1210 				intel_finish_page_flip_plane(dev, i);
1211 			}
1212 		}
1213 
1214 		/* check event from PCH */
1215 		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
1216 			u32 pch_iir = I915_READ(SDEIIR);
1217 
1218 			cpt_irq_handler(dev, pch_iir);
1219 
1220 			/* clear PCH hotplug event before clearing the CPU irq */
1221 			I915_WRITE(SDEIIR, pch_iir);
1222 		}
1223 
1224 		I915_WRITE(DEIIR, de_iir);
1225 	}
1226 
1227 	pm_iir = I915_READ(GEN6_PMIIR);
1228 	if (pm_iir) {
1229 		if (IS_HASWELL(dev))
1230 			hsw_pm_irq_handler(dev_priv, pm_iir);
1231 		else if (pm_iir & GEN6_PM_RPS_EVENTS)
1232 			gen6_queue_rps_work(dev_priv, pm_iir);
1233 		I915_WRITE(GEN6_PMIIR, pm_iir);
1234 	}
1235 
1236 	if (IS_HASWELL(dev)) {
1237 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1238 		if (ivb_can_enable_err_int(dev))
1239 			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
1240 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1241 	}
1242 
1243 	I915_WRITE(DEIER, de_ier);
1244 	POSTING_READ(DEIER);
1245 	if (!HAS_PCH_NOP(dev)) {
1246 		I915_WRITE(SDEIER, sde_ier);
1247 		POSTING_READ(SDEIER);
1248 	}
1249 }
1250 
1251 static void ilk_gt_irq_handler(struct drm_device *dev,
1252 			       struct drm_i915_private *dev_priv,
1253 			       u32 gt_iir)
1254 {
1255 	if (gt_iir &
1256 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1257 		notify_ring(dev, &dev_priv->ring[RCS]);
1258 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1259 		notify_ring(dev, &dev_priv->ring[VCS]);
1260 }
1261 
1262 static irqreturn_t ironlake_irq_handler(void *arg)
1263 {
1264 	struct drm_device *dev = (struct drm_device *) arg;
1265 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1266 	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
1267 
1268 	atomic_inc(&dev_priv->irq_received);
1269 
1270 	/* disable master interrupt before clearing iir  */
1271 	de_ier = I915_READ(DEIER);
1272 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
1273 	POSTING_READ(DEIER);
1274 
1275 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
1276 	 * interrupts will be stored on its back queue, and then we'll be
1277 	 * able to process them after we restore SDEIER (as soon as we restore
1278 	 * it, we'll get an interrupt if SDEIIR still has something to process
1279 	 * due to its back queue). */
1280 	sde_ier = I915_READ(SDEIER);
1281 	I915_WRITE(SDEIER, 0);
1282 	POSTING_READ(SDEIER);
1283 
1284 	de_iir = I915_READ(DEIIR);
1285 	gt_iir = I915_READ(GTIIR);
1286 	pm_iir = I915_READ(GEN6_PMIIR);
1287 
1288 	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
1289 		goto done;
1290 
1291 	if (IS_GEN5(dev))
1292 		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
1293 	else
1294 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
1295 
1296 	if (de_iir & DE_AUX_CHANNEL_A)
1297 		dp_aux_irq_handler(dev);
1298 
1299 	if (de_iir & DE_GSE)
1300 		intel_opregion_asle_intr(dev);
1301 
1302 	if (de_iir & DE_PIPEA_VBLANK)
1303 		drm_handle_vblank(dev, 0);
1304 
1305 	if (de_iir & DE_PIPEB_VBLANK)
1306 		drm_handle_vblank(dev, 1);
1307 
1308 	if (de_iir & DE_POISON)
1309 		DRM_ERROR("Poison interrupt\n");
1310 
1311 	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
1312 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1313 			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1314 
1315 	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
1316 		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1317 			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1318 
1319 	if (de_iir & DE_PLANEA_FLIP_DONE) {
1320 		intel_prepare_page_flip(dev, 0);
1321 		intel_finish_page_flip_plane(dev, 0);
1322 	}
1323 
1324 	if (de_iir & DE_PLANEB_FLIP_DONE) {
1325 		intel_prepare_page_flip(dev, 1);
1326 		intel_finish_page_flip_plane(dev, 1);
1327 	}
1328 
1329 	/* check event from PCH */
1330 	if (de_iir & DE_PCH_EVENT) {
1331 		u32 pch_iir = I915_READ(SDEIIR);
1332 
1333 		if (HAS_PCH_CPT(dev))
1334 			cpt_irq_handler(dev, pch_iir);
1335 		else
1336 			ibx_irq_handler(dev, pch_iir);
1337 
1338 		/* should clear PCH hotplug event before clearing the CPU irq */
1339 		I915_WRITE(SDEIIR, pch_iir);
1340 	}
1341 
1342 	if (IS_GEN5(dev) &&  de_iir & DE_PCU_EVENT)
1343 		ironlake_handle_rps_change(dev);
1344 
1345 	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
1346 		gen6_queue_rps_work(dev_priv, pm_iir);
1347 
1348 	I915_WRITE(GTIIR, gt_iir);
1349 	I915_WRITE(DEIIR, de_iir);
1350 	I915_WRITE(GEN6_PMIIR, pm_iir);
1351 
1352 done:
1353 	I915_WRITE(DEIER, de_ier);
1354 	POSTING_READ(DEIER);
1355 	I915_WRITE(SDEIER, sde_ier);
1356 	POSTING_READ(SDEIER);
1357 }
1358 
1359 /**
1360  * i915_error_work_func - do process context error handling work
1361  * @work: work struct
1362  *
1363  * Fire an error uevent so userspace can see that a hang or error
1364  * was detected.
1365  */
1366 static void i915_error_work_func(struct work_struct *work)
1367 {
1368 	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1369 						    work);
1370 	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1371 						    gpu_error);
1372 	struct drm_device *dev = dev_priv->dev;
1373 	struct intel_ring_buffer *ring;
1374 #if 0
1375 	char *error_event[] = { "ERROR=1", NULL };
1376 	char *reset_event[] = { "RESET=1", NULL };
1377 	char *reset_done_event[] = { "ERROR=0", NULL };
1378 #endif
1379 	int i, ret;
1380 
1381 	/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
1382 
1383 	/*
1384 	 * Note that there's only one work item which does gpu resets, so we
1385 	 * need not worry about concurrent gpu resets potentially incrementing
1386 	 * error->reset_counter twice. We only need to take care of another
1387 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
1388 	 * quick check for that is good enough: schedule_work ensures the
1389 	 * correct ordering between hang detection and this work item, and since
1390 	 * the reset in-progress bit is only ever set by code outside of this
1391 	 * work we don't need to worry about any other races.
1392 	 */
1393 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
1394 		DRM_DEBUG_DRIVER("resetting chip\n");
1395 #if 0
1396 		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1397 				   reset_event);
1398 #endif
1399 
1400 		ret = i915_reset(dev);
1401 
1402 		if (ret == 0) {
1403 			/*
1404 			 * After all the gem state is reset, increment the reset
1405 			 * counter and wake up everyone waiting for the reset to
1406 			 * complete.
1407 			 *
1408 			 * Since unlock operations are a one-sided barrier only,
1409 			 * we need to insert a barrier here to order any seqno
1410 			 * updates before
1411 			 * the counter increment.
1412 			 */
1413 			cpu_sfence();
1414 			atomic_inc(&dev_priv->gpu_error.reset_counter);
1415 
1416 #if 0
1417 			kobject_uevent_env(&dev->primary->kdev.kobj,
1418 					   KOBJ_CHANGE, reset_done_event);
1419 #endif
1420 		} else {
1421 			atomic_set(&error->reset_counter, I915_WEDGED);
1422 		}
1423 
1424 		for_each_ring(ring, dev_priv, i)
1425 			wake_up_all(&ring->irq_queue);
1426 
1427 		intel_display_handle_reset(dev);
1428 
1429 		wake_up_all(&dev_priv->gpu_error.reset_queue);
1430 	}
1431 }
1432 
1433 /* NB: please notice the memset */
1434 static void i915_get_extra_instdone(struct drm_device *dev,
1435 				    uint32_t *instdone)
1436 {
1437 	struct drm_i915_private *dev_priv = dev->dev_private;
1438 	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1439 
1440 	switch(INTEL_INFO(dev)->gen) {
1441 	case 2:
1442 	case 3:
1443 		instdone[0] = I915_READ(INSTDONE);
1444 		break;
1445 	case 4:
1446 	case 5:
1447 	case 6:
1448 		instdone[0] = I915_READ(INSTDONE_I965);
1449 		instdone[1] = I915_READ(INSTDONE1);
1450 		break;
1451 	default:
1452 #if 0
1453 		WARN_ONCE(1, "Unsupported platform\n");
1454 #endif
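	/* FALLTHROUGH: unknown platforms use the gen7 INSTDONE register layout */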
1455 	case 7:
1456 		instdone[0] = I915_READ(GEN7_INSTDONE_1);
1457 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1458 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1459 		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1460 		break;
1461 	}
1462 }
1463 
1464 #if 0 /* CONFIG_DEBUG_FS */
1465 static struct drm_i915_error_object *
1466 i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1467 			       struct drm_i915_gem_object *src,
1468 			       const int num_pages)
1469 {
1470 	struct drm_i915_error_object *dst;
1471 	int i;
1472 	u32 reloc_offset;
1473 
1474 	if (src == NULL || src->pages == NULL)
1475 		return NULL;
1476 
1477 	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
1478 	if (dst == NULL)
1479 		return NULL;
1480 
1481 	reloc_offset = src->gtt_offset;
1482 	for (i = 0; i < num_pages; i++) {
1483 		unsigned long flags;
1484 		void *d;
1485 
1486 		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
1487 		if (d == NULL)
1488 			goto unwind;
1489 
1490 		local_irq_save(flags);
1491 		if (reloc_offset < dev_priv->gtt.mappable_end &&
1492 		    src->has_global_gtt_mapping) {
1493 			void __iomem *s;
1494 
1495 			/* Simply ignore tiling or any overlapping fence.
1496 			 * It's part of the error state, and this hopefully
1497 			 * captures what the GPU read.
1498 			 */
1499 
1500 			s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1501 						     reloc_offset);
1502 			memcpy_fromio(d, s, PAGE_SIZE);
1503 			io_mapping_unmap_atomic(s);
1504 		} else if (src->stolen) {
1505 			unsigned long offset;
1506 
1507 			offset = dev_priv->mm.stolen_base;
1508 			offset += src->stolen->start;
1509 			offset += i << PAGE_SHIFT;
1510 
1511 			memcpy_fromio(d, (void *)offset, PAGE_SIZE);
1512 		} else {
1513 			struct page *page;
1514 			void *s;
1515 
1516 			page = i915_gem_object_get_page(src, i);
1517 
1518 			drm_clflush_pages(&page, 1);
1519 
1520 			s = kmap_atomic(page);
1521 			memcpy(d, s, PAGE_SIZE);
1522 			kunmap_atomic(s);
1523 
1524 			drm_clflush_pages(&page, 1);
1525 		}
1526 		local_irq_restore(flags);
1527 
1528 		dst->pages[i] = d;
1529 
1530 		reloc_offset += PAGE_SIZE;
1531 	}
1532 	dst->page_count = num_pages;
1533 	dst->gtt_offset = src->gtt_offset;
1534 
1535 	return dst;
1536 
1537 unwind:
1538 	while (i--)
1539 		kfree(dst->pages[i]);
1540 	kfree(dst);
1541 	return NULL;
1542 }
1543 #define i915_error_object_create(dev_priv, src) \
1544 	i915_error_object_create_sized((dev_priv), (src), \
1545 				       (src)->base.size>>PAGE_SHIFT)
1546 
1547 static void
1548 i915_error_object_free(struct drm_i915_error_object *obj)
1549 {
1550 	int page;
1551 
1552 	if (obj == NULL)
1553 		return;
1554 
1555 	for (page = 0; page < obj->page_count; page++)
1556 		kfree(obj->pages[page]);
1557 
1558 	kfree(obj);
1559 }
1560 
1561 void
1562 i915_error_state_free(struct drm_device *dev,
1563 		      struct drm_i915_error_state *error)
1564 {
1567 	int i;
1568 
1569 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1570 		i915_error_object_free(error->ring[i].batchbuffer);
1571 		i915_error_object_free(error->ring[i].ringbuffer);
1572 		i915_error_object_free(error->ring[i].ctx);
1573 		kfree(error->ring[i].requests);
1574 	}
1575 
1576 	kfree(error->active_bo);
1577 	kfree(error->overlay);
1578 	kfree(error->display);
1579 	kfree(error);
1580 }
1581 static void capture_bo(struct drm_i915_error_buffer *err,
1582 		       struct drm_i915_gem_object *obj)
1583 {
1584 	err->size = obj->base.size;
1585 	err->name = obj->base.name;
1586 	err->rseqno = obj->last_read_seqno;
1587 	err->wseqno = obj->last_write_seqno;
1588 	err->gtt_offset = obj->gtt_offset;
1589 	err->read_domains = obj->base.read_domains;
1590 	err->write_domain = obj->base.write_domain;
1591 	err->fence_reg = obj->fence_reg;
1592 	err->pinned = 0;
1593 	if (obj->pin_count > 0)
1594 		err->pinned = 1;
1595 	if (obj->user_pin_count > 0)
1596 		err->pinned = -1;
1597 	err->tiling = obj->tiling_mode;
1598 	err->dirty = obj->dirty;
1599 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
1600 	err->ring = obj->ring ? obj->ring->id : -1;
1601 	err->cache_level = obj->cache_level;
1602 }
1603 
1604 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1605 			     int count, struct list_head *head)
1606 {
1607 	struct drm_i915_gem_object *obj;
1608 	int i = 0;
1609 
1610 	list_for_each_entry(obj, head, mm_list) {
1611 		capture_bo(err++, obj);
1612 		if (++i == count)
1613 			break;
1614 	}
1615 
1616 	return i;
1617 }
1618 
1619 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1620 			     int count, struct list_head *head)
1621 {
1622 	struct drm_i915_gem_object *obj;
1623 	int i = 0;
1624 
1625 	list_for_each_entry(obj, head, global_list) {
1626 		if (obj->pin_count == 0)
1627 			continue;
1628 
1629 		capture_bo(err++, obj);
1630 		if (++i == count)
1631 			break;
1632 	}
1633 
1634 	return i;
1635 }
1636 
1637 static void i915_gem_record_fences(struct drm_device *dev,
1638 				   struct drm_i915_error_state *error)
1639 {
1640 	struct drm_i915_private *dev_priv = dev->dev_private;
1641 	int i;
1642 
1643 	/* Fences */
1644 	switch (INTEL_INFO(dev)->gen) {
1645 	case 7:
1646 	case 6:
1647 		for (i = 0; i < dev_priv->num_fence_regs; i++)
1648 			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1649 		break;
1650 	case 5:
1651 	case 4:
1652 		for (i = 0; i < 16; i++)
1653 			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1654 		break;
1655 	case 3:
1656 		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1657 			for (i = 0; i < 8; i++)
1658 				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1659 	case 2:
1660 		for (i = 0; i < 8; i++)
1661 			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1662 		break;
1663 
1664 	default:
1665 		BUG();
1666 	}
1667 }
1668 
1669 static struct drm_i915_error_object *
1670 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1671 			     struct intel_ring_buffer *ring)
1672 {
1673 	struct drm_i915_gem_object *obj;
1674 	u32 seqno;
1675 
1676 	if (!ring->get_seqno)
1677 		return NULL;
1678 
1679 	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1680 		u32 acthd = I915_READ(ACTHD);
1681 
1682 		if (WARN_ON(ring->id != RCS))
1683 			return NULL;
1684 
1685 		obj = ring->private;
1686 		if (acthd >= obj->gtt_offset &&
1687 		    acthd < obj->gtt_offset + obj->base.size)
1688 			return i915_error_object_create(dev_priv, obj);
1689 	}
1690 
1691 	seqno = ring->get_seqno(ring, false);
1692 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1693 		if (obj->ring != ring)
1694 			continue;
1695 
1696 		if (i915_seqno_passed(seqno, obj->last_read_seqno))
1697 			continue;
1698 
1699 		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1700 			continue;
1701 
1702 		/* We need to copy these to an anonymous buffer as the simplest
1703 		 * method to avoid being overwritten by userspace.
1704 		 */
1705 		return i915_error_object_create(dev_priv, obj);
1706 	}
1707 
1708 	return NULL;
1709 }
1710 
1711 static void i915_record_ring_state(struct drm_device *dev,
1712 				   struct drm_i915_error_state *error,
1713 				   struct intel_ring_buffer *ring)
1714 {
1715 	struct drm_i915_private *dev_priv = dev->dev_private;
1716 
1717 	if (INTEL_INFO(dev)->gen >= 6) {
1718 		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1719 		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1720 		error->semaphore_mboxes[ring->id][0]
1721 			= I915_READ(RING_SYNC_0(ring->mmio_base));
1722 		error->semaphore_mboxes[ring->id][1]
1723 			= I915_READ(RING_SYNC_1(ring->mmio_base));
1724 		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1725 		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1726 	}
1727 
1728 	if (INTEL_INFO(dev)->gen >= 4) {
1729 		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1730 		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1731 		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1732 		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1733 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1734 		if (ring->id == RCS)
1735 			error->bbaddr = I915_READ64(BB_ADDR);
1736 	} else {
1737 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1738 		error->ipeir[ring->id] = I915_READ(IPEIR);
1739 		error->ipehr[ring->id] = I915_READ(IPEHR);
1740 		error->instdone[ring->id] = I915_READ(INSTDONE);
1741 	}
1742 
1743 	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1744 	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1745 	error->seqno[ring->id] = ring->get_seqno(ring, false);
1746 	error->acthd[ring->id] = intel_ring_get_active_head(ring);
1747 	error->head[ring->id] = I915_READ_HEAD(ring);
1748 	error->tail[ring->id] = I915_READ_TAIL(ring);
1749 	error->ctl[ring->id] = I915_READ_CTL(ring);
1750 
1751 	error->cpu_ring_head[ring->id] = ring->head;
1752 	error->cpu_ring_tail[ring->id] = ring->tail;
1753 }
1754 
1755 
1756 static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1757 					   struct drm_i915_error_state *error,
1758 					   struct drm_i915_error_ring *ering)
1759 {
1760 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1761 	struct drm_i915_gem_object *obj;
1762 
1763 	/* Currently the render ring is the only HW context user */
1764 	if (ring->id != RCS || !error->ccid)
1765 		return;
1766 
1767 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1768 		if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1769 			ering->ctx = i915_error_object_create_sized(dev_priv,
1770 								    obj, 1);
1771 		}
1772 	}
1773 }
1774 
1775 static void i915_gem_record_rings(struct drm_device *dev,
1776 				  struct drm_i915_error_state *error)
1777 {
1778 	struct drm_i915_private *dev_priv = dev->dev_private;
1779 	struct intel_ring_buffer *ring;
1780 	struct drm_i915_gem_request *request;
1781 	int i, count;
1782 
1783 	for_each_ring(ring, dev_priv, i) {
1784 		i915_record_ring_state(dev, error, ring);
1785 
1786 		error->ring[i].batchbuffer =
1787 			i915_error_first_batchbuffer(dev_priv, ring);
1788 
1789 		error->ring[i].ringbuffer =
1790 			i915_error_object_create(dev_priv, ring->obj);
1791 
1792 
1793 		i915_gem_record_active_context(ring, error, &error->ring[i]);
1794 
1795 		count = 0;
1796 		list_for_each_entry(request, &ring->request_list, list)
1797 			count++;
1798 
1799 		error->ring[i].num_requests = count;
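		/* GFP_ATOMIC: this may run from the error/hang interrupt path and must not sleep. */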
1800 		error->ring[i].requests =
1801 			kmalloc(count*sizeof(struct drm_i915_error_request),
1802 				GFP_ATOMIC);
1803 		if (error->ring[i].requests == NULL) {
1804 			error->ring[i].num_requests = 0;
1805 			continue;
1806 		}
1807 
1808 		count = 0;
1809 		list_for_each_entry(request, &ring->request_list, list) {
1810 			struct drm_i915_error_request *erq;
1811 
1812 			erq = &error->ring[i].requests[count++];
1813 			erq->seqno = request->seqno;
1814 			erq->jiffies = request->emitted_jiffies;
1815 			erq->tail = request->tail;
1816 		}
1817 	}
1818 }
1819 
1820 /**
1821  * i915_capture_error_state - capture an error record for later analysis
1822  * @dev: drm device
1823  *
1824  * Should be called when an error is detected (either a hang or an error
1825  * interrupt) to capture error state from the time of the error.  Fills
1826  * out a structure which becomes available in debugfs for user level tools
1827  * to pick up.
1828  */
1829 static void i915_capture_error_state(struct drm_device *dev)
1830 {
1831 	struct drm_i915_private *dev_priv = dev->dev_private;
1832 	struct drm_i915_gem_object *obj;
1833 	struct drm_i915_error_state *error;
1834 	unsigned long flags;
1835 	int i, pipe;
1836 
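	/* Only one error state is kept at a time; bail out if an earlier
	 * capture is still pending for userspace.
	 */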
1837 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1838 	error = dev_priv->gpu_error.first_error;
1839 	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1840 	if (error)
1841 		return;
1842 
1843 	/* Account for pipe specific data like PIPE*STAT */
1844 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
1845 	if (!error) {
1846 		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1847 		return;
1848 	}
1849 
1850 	DRM_INFO("capturing error event; look for more information in "
1851 		 "/sys/kernel/debug/dri/%d/i915_error_state\n",
1852 		 dev->primary->index);
1853 
1854 	kref_init(&error->ref);
1855 	error->eir = I915_READ(EIR);
1856 	error->pgtbl_er = I915_READ(PGTBL_ER);
1857 	if (HAS_HW_CONTEXTS(dev))
1858 		error->ccid = I915_READ(CCID);
1859 
1860 	if (HAS_PCH_SPLIT(dev))
1861 		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1862 	else if (IS_VALLEYVIEW(dev))
1863 		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1864 	else if (IS_GEN2(dev))
1865 		error->ier = I915_READ16(IER);
1866 	else
1867 		error->ier = I915_READ(IER);
1868 
1869 	if (INTEL_INFO(dev)->gen >= 6)
1870 		error->derrmr = I915_READ(DERRMR);
1871 
1872 	if (IS_VALLEYVIEW(dev))
1873 		error->forcewake = I915_READ(FORCEWAKE_VLV);
1874 	else if (INTEL_INFO(dev)->gen >= 7)
1875 		error->forcewake = I915_READ(FORCEWAKE_MT);
1876 	else if (INTEL_INFO(dev)->gen == 6)
1877 		error->forcewake = I915_READ(FORCEWAKE);
1878 
1879 	if (!HAS_PCH_SPLIT(dev))
1880 		for_each_pipe(pipe)
1881 			error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1882 
1883 	if (INTEL_INFO(dev)->gen >= 6) {
1884 		error->error = I915_READ(ERROR_GEN6);
1885 		error->done_reg = I915_READ(DONE_REG);
1886 	}
1887 
1888 	if (INTEL_INFO(dev)->gen == 7)
1889 		error->err_int = I915_READ(GEN7_ERR_INT);
1890 
1891 	i915_get_extra_instdone(dev, error->extra_instdone);
1892 
1893 	i915_gem_record_fences(dev, error);
1894 	i915_gem_record_rings(dev, error);
1895 
1896 	/* Record buffers on the active and pinned lists. */
1897 	error->active_bo = NULL;
1898 	error->pinned_bo = NULL;
1899 
1900 	i = 0;
1901 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1902 		i++;
1903 	error->active_bo_count = i;
1904 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1905 		if (obj->pin_count)
1906 			i++;
1907 	error->pinned_bo_count = i - error->active_bo_count;
1908 
1909 	error->active_bo = NULL;
1910 	error->pinned_bo = NULL;
1911 	if (i) {
1912 		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1913 					   GFP_ATOMIC);
1914 		if (error->active_bo)
1915 			error->pinned_bo =
1916 				error->active_bo + error->active_bo_count;
1917 	}
1918 
1919 	if (error->active_bo)
1920 		error->active_bo_count =
1921 			capture_active_bo(error->active_bo,
1922 					  error->active_bo_count,
1923 					  &dev_priv->mm.active_list);
1924 
1925 	if (error->pinned_bo)
1926 		error->pinned_bo_count =
1927 			capture_pinned_bo(error->pinned_bo,
1928 					  error->pinned_bo_count,
1929 					  &dev_priv->mm.bound_list);
1930 
1931 	do_gettimeofday(&error->time);
1932 
1933 	error->overlay = intel_overlay_capture_error_state(dev);
1934 	error->display = intel_display_capture_error_state(dev);
1935 
1936 	spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1937 	if (dev_priv->gpu_error.first_error == NULL) {
1938 		dev_priv->gpu_error.first_error = error;
1939 		error = NULL;
1940 	}
1941 	spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1942 
1943 	if (error)
1944 		i915_error_state_free(&error->ref);
1945 }
1946 
1947 void i915_destroy_error_state(struct drm_device *dev)
1948 {
1949 	struct drm_i915_private *dev_priv = dev->dev_private;
1950 	struct drm_i915_error_state *error;
1951 
1952 	lockmgr(&dev_priv->gpu_error.lock, LK_EXCLUSIVE);
1953 	error = dev_priv->gpu_error.first_error;
1954 	dev_priv->gpu_error.first_error = NULL;
1955 	lockmgr(&dev_priv->gpu_error.lock, LK_RELEASE);
1956 
1957 	if (error)
1958 		kref_put(&error->ref, i915_error_state_free);
1959 }
1960 #else
1961 #define i915_capture_error_state(x)
1962 #endif
1963 
1964 static void i915_report_and_clear_eir(struct drm_device *dev)
1965 {
1966 	struct drm_i915_private *dev_priv = dev->dev_private;
1967 	uint32_t instdone[I915_NUM_INSTDONE_REG];
1968 	u32 eir = I915_READ(EIR);
1969 	int pipe, i;
1970 
1971 	if (!eir)
1972 		return;
1973 
1974 	pr_err("render error detected, EIR: 0x%08x\n", eir);
1975 
1976 	i915_get_extra_instdone(dev, instdone);
1977 
1978 	if (IS_G4X(dev)) {
1979 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1980 			u32 ipeir = I915_READ(IPEIR_I965);
1981 
1982 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1983 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1984 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
1985 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1986 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1987 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1988 			I915_WRITE(IPEIR_I965, ipeir);
1989 			POSTING_READ(IPEIR_I965);
1990 		}
1991 		if (eir & GM45_ERROR_PAGE_TABLE) {
1992 			u32 pgtbl_err = I915_READ(PGTBL_ER);
1993 			pr_err("page table error\n");
1994 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1995 			I915_WRITE(PGTBL_ER, pgtbl_err);
1996 			POSTING_READ(PGTBL_ER);
1997 		}
1998 	}
1999 
2000 	if (!IS_GEN2(dev)) {
2001 		if (eir & I915_ERROR_PAGE_TABLE) {
2002 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2003 			pr_err("page table error\n");
2004 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2005 			I915_WRITE(PGTBL_ER, pgtbl_err);
2006 			POSTING_READ(PGTBL_ER);
2007 		}
2008 	}
2009 
2010 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2011 		pr_err("memory refresh error:\n");
2012 		for_each_pipe(pipe)
2013 			pr_err("pipe %c stat: 0x%08x\n",
2014 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2015 		/* pipestat has already been acked */
2016 	}
2017 	if (eir & I915_ERROR_INSTRUCTION) {
2018 		pr_err("instruction error\n");
2019 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2020 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2021 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2022 		if (INTEL_INFO(dev)->gen < 4) {
2023 			u32 ipeir = I915_READ(IPEIR);
2024 
2025 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2026 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2027 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2028 			I915_WRITE(IPEIR, ipeir);
2029 			POSTING_READ(IPEIR);
2030 		} else {
2031 			u32 ipeir = I915_READ(IPEIR_I965);
2032 
2033 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2034 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2035 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2036 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2037 			I915_WRITE(IPEIR_I965, ipeir);
2038 			POSTING_READ(IPEIR_I965);
2039 		}
2040 	}
2041 
2042 	I915_WRITE(EIR, eir);
2043 	POSTING_READ(EIR);
2044 	eir = I915_READ(EIR);
2045 	if (eir) {
2046 		/*
2047 		 * some errors might have become stuck,
2048 		 * mask them.
2049 		 */
2050 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2051 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2052 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2053 	}
2054 }
2055 
2056 /**
2057  * i915_handle_error - handle an error interrupt
2058  * @dev: drm device
2059  *
2060  * Do some basic checking of register state at error interrupt time and
2061  * dump it to the syslog.  Also call i915_capture_error_state() to make
2062  * sure we get a record and make it available in debugfs.  Fire a uevent
2063  * so userspace knows something bad happened (should trigger collection
2064  * of a ring dump etc.).
2065  */
2066 void i915_handle_error(struct drm_device *dev, bool wedged)
2067 {
2068 	struct drm_i915_private *dev_priv = dev->dev_private;
2069 	struct intel_ring_buffer *ring;
2070 	int i;
2071 
2072 	i915_capture_error_state(dev);
2073 	i915_report_and_clear_eir(dev);
2074 
2075 	if (wedged) {
2076 		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2077 				&dev_priv->gpu_error.reset_counter);
2078 
2079 		/*
2080 		 * Wakeup waiting processes so that the reset work item
2081 		 * doesn't deadlock trying to grab various locks.
2082 		 */
2083 		for_each_ring(ring, dev_priv, i)
2084 			wake_up_all(&ring->irq_queue);
2085 	}
2086 
2087 	queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
2088 }
2089 
2090 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2091 {
2092 	drm_i915_private_t *dev_priv = dev->dev_private;
2093 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2094 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2095 	struct drm_i915_gem_object *obj;
2096 	struct intel_unpin_work *work;
2097 	bool stall_detected;
2098 
2099 	/* Ignore early vblank irqs */
2100 	if (intel_crtc == NULL)
2101 		return;
2102 
2103 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2104 	work = intel_crtc->unpin_work;
2105 
2106 	if (work == NULL ||
2107 	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2108 	    !work->enable_stall_check) {
2109 		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
2110 		lockmgr(&dev->event_lock, LK_RELEASE);
2111 		return;
2112 	}
2113 
2114 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2115 	obj = work->pending_flip_obj;
2116 	if (INTEL_INFO(dev)->gen >= 4) {
2117 		int dspsurf = DSPSURF(intel_crtc->plane);
2118 		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2119 					obj->gtt_offset;
2120 	} else {
2121 		int dspaddr = DSPADDR(intel_crtc->plane);
2122 		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
2123 							crtc->y * crtc->fb->pitches[0] +
2124 							crtc->x * crtc->fb->bits_per_pixel/8);
2125 	}
2126 
2127 	lockmgr(&dev->event_lock, LK_RELEASE);
2128 
2129 	if (stall_detected) {
2130 		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2131 		intel_prepare_page_flip(dev, intel_crtc->plane);
2132 	}
2133 }
2134 
2135 /* Called from drm generic code, passed 'crtc' which
2136  * we use as a pipe index
2137  */
2138 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2139 {
2140 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2141 
2142 	if (!i915_pipe_enabled(dev, pipe))
2143 		return -EINVAL;
2144 
2145 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2146 	if (INTEL_INFO(dev)->gen >= 4)
2147 		i915_enable_pipestat(dev_priv, pipe,
2148 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
2149 	else
2150 		i915_enable_pipestat(dev_priv, pipe,
2151 				     PIPE_VBLANK_INTERRUPT_ENABLE);
2152 
2153 	/* maintain vblank delivery even in deep C-states */
2154 	if (dev_priv->info->gen == 3)
2155 		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
2156 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2157 
2158 	return 0;
2159 }
2160 
2161 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2162 {
2163 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2164 
2165 	if (!i915_pipe_enabled(dev, pipe))
2166 		return -EINVAL;
2167 
2168 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2169 	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
2170 				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2171 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2172 
2173 	return 0;
2174 }
2175 
2176 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
2177 {
2178 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2179 
2180 	if (!i915_pipe_enabled(dev, pipe))
2181 		return -EINVAL;
2182 
2183 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
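	/* The per-pipe IVB vblank bits in DEIMR are spaced 5 bits apart. */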
2184 	ironlake_enable_display_irq(dev_priv,
2185 				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
2186 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2187 
2188 	return 0;
2189 }
2190 
2191 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2192 {
2193 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2194 	u32 imr;
2195 
2196 	if (!i915_pipe_enabled(dev, pipe))
2197 		return -EINVAL;
2198 
2199 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2200 	imr = I915_READ(VLV_IMR);
2201 	if (pipe == 0)
2202 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2203 	else
2204 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2205 	I915_WRITE(VLV_IMR, imr);
2206 	i915_enable_pipestat(dev_priv, pipe,
2207 			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
2208 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2209 
2210 	return 0;
2211 }
2212 
2213 /* Called from drm generic code, passed 'crtc' which
2214  * we use as a pipe index
2215  */
2216 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2217 {
2218 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2219 
2220 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2221 	if (dev_priv->info->gen == 3)
2222 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
2223 
2224 	i915_disable_pipestat(dev_priv, pipe,
2225 			      PIPE_VBLANK_INTERRUPT_ENABLE |
2226 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
2227 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2228 }
2229 
2230 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2231 {
2232 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2233 
2234 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2235 	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
2236 				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
2237 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2238 }
2239 
2240 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
2241 {
2242 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2243 
2244 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2245 	ironlake_disable_display_irq(dev_priv,
2246 				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
2247 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2248 }
2249 
2250 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2251 {
2252 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2253 	u32 imr;
2254 
2255 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2256 	i915_disable_pipestat(dev_priv, pipe,
2257 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
2258 	imr = I915_READ(VLV_IMR);
2259 	if (pipe == 0)
2260 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
2261 	else
2262 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2263 	I915_WRITE(VLV_IMR, imr);
2264 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2265 }
2266 
2267 static u32
2268 ring_last_seqno(struct intel_ring_buffer *ring)
2269 {
2270 	return list_entry(ring->request_list.prev,
2271 			  struct drm_i915_gem_request, list)->seqno;
2272 }
2273 
2274 static bool
2275 ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2276 {
2277 	return (list_empty(&ring->request_list) ||
2278 		i915_seqno_passed(seqno, ring_last_seqno(ring)));
2279 }
2280 
2281 static struct intel_ring_buffer *
2282 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
2283 {
2284 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2285 	u32 cmd, ipehr, acthd, acthd_min;
2286 
2287 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2288 	if ((ipehr & ~(0x3 << 16)) !=
2289 	    (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
2290 		return NULL;
2291 
2292 	/* ACTHD is likely pointing to the dword after the actual command,
2293 	 * so scan backwards until we find the MBOX.
2294 	 */
2295 	acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
2296 	acthd_min = max((int)acthd - 3 * 4, 0);
2297 	do {
2298 		cmd = ioread32(ring->virtual_start + acthd);
2299 		if (cmd == ipehr)
2300 			break;
2301 
2302 		acthd -= 4;
2303 		if (acthd < acthd_min)
2304 			return NULL;
2305 	} while (1);
2306 
2307 	*seqno = ioread32(ring->virtual_start+acthd+4)+1;
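	/* Bit 17 of the MBOX command selects which of the other two rings
	 * is the signaller; map it back to that ring.
	 */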
2308 	return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
2309 }
2310 
2311 static int semaphore_passed(struct intel_ring_buffer *ring)
2312 {
2313 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2314 	struct intel_ring_buffer *signaller;
2315 	u32 seqno, ctl;
2316 
2317 	ring->hangcheck.deadlock = true;
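	/* Mark ourselves so that a recursive call through semaphore_passed()
	 * can detect a cycle of rings waiting on each other.
	 */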
2318 
2319 	signaller = semaphore_waits_for(ring, &seqno);
2320 	if (signaller == NULL || signaller->hangcheck.deadlock)
2321 		return -1;
2322 
2323 	/* cursory check for an unkickable deadlock */
2324 	ctl = I915_READ_CTL(signaller);
2325 	if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2326 		return -1;
2327 
2328 	return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2329 }
2330 
2331 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2332 {
2333 	struct intel_ring_buffer *ring;
2334 	int i;
2335 
2336 	for_each_ring(ring, dev_priv, i)
2337 		ring->hangcheck.deadlock = false;
2338 }
2339 
2340 static enum intel_ring_hangcheck_action
2341 ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
2342 {
2343 	struct drm_device *dev = ring->dev;
2344 	struct drm_i915_private *dev_priv = dev->dev_private;
2345 	u32 tmp;
2346 
2347 	if (ring->hangcheck.acthd != acthd)
2348 		return active;
2349 
2350 	if (IS_GEN2(dev))
2351 		return hung;
2352 
2353 	/* Is the chip hanging on a WAIT_FOR_EVENT?
2354 	 * If so we can simply poke the RB_WAIT bit
2355 	 * and break the hang. This should work on
2356 	 * all but the second generation chipsets.
2357 	 */
2358 	tmp = I915_READ_CTL(ring);
2359 	if (tmp & RING_WAIT) {
2360 		DRM_ERROR("Kicking stuck wait on %s\n",
2361 			  ring->name);
2362 		I915_WRITE_CTL(ring, tmp);
2363 		return kick;
2364 	}
2365 
2366 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2367 		switch (semaphore_passed(ring)) {
2368 		default:
2369 			return hung;
2370 		case 1:
2371 			DRM_ERROR("Kicking stuck semaphore on %s\n",
2372 				  ring->name);
2373 			I915_WRITE_CTL(ring, tmp);
2374 			return kick;
2375 		case 0:
2376 			return wait;
2377 		}
2378 	}
2379 
2380 	return hung;
2381 }
2382 
2383 /**
2384  * This is called when the chip hasn't reported back with completed
2385  * batchbuffers in a long time. We keep track of per-ring seqno progress and
2386  * if there is no progress, the hangcheck score for that ring is increased.
2387  * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
2388  * we kick the ring. If we see no progress on three subsequent calls
2389  * we assume the chip is wedged and try to fix it by resetting the chip.
2390  */
2391 void i915_hangcheck_elapsed(unsigned long data)
2392 {
2393 	struct drm_device *dev = (struct drm_device *)data;
2394 	drm_i915_private_t *dev_priv = dev->dev_private;
2395 	struct intel_ring_buffer *ring;
2396 	int i;
2397 	int busy_count = 0, rings_hung = 0;
2398 	bool stuck[I915_NUM_RINGS] = { 0 };
2399 #define BUSY 1
2400 #define KICK 5
2401 #define HUNG 20
2402 #define FIRE 30
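/* A ring whose accumulated score exceeds FIRE is reported as hung below. */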
2403 
2404 	if (!i915_enable_hangcheck)
2405 		return;
2406 
2407 	for_each_ring(ring, dev_priv, i) {
2408 		u32 seqno, acthd;
2409 		bool busy = true;
2410 
2411 		semaphore_clear_deadlocks(dev_priv);
2412 
2413 		seqno = ring->get_seqno(ring, false);
2414 		acthd = intel_ring_get_active_head(ring);
2415 
2416 		if (ring->hangcheck.seqno == seqno) {
2417 			if (ring_idle(ring, seqno)) {
2418 				if (waitqueue_active(&ring->irq_queue)) {
2419 					/* Issue a wake-up to catch stuck h/w. */
2420 					DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2421 						  ring->name);
2422 					wake_up_all(&ring->irq_queue);
2423 					ring->hangcheck.score += HUNG;
2424 				} else
2425 					busy = false;
2426 			} else {
2427 				int score;
2428 
2429 				/* We always increment the hangcheck score
2430 				 * if the ring is busy and still processing
2431 				 * the same request, so that no single request
2432 				 * can run indefinitely (such as a chain of
2433 				 * batches). The only time we do not increment
2434 				 * the hangcheck score on this ring is when this
2435 				 * ring is in a legitimate wait for another
2436 				 * ring. In that case the waiting ring is a
2437 				 * victim and we want to be sure we catch the
2438 				 * right culprit. Then every time we do kick
2439 				 * the ring, add a small increment to the
2440 				 * score so that we can catch a batch that is
2441 				 * being repeatedly kicked and so responsible
2442 				 * for stalling the machine.
2443 				 */
2444 				ring->hangcheck.action = ring_stuck(ring,
2445 								    acthd);
2446 
2447 				switch (ring->hangcheck.action) {
2448 				case wait:
2449 					score = 0;
2450 					break;
2451 				case active:
2452 					score = BUSY;
2453 					break;
2454 				case kick:
2455 					score = KICK;
2456 					break;
2457 				case hung:
2458 					score = HUNG;
2459 					stuck[i] = true;
2460 					break;
2461 				}
2462 				ring->hangcheck.score += score;
2463 			}
2464 		} else {
2465 			/* Gradually reduce the score so that we catch DoS
2466 			 * attempts across multiple batches.
2467 			 */
2468 			if (ring->hangcheck.score > 0)
2469 				ring->hangcheck.score--;
2470 		}
2471 
2472 		ring->hangcheck.seqno = seqno;
2473 		ring->hangcheck.acthd = acthd;
2474 		busy_count += busy;
2475 	}
2476 
2477 	for_each_ring(ring, dev_priv, i) {
2478 		if (ring->hangcheck.score > FIRE) {
2479 			DRM_ERROR("%s on %s\n",
2480 				  stuck[i] ? "stuck" : "no progress",
2481 				  ring->name);
2482 			rings_hung++;
2483 		}
2484 	}
2485 
2486 	if (rings_hung)
2487 		return i915_handle_error(dev, true);
2488 
2489 	if (busy_count)
2490 		/* Reset the timer in case the chip hangs without another request
2491 		 * being added */
2492 		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2493 			  round_jiffies_up(jiffies +
2494 					   DRM_I915_HANGCHECK_JIFFIES));
2495 }
2496 
2497 static void ibx_irq_preinstall(struct drm_device *dev)
2498 {
2499 	struct drm_i915_private *dev_priv = dev->dev_private;
2500 
2501 	if (HAS_PCH_NOP(dev))
2502 		return;
2503 
2504 	/* south display irq */
2505 	I915_WRITE(SDEIMR, 0xffffffff);
2506 	/*
2507 	 * SDEIER is also touched by the interrupt handler to work around missed
2508 	 * PCH interrupts. Hence we can't update it after the interrupt handler
2509 	 * is enabled - instead we unconditionally enable all PCH interrupt
2510 	 * sources here, but then only unmask them as needed with SDEIMR.
2511 	 */
2512 	I915_WRITE(SDEIER, 0xffffffff);
2513 	POSTING_READ(SDEIER);
2514 }
2515 
2516 /* drm_dma.h hooks */
2517 
2518 static void ironlake_irq_preinstall(struct drm_device *dev)
2519 {
2520 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2521 
2522 	atomic_set(&dev_priv->irq_received, 0);
2523 
2524 	I915_WRITE(HWSTAM, 0xeffe);
2525 
2526 	/* XXX hotplug from PCH */
2527 
2528 	I915_WRITE(DEIMR, 0xffffffff);
2529 	I915_WRITE(DEIER, 0x0);
2530 	POSTING_READ(DEIER);
2531 
2532 	/* and GT */
2533 	I915_WRITE(GTIMR, 0xffffffff);
2534 	I915_WRITE(GTIER, 0x0);
2535 	POSTING_READ(GTIER);
2536 
2537 	ibx_irq_preinstall(dev);
2538 }
2539 
2540 static void ivybridge_irq_preinstall(struct drm_device *dev)
2541 {
2542 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2543 
2544 	atomic_set(&dev_priv->irq_received, 0);
2545 
2546 	I915_WRITE(HWSTAM, 0xeffe);
2547 
2548 	/* XXX hotplug from PCH */
2549 
2550 	I915_WRITE(DEIMR, 0xffffffff);
2551 	I915_WRITE(DEIER, 0x0);
2552 	POSTING_READ(DEIER);
2553 
2554 	/* and GT */
2555 	I915_WRITE(GTIMR, 0xffffffff);
2556 	I915_WRITE(GTIER, 0x0);
2557 	POSTING_READ(GTIER);
2558 
2559 	/* Power management */
2560 	I915_WRITE(GEN6_PMIMR, 0xffffffff);
2561 	I915_WRITE(GEN6_PMIER, 0x0);
2562 	POSTING_READ(GEN6_PMIER);
2563 
2564 	ibx_irq_preinstall(dev);
2565 }
2566 
2567 static void valleyview_irq_preinstall(struct drm_device *dev)
2568 {
2569 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2570 	int pipe;
2571 
2572 	atomic_set(&dev_priv->irq_received, 0);
2573 
2574 	/* VLV magic */
2575 	I915_WRITE(VLV_IMR, 0);
2576 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2577 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2578 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2579 
2580 	/* and GT */
2581 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2582 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2583 	I915_WRITE(GTIMR, 0xffffffff);
2584 	I915_WRITE(GTIER, 0x0);
2585 	POSTING_READ(GTIER);
2586 
2587 	I915_WRITE(DPINVGTT, 0xff);
2588 
2589 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2590 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2591 	for_each_pipe(pipe)
2592 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2593 	I915_WRITE(VLV_IIR, 0xffffffff);
2594 	I915_WRITE(VLV_IMR, 0xffffffff);
2595 	I915_WRITE(VLV_IER, 0x0);
2596 	POSTING_READ(VLV_IER);
2597 }
2598 
2599 static void ibx_hpd_irq_setup(struct drm_device *dev)
2600 {
2601 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2602 	struct drm_mode_config *mode_config = &dev->mode_config;
2603 	struct intel_encoder *intel_encoder;
2604 	u32 mask = ~I915_READ(SDEIMR);
2605 	u32 hotplug;
2606 
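	/* 'mask' starts as the currently unmasked SDE bits; the hotplug bits
	 * are then rebuilt from the per-pin HPD enable state.
	 */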
2607 	if (HAS_PCH_IBX(dev)) {
2608 		mask &= ~SDE_HOTPLUG_MASK;
2609 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2610 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2611 				mask |= hpd_ibx[intel_encoder->hpd_pin];
2612 	} else {
2613 		mask &= ~SDE_HOTPLUG_MASK_CPT;
2614 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
2615 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2616 				mask |= hpd_cpt[intel_encoder->hpd_pin];
2617 	}
2618 
2619 	I915_WRITE(SDEIMR, ~mask);
2620 
2621 	/*
2622 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
2623 	 * duration to 2ms (which is the minimum in the Display Port spec)
2624 	 *
2625 	 * This register is the same on all known PCH chips.
2626 	 */
2627 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
2628 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2629 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2630 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2631 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2632 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2633 }
2634 
2635 static void ibx_irq_postinstall(struct drm_device *dev)
2636 {
2637 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2638 	u32 mask;
2639 
2640 	if (HAS_PCH_NOP(dev))
2641 		return;
2642 
2643 	if (HAS_PCH_IBX(dev)) {
2644 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
2645 		       SDE_TRANSA_FIFO_UNDER | SDE_POISON;
2646 	} else {
2647 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2648 
2649 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2650 	}
2651 
2652 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2653 	I915_WRITE(SDEIMR, ~mask);
2654 }
2655 
2656 static int ironlake_irq_postinstall(struct drm_device *dev)
2657 {
2658 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2659 	/* enable the interrupts we always want on */
2660 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
2661 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
2662 			   DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
2663 			   DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
2664 	u32 gt_irqs;
2665 
2666 	dev_priv->irq_mask = ~display_mask;
2667 
2668 	/* should always be able to generate irqs */
2669 	I915_WRITE(DEIIR, I915_READ(DEIIR));
2670 	I915_WRITE(DEIMR, dev_priv->irq_mask);
2671 	I915_WRITE(DEIER, display_mask |
2672 			  DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
2673 	POSTING_READ(DEIER);
2674 
2675 	dev_priv->gt_irq_mask = ~0;
2676 
2677 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2678 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2679 
2680 	gt_irqs = GT_RENDER_USER_INTERRUPT;
2681 
2682 	if (IS_GEN6(dev))
2683 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
2684 	else
2685 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2686 			   ILK_BSD_USER_INTERRUPT;
2687 
2688 	I915_WRITE(GTIER, gt_irqs);
2689 	POSTING_READ(GTIER);
2690 
2691 	ibx_irq_postinstall(dev);
2692 
2693 	if (IS_IRONLAKE_M(dev)) {
2694 		/* Enable PCU event interrupts
2695 		 *
2696 		 * spinlocking not required here for correctness since interrupt
2697 		 * setup is guaranteed to run in single-threaded context. But we
2698 		 * need it to make the assert_spin_locked happy. */
2699 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2700 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
2701 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2702 	}
2703 
2704 	return 0;
2705 }
2706 
2707 static int ivybridge_irq_postinstall(struct drm_device *dev)
2708 {
2709 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2710 	/* enable the interrupts we always want on */
2711 	u32 display_mask =
2712 		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2713 		DE_PLANEC_FLIP_DONE_IVB |
2714 		DE_PLANEB_FLIP_DONE_IVB |
2715 		DE_PLANEA_FLIP_DONE_IVB |
2716 		DE_AUX_CHANNEL_A_IVB |
2717 		DE_ERR_INT_IVB;
2718 	u32 pm_irqs = GEN6_PM_RPS_EVENTS;
2719 	u32 gt_irqs;
2720 
2721 	dev_priv->irq_mask = ~display_mask;
2722 
2723 	/* should always be able to generate irqs */
2724 	I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2725 	I915_WRITE(DEIIR, I915_READ(DEIIR));
2726 	I915_WRITE(DEIMR, dev_priv->irq_mask);
2727 	I915_WRITE(DEIER,
2728 		   display_mask |
2729 		   DE_PIPEC_VBLANK_IVB |
2730 		   DE_PIPEB_VBLANK_IVB |
2731 		   DE_PIPEA_VBLANK_IVB);
2732 	POSTING_READ(DEIER);
2733 
2734 	dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2735 
2736 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2737 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2738 
2739 	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2740 		  GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2741 	I915_WRITE(GTIER, gt_irqs);
2742 	POSTING_READ(GTIER);
2743 
2744 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2745 	if (HAS_VEBOX(dev))
2746 		pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2747 			PM_VEBOX_CS_ERROR_INTERRUPT;
2748 
2749 	/* Our enable/disable rps functions may touch these registers so
2750 	 * make sure to set a known state for only the non-RPS bits.
2751 	 * The RMW is extra paranoia since this should be called after being set
2752 	 * to a known state in preinstall.
2753 	 */
2754 	I915_WRITE(GEN6_PMIMR,
2755 		   (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2756 	I915_WRITE(GEN6_PMIER,
2757 		   (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2758 	POSTING_READ(GEN6_PMIER);
2759 
2760 	ibx_irq_postinstall(dev);
2761 
2762 	return 0;
2763 }
2764 
2765 static int valleyview_irq_postinstall(struct drm_device *dev)
2766 {
2767 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2768 	u32 gt_irqs;
2769 	u32 enable_mask;
2770 	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
2771 
2772 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
2773 	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2774 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2775 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2776 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2777 
2778 	/*
2779 	 * Leave vblank interrupts masked initially; the enable/disable hooks will
2780 	 * toggle them based on usage.
2781 	 */
2782 	dev_priv->irq_mask = (~enable_mask) |
2783 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2784 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2785 
2786 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2787 	POSTING_READ(PORT_HOTPLUG_EN);
2788 
2789 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2790 	I915_WRITE(VLV_IER, enable_mask);
2791 	I915_WRITE(VLV_IIR, 0xffffffff);
2792 	I915_WRITE(PIPESTAT(0), 0xffff);
2793 	I915_WRITE(PIPESTAT(1), 0xffff);
2794 	POSTING_READ(VLV_IER);
2795 
2796 	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
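	/* GMBUS events are reported via pipe A's PIPESTAT only. */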
2797 	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2798 	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2799 
2800 	I915_WRITE(VLV_IIR, 0xffffffff);
2801 	I915_WRITE(VLV_IIR, 0xffffffff);
2802 
2803 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2804 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2805 
2806 	gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2807 		GT_BLT_USER_INTERRUPT;
2808 	I915_WRITE(GTIER, gt_irqs);
2809 	POSTING_READ(GTIER);
2810 
2811 	/* ack & enable invalid PTE error interrupts */
2812 #if 0 /* FIXME: add support to irq handler for checking these bits */
2813 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2814 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2815 #endif
2816 
2817 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2818 
2819 	return 0;
2820 }
2821 
2822 static void valleyview_irq_uninstall(struct drm_device *dev)
2823 {
2824 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2825 	int pipe;
2826 
2827 	if (!dev_priv)
2828 		return;
2829 
2830 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
2831 
2832 	for_each_pipe(pipe)
2833 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2834 
2835 	I915_WRITE(HWSTAM, 0xffffffff);
2836 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2837 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2838 	for_each_pipe(pipe)
2839 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2840 	I915_WRITE(VLV_IIR, 0xffffffff);
2841 	I915_WRITE(VLV_IMR, 0xffffffff);
2842 	I915_WRITE(VLV_IER, 0x0);
2843 	POSTING_READ(VLV_IER);
2844 }
2845 
2846 static void ironlake_irq_uninstall(struct drm_device *dev)
2847 {
2848 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2849 
2850 	if (!dev_priv)
2851 		return;
2852 
2853 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
2854 
2855 	I915_WRITE(HWSTAM, 0xffffffff);
2856 
2857 	I915_WRITE(DEIMR, 0xffffffff);
2858 	I915_WRITE(DEIER, 0x0);
2859 	I915_WRITE(DEIIR, I915_READ(DEIIR));
2860 	if (IS_GEN7(dev))
2861 		I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
2862 
2863 	I915_WRITE(GTIMR, 0xffffffff);
2864 	I915_WRITE(GTIER, 0x0);
2865 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2866 
2867 	if (HAS_PCH_NOP(dev))
2868 		return;
2869 
2870 	I915_WRITE(SDEIMR, 0xffffffff);
2871 	I915_WRITE(SDEIER, 0x0);
2872 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2873 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2874 		I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2875 }
2876 
2877 static void i8xx_irq_preinstall(struct drm_device * dev)
2878 {
2879 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2880 	int pipe;
2881 
2882 	atomic_set(&dev_priv->irq_received, 0);
2883 
2884 	for_each_pipe(pipe)
2885 		I915_WRITE(PIPESTAT(pipe), 0);
2886 	I915_WRITE16(IMR, 0xffff);
2887 	I915_WRITE16(IER, 0x0);
2888 	POSTING_READ16(IER);
2889 }
2890 
2891 static int i8xx_irq_postinstall(struct drm_device *dev)
2892 {
2893 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2894 
2895 	I915_WRITE16(EMR,
2896 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2897 
2898 	/* Unmask the interrupts that we always want on. */
2899 	dev_priv->irq_mask =
2900 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2901 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2902 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2903 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2904 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2905 	I915_WRITE16(IMR, dev_priv->irq_mask);
2906 
2907 	I915_WRITE16(IER,
2908 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2909 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2910 		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2911 		     I915_USER_INTERRUPT);
2912 	POSTING_READ16(IER);
2913 
2914 	return 0;
2915 }
2916 
2917 /*
2918  * Returns true when a page flip has completed.
2919  */
2920 static bool i8xx_handle_vblank(struct drm_device *dev,
2921 			       int pipe, u16 iir)
2922 {
2923 	drm_i915_private_t *dev_priv = dev->dev_private;
2924 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2925 
2926 	if (!drm_handle_vblank(dev, pipe))
2927 		return false;
2928 
2929 	if ((iir & flip_pending) == 0)
2930 		return false;
2931 
2932 	intel_prepare_page_flip(dev, pipe);
2933 
2934 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
2935 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2936 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2937 	 * the flip is completed (no longer pending). Since this doesn't raise
2938 	 * an interrupt per se, we watch for the change at vblank.
2939 	 */
2940 	if (I915_READ16(ISR) & flip_pending)
2941 		return false;
2942 
2943 	intel_finish_page_flip(dev, pipe);
2944 
2945 	return true;
2946 }
2947 
2948 static irqreturn_t i8xx_irq_handler(void *arg)
2949 {
2950 	struct drm_device *dev = (struct drm_device *) arg;
2951 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2952 	u16 iir, new_iir;
2953 	u32 pipe_stats[2];
2954 	int irq_received;
2955 	int pipe;
2956 	u16 flip_mask =
2957 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2958 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2959 
2960 	atomic_inc(&dev_priv->irq_received);
2961 
2962 	iir = I915_READ16(IIR);
2963 	if (iir == 0)
2964 		return;
2965 
2966 	while (iir & ~flip_mask) {
2967 		/* Can't rely on pipestat interrupt bit in iir as it might
2968 		 * have been cleared after the pipestat interrupt was received.
2969 		 * It doesn't set the bit in iir again, but it still produces
2970 		 * interrupts (for non-MSI).
2971 		 */
2972 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2973 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2974 			i915_handle_error(dev, false);
2975 
2976 		for_each_pipe(pipe) {
2977 			int reg = PIPESTAT(pipe);
2978 			pipe_stats[pipe] = I915_READ(reg);
2979 
2980 			/*
2981 			 * Clear the PIPE*STAT regs before the IIR
2982 			 */
2983 			if (pipe_stats[pipe] & 0x8000ffff) {
2984 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2985 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2986 							 pipe_name(pipe));
2987 				I915_WRITE(reg, pipe_stats[pipe]);
2988 				irq_received = 1;
2989 			}
2990 		}
2991 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2992 
2993 		I915_WRITE16(IIR, iir & ~flip_mask);
2994 		new_iir = I915_READ16(IIR); /* Flush posted writes */
2995 
2996 		i915_update_dri1_breadcrumb(dev);
2997 
2998 		if (iir & I915_USER_INTERRUPT)
2999 			notify_ring(dev, &dev_priv->ring[RCS]);
3000 
3001 		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
3002 		    i8xx_handle_vblank(dev, 0, iir))
3003 			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
3004 
3005 		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
3006 		    i8xx_handle_vblank(dev, 1, iir))
3007 			flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
3008 
3009 		iir = new_iir;
3010 	}
3011 
3012 	return;
3013 }
3014 
3015 static void i8xx_irq_uninstall(struct drm_device * dev)
3016 {
3017 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3018 	int pipe;
3019 
3020 	for_each_pipe(pipe) {
3021 		/* Clear enable bits; then clear status bits */
3022 		I915_WRITE(PIPESTAT(pipe), 0);
3023 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3024 	}
3025 	I915_WRITE16(IMR, 0xffff);
3026 	I915_WRITE16(IER, 0x0);
3027 	I915_WRITE16(IIR, I915_READ16(IIR));
3028 }
3029 
3030 static void i915_irq_preinstall(struct drm_device * dev)
3031 {
3032 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3033 	int pipe;
3034 
3035 	atomic_set(&dev_priv->irq_received, 0);
3036 
3037 	if (I915_HAS_HOTPLUG(dev)) {
3038 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3039 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3040 	}
3041 
3042 	I915_WRITE16(HWSTAM, 0xeffe);
3043 	for_each_pipe(pipe)
3044 		I915_WRITE(PIPESTAT(pipe), 0);
3045 	I915_WRITE(IMR, 0xffffffff);
3046 	I915_WRITE(IER, 0x0);
3047 	POSTING_READ(IER);
3048 }
3049 
3050 static int i915_irq_postinstall(struct drm_device *dev)
3051 {
3052 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3053 	u32 enable_mask;
3054 
3055 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3056 
3057 	/* Unmask the interrupts that we always want on. */
3058 	dev_priv->irq_mask =
3059 		~(I915_ASLE_INTERRUPT |
3060 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3061 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3062 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3063 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3064 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3065 
3066 	enable_mask =
3067 		I915_ASLE_INTERRUPT |
3068 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3069 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3070 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3071 		I915_USER_INTERRUPT;
3072 
3073 	if (I915_HAS_HOTPLUG(dev)) {
3074 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3075 		POSTING_READ(PORT_HOTPLUG_EN);
3076 
3077 		/* Enable in IER... */
3078 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3079 		/* and unmask in IMR */
3080 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3081 	}
3082 
3083 	I915_WRITE(IMR, dev_priv->irq_mask);
3084 	I915_WRITE(IER, enable_mask);
3085 	POSTING_READ(IER);
3086 
3087 	i915_enable_asle_pipestat(dev);
3088 
3089 	return 0;
3090 }
3091 
3092 /*
3093  * Returns true when a page flip has completed.
3094  */
3095 static bool i915_handle_vblank(struct drm_device *dev,
3096 			       int plane, int pipe, u32 iir)
3097 {
3098 	drm_i915_private_t *dev_priv = dev->dev_private;
3099 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3100 
3101 	if (!drm_handle_vblank(dev, pipe))
3102 		return false;
3103 
3104 	if ((iir & flip_pending) == 0)
3105 		return false;
3106 
3107 	intel_prepare_page_flip(dev, plane);
3108 
3109 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3110 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3111 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3112 	 * the flip is completed (no longer pending). Since this doesn't raise
3113 	 * an interrupt per se, we watch for the change at vblank.
3114 	 */
3115 	if (I915_READ(ISR) & flip_pending)
3116 		return false;
3117 
3118 	intel_finish_page_flip(dev, pipe);
3119 
3120 	return true;
3121 }
3122 
3123 static irqreturn_t i915_irq_handler(void *arg)
3124 {
3125 	struct drm_device *dev = (struct drm_device *) arg;
3126 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3127 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3128 	u32 flip_mask =
3129 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3130 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3131 	int pipe;
3132 
3133 	atomic_inc(&dev_priv->irq_received);
3134 
3135 	iir = I915_READ(IIR);
3136 	do {
3137 		bool irq_received = (iir & ~flip_mask) != 0;
3138 		bool blc_event = false;
3139 
3140 		/* Can't rely on pipestat interrupt bit in iir as it might
3141 		 * have been cleared after the pipestat interrupt was received.
3142 		 * It doesn't set the bit in iir again, but it still produces
3143 		 * interrupts (for non-MSI).
3144 		 */
3145 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3146 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3147 			i915_handle_error(dev, false);
3148 
3149 		for_each_pipe(pipe) {
3150 			int reg = PIPESTAT(pipe);
3151 			pipe_stats[pipe] = I915_READ(reg);
3152 
3153 			/* Clear the PIPE*STAT regs before the IIR */
3154 			if (pipe_stats[pipe] & 0x8000ffff) {
3155 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3156 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
3157 							 pipe_name(pipe));
3158 				I915_WRITE(reg, pipe_stats[pipe]);
3159 				irq_received = true;
3160 			}
3161 		}
3162 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3163 
3164 		if (!irq_received)
3165 			break;
3166 
3167 		/* Consume port.  Then clear IIR or we'll miss events */
3168 		if ((I915_HAS_HOTPLUG(dev)) &&
3169 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3170 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3171 			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
3172 
3173 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3174 				  hotplug_status);
3175 
3176 			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);
3177 
3178 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3179 			POSTING_READ(PORT_HOTPLUG_STAT);
3180 		}
3181 
3182 		I915_WRITE(IIR, iir & ~flip_mask);
3183 		new_iir = I915_READ(IIR); /* Flush posted writes */
3184 
3185 		if (iir & I915_USER_INTERRUPT)
3186 			notify_ring(dev, &dev_priv->ring[RCS]);
3187 
3188 		for_each_pipe(pipe) {
3189 			int plane = pipe;
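			/* On mobile chips the plane/pipe assignment is swapped. */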
3190 			if (IS_MOBILE(dev))
3191 				plane = !plane;
3192 
3193 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3194 			    i915_handle_vblank(dev, plane, pipe, iir))
3195 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3196 
3197 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3198 				blc_event = true;
3199 		}
3200 
3201 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3202 			intel_opregion_asle_intr(dev);
3203 
3204 		/* With MSI, interrupts are only generated when iir
3205 		 * transitions from zero to nonzero.  If another bit got
3206 		 * set while we were handling the existing iir bits, then
3207 		 * we would never get another interrupt.
3208 		 *
3209 		 * This is fine on non-MSI as well, as if we hit this path
3210 		 * we avoid exiting the interrupt handler only to generate
3211 		 * another one.
3212 		 *
3213 		 * Note that for MSI this could cause a stray interrupt report
3214 		 * if an interrupt landed in the time between writing IIR and
3215 		 * the posting read.  This should be rare enough to never
3216 		 * trigger the 99% of 100,000 interrupts test for disabling
3217 		 * stray interrupts.
3218 		 */
3219 		iir = new_iir;
3220 	} while (iir & ~flip_mask);
3221 
3222 	i915_update_dri1_breadcrumb(dev);
3223 }
3224 
3225 static void i915_irq_uninstall(struct drm_device * dev)
3226 {
3227 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3228 	int pipe;
3229 
3230 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
3231 
3232 	if (I915_HAS_HOTPLUG(dev)) {
3233 		I915_WRITE(PORT_HOTPLUG_EN, 0);
3234 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3235 	}
3236 
3237 	I915_WRITE16(HWSTAM, 0xffff);
3238 	for_each_pipe(pipe) {
3239 		/* Clear enable bits; then clear status bits */
3240 		I915_WRITE(PIPESTAT(pipe), 0);
3241 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3242 	}
3243 	I915_WRITE(IMR, 0xffffffff);
3244 	I915_WRITE(IER, 0x0);
3245 
3246 	I915_WRITE(IIR, I915_READ(IIR));
3247 }
3248 
3249 static void i965_irq_preinstall(struct drm_device * dev)
3250 {
3251 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3252 	int pipe;
3253 
3254 	atomic_set(&dev_priv->irq_received, 0);
3255 
3256 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3257 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3258 
3259 	I915_WRITE(HWSTAM, 0xeffe);
3260 	for_each_pipe(pipe)
3261 		I915_WRITE(PIPESTAT(pipe), 0);
3262 	I915_WRITE(IMR, 0xffffffff);
3263 	I915_WRITE(IER, 0x0);
3264 	POSTING_READ(IER);
3265 }
3266 
3267 static int i965_irq_postinstall(struct drm_device *dev)
3268 {
3269 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3270 	u32 enable_mask;
3271 	u32 error_mask;
3272 
3273 	/* Unmask the interrupts that we always want on. */
3274 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3275 			       I915_DISPLAY_PORT_INTERRUPT |
3276 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3277 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3278 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3279 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3280 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3281 
3282 	enable_mask = ~dev_priv->irq_mask;
3283 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3284 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3285 	enable_mask |= I915_USER_INTERRUPT;
3286 
3287 	if (IS_G4X(dev))
3288 		enable_mask |= I915_BSD_USER_INTERRUPT;
3289 
3290 	i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
3291 
3292 	/*
3293 	 * Enable some error detection, note the instruction error mask
3294 	 * bit is reserved, so we leave it masked.
3295 	 */
3296 	if (IS_G4X(dev)) {
3297 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3298 			       GM45_ERROR_MEM_PRIV |
3299 			       GM45_ERROR_CP_PRIV |
3300 			       I915_ERROR_MEMORY_REFRESH);
3301 	} else {
3302 		error_mask = ~(I915_ERROR_PAGE_TABLE |
3303 			       I915_ERROR_MEMORY_REFRESH);
3304 	}
3305 	I915_WRITE(EMR, error_mask);
3306 
3307 	I915_WRITE(IMR, dev_priv->irq_mask);
3308 	I915_WRITE(IER, enable_mask);
3309 	POSTING_READ(IER);
3310 
3311 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3312 	POSTING_READ(PORT_HOTPLUG_EN);
3313 
3314 	i915_enable_asle_pipestat(dev);
3315 
3316 	return 0;
3317 }
3318 
3319 static void i915_hpd_irq_setup(struct drm_device *dev)
3320 {
3321 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3322 	struct drm_mode_config *mode_config = &dev->mode_config;
3323 	struct intel_encoder *intel_encoder;
3324 	u32 hotplug_en;
3325 
3326 	if (I915_HAS_HOTPLUG(dev)) {
3327 		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3328 		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3329 		/* Note HDMI and DP share hotplug bits */
3330 		/* enable bits are the same for all generations */
3331 		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3332 			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3333 				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
3334 		/* Programming the CRT detection parameters tends
3335 		   to generate a spurious hotplug event about three
3336 		   seconds later.  So just do it once.
3337 		*/
3338 		if (IS_G4X(dev))
3339 			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3340 		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
3341 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3342 
3343 		/* Ignore TV since it's buggy */
3344 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3345 	}
3346 }
3347 
3348 static irqreturn_t i965_irq_handler(void *arg)
3349 {
3350 	struct drm_device *dev = (struct drm_device *) arg;
3351 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3352 	u32 iir, new_iir;
3353 	u32 pipe_stats[I915_MAX_PIPES];
3354 	int irq_received;
3355 	int pipe;
3356 	u32 flip_mask =
3357 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3358 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3359 
3360 	atomic_inc(&dev_priv->irq_received);
3361 
3362 	iir = I915_READ(IIR);
3363 
3364 	for (;;) {
3365 		bool blc_event = false;
3366 
3367 		irq_received = (iir & ~flip_mask) != 0;
3368 
3369 		/* Can't rely on pipestat interrupt bit in iir as it might
3370 		 * have been cleared after the pipestat interrupt was received.
3371 		 * It doesn't set the bit in iir again, but it still produces
3372 		 * interrupts (for non-MSI).
3373 		 */
3374 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3375 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3376 			i915_handle_error(dev, false);
3377 
3378 		for_each_pipe(pipe) {
3379 			int reg = PIPESTAT(pipe);
3380 			pipe_stats[pipe] = I915_READ(reg);
3381 
3382 			/*
3383 			 * Clear the PIPE*STAT regs before the IIR
3384 			 */
3385 			if (pipe_stats[pipe] & 0x8000ffff) {
3386 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3387 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
3388 							 pipe_name(pipe));
3389 				I915_WRITE(reg, pipe_stats[pipe]);
3390 				irq_received = 1;
3391 			}
3392 		}
3393 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3394 
3395 		if (!irq_received)
3396 			break;
3397 
3398 		/* Consume port.  Then clear IIR or we'll miss events */
3399 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
3400 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
3401 			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3402 								  HOTPLUG_INT_STATUS_G4X :
3403 								  HOTPLUG_INT_STATUS_I915);
3404 
3405 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3406 				  hotplug_status);
3407 
3408 			intel_hpd_irq_handler(dev, hotplug_trigger,
3409 					      IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915);
3410 
3411 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3412 			I915_READ(PORT_HOTPLUG_STAT);
3413 		}
3414 
3415 		I915_WRITE(IIR, iir & ~flip_mask);
3416 		new_iir = I915_READ(IIR); /* Flush posted writes */
3417 
3418 		if (iir & I915_USER_INTERRUPT)
3419 			notify_ring(dev, &dev_priv->ring[RCS]);
3420 		if (iir & I915_BSD_USER_INTERRUPT)
3421 			notify_ring(dev, &dev_priv->ring[VCS]);
3422 
3423 		for_each_pipe(pipe) {
3424 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
3425 			    i915_handle_vblank(dev, pipe, pipe, iir))
3426 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
3427 
3428 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3429 				blc_event = true;
3430 		}
3431 
3432 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
3433 			intel_opregion_asle_intr(dev);
3434 
3435 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3436 			gmbus_irq_handler(dev);
3437 
3438 		/* With MSI, interrupts are only generated when iir
3439 		 * transitions from zero to nonzero.  If another bit got
3440 		 * set while we were handling the existing iir bits, then
3441 		 * we would never get another interrupt.
3442 		 *
3443 		 * This is fine on non-MSI as well, as if we hit this path
3444 		 * we avoid exiting the interrupt handler only to generate
3445 		 * another one.
3446 		 *
3447 		 * Note that for MSI this could cause a stray interrupt report
3448 		 * if an interrupt landed in the time between writing IIR and
3449 		 * the posting read.  This should be rare enough to never
3450 		 * trigger the 99% of 100,000 interrupts test for disabling
3451 		 * stray interrupts.
3452 		 */
3453 		iir = new_iir;
3454 	}
3455 
3456 	i915_update_dri1_breadcrumb(dev);
3457 }
3458 
3459 static void i965_irq_uninstall(struct drm_device * dev)
3460 {
3461 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3462 	int pipe;
3463 
3464 	if (!dev_priv)
3465 		return;
3466 
3467 	del_timer_sync(&dev_priv->hotplug_reenable_timer);
3468 
3469 	I915_WRITE(PORT_HOTPLUG_EN, 0);
3470 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3471 
3472 	I915_WRITE(HWSTAM, 0xffffffff);
3473 	for_each_pipe(pipe)
3474 		I915_WRITE(PIPESTAT(pipe), 0);
3475 	I915_WRITE(IMR, 0xffffffff);
3476 	I915_WRITE(IER, 0x0);
3477 
3478 	for_each_pipe(pipe)
3479 		I915_WRITE(PIPESTAT(pipe),
3480 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3481 	I915_WRITE(IIR, I915_READ(IIR));
3482 }
3483 
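/*
 * Timer callback that undoes hotplug-storm mitigation: any HPD pin left in
 * HPD_DISABLED is marked HPD_ENABLED again, connectors using that pin get
 * their interrupt-driven polling mode restored, and the hotplug interrupt
 * registers are reprogrammed via the hpd_irq_setup hook.  The timer is armed
 * when storm detection disables a pin (see intel_hpd_irq_handler earlier in
 * this file).
 */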
3484 static void i915_reenable_hotplug_timer_func(unsigned long data)
3485 {
3486 	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3487 	struct drm_device *dev = dev_priv->dev;
3488 	struct drm_mode_config *mode_config = &dev->mode_config;
3489 	int i;
3490 
3491 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3492 	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3493 		struct drm_connector *connector;
3494 
3495 		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3496 			continue;
3497 
3498 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3499 
3500 		list_for_each_entry(connector, &mode_config->connector_list, head) {
3501 			struct intel_connector *intel_connector = to_intel_connector(connector);
3502 
3503 			if (intel_connector->encoder->hpd_pin == i) {
3504 				if (connector->polled != intel_connector->polled)
3505 					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3506 							 drm_get_connector_name(connector));
3507 				connector->polled = intel_connector->polled;
3508 				if (!connector->polled)
3509 					connector->polled = DRM_CONNECTOR_POLL_HPD;
3510 			}
3511 		}
3512 	}
3513 	if (dev_priv->display.hpd_irq_setup)
3514 		dev_priv->display.hpd_irq_setup(dev);
3515 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3516 }
3517 
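/*
 * One-time IRQ setup: create the deferred-work items and timers used by the
 * interrupt code, register the vblank counter/timestamp hooks, and select
 * the per-generation irq_handler/preinstall/postinstall/uninstall, vblank
 * and hpd_irq_setup callbacks.
 *
 * Illustrative call order from the driver load path (a sketch only; the
 * actual call sites live elsewhere in this tree and may differ):
 *
 *	intel_irq_init(dev);		- pick hooks, init work items/timers
 *	drm_irq_install(dev);		- runs irq_preinstall/postinstall
 *	intel_hpd_init(dev);		- arm hotplug detection
 */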
3518 void intel_irq_init(struct drm_device *dev)
3519 {
3520 	struct drm_i915_private *dev_priv = dev->dev_private;
3521 
3522 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
3523 	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
3524 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
3525 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
3526 
3527 	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3528 		    i915_hangcheck_elapsed,
3529 		    (unsigned long) dev);
3530 	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3531 		    (unsigned long) dev_priv);
3532 
3533 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
3534 
3535 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
3536 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
3537 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
3538 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3539 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3540 	}
3541 
3542 	if (drm_core_check_feature(dev, DRIVER_MODESET))
3543 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3544 	else
3545 		dev->driver->get_vblank_timestamp = NULL;
3546 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3547 
3548 	if (IS_VALLEYVIEW(dev)) {
3549 		dev->driver->irq_handler = valleyview_irq_handler;
3550 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
3551 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
3552 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
3553 		dev->driver->enable_vblank = valleyview_enable_vblank;
3554 		dev->driver->disable_vblank = valleyview_disable_vblank;
3555 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3556 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
3557 		/* Share uninstall handlers with ILK/SNB */
3558 		dev->driver->irq_handler = ivybridge_irq_handler;
3559 		dev->driver->irq_preinstall = ivybridge_irq_preinstall;
3560 		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3561 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
3562 		dev->driver->enable_vblank = ivybridge_enable_vblank;
3563 		dev->driver->disable_vblank = ivybridge_disable_vblank;
3564 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3565 	} else if (HAS_PCH_SPLIT(dev)) {
3566 		dev->driver->irq_handler = ironlake_irq_handler;
3567 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
3568 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
3569 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
3570 		dev->driver->enable_vblank = ironlake_enable_vblank;
3571 		dev->driver->disable_vblank = ironlake_disable_vblank;
3572 		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
3573 	} else {
3574 		if (INTEL_INFO(dev)->gen == 2) {
3575 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
3576 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
3577 			dev->driver->irq_handler = i8xx_irq_handler;
3578 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
3579 		} else if (INTEL_INFO(dev)->gen == 3) {
3580 			dev->driver->irq_preinstall = i915_irq_preinstall;
3581 			dev->driver->irq_postinstall = i915_irq_postinstall;
3582 			dev->driver->irq_uninstall = i915_irq_uninstall;
3583 			dev->driver->irq_handler = i915_irq_handler;
3584 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3585 		} else {
3586 			dev->driver->irq_preinstall = i965_irq_preinstall;
3587 			dev->driver->irq_postinstall = i965_irq_postinstall;
3588 			dev->driver->irq_uninstall = i965_irq_uninstall;
3589 			dev->driver->irq_handler = i965_irq_handler;
3590 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3591 		}
3592 		dev->driver->enable_vblank = i915_enable_vblank;
3593 		dev->driver->disable_vblank = i915_disable_vblank;
3594 	}
3595 }
3596 
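/*
 * (Re)arm hotplug detection: reset the per-pin storm statistics, mark every
 * pin HPD_ENABLED, switch connectors with a usable HPD pin to
 * interrupt-driven polling, and program the hotplug interrupt registers
 * under irq_lock via the platform's hpd_irq_setup hook.
 */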
3597 void intel_hpd_init(struct drm_device *dev)
3598 {
3599 	struct drm_i915_private *dev_priv = dev->dev_private;
3600 	struct drm_mode_config *mode_config = &dev->mode_config;
3601 	struct drm_connector *connector;
3602 	int i;
3603 
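	/* Pin 0 is HPD_NONE; real pins start at index 1. */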
3604 	for (i = 1; i < HPD_NUM_PINS; i++) {
3605 		dev_priv->hpd_stats[i].hpd_cnt = 0;
3606 		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3607 	}
3608 	list_for_each_entry(connector, &mode_config->connector_list, head) {
3609 		struct intel_connector *intel_connector = to_intel_connector(connector);
3610 		connector->polled = intel_connector->polled;
3611 		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3612 			connector->polled = DRM_CONNECTOR_POLL_HPD;
3613 	}
3614 
3615 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3616 	 * just to make the assert_spin_locked checks happy. */
3617 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3618 	if (dev_priv->display.hpd_irq_setup)
3619 		dev_priv->display.hpd_irq_setup(dev);
3620 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3621 }
3622