xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/i915/i915_irq.c (revision 43f68184b16c6c624f61789a7ad788178963c063)
1 /*	$NetBSD: i915_irq.c,v 1.25 2021/12/19 11:45:01 riastradh Exp $	*/
2 
3 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4  */
5 /*
6  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7  * All Rights Reserved.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the
11  * "Software"), to deal in the Software without restriction, including
12  * without limitation the rights to use, copy, modify, merge, publish,
13  * distribute, sub license, and/or sell copies of the Software, and to
14  * permit persons to whom the Software is furnished to do so, subject to
15  * the following conditions:
16  *
17  * The above copyright notice and this permission notice (including the
18  * next paragraph) shall be included in all copies or substantial portions
19  * of the Software.
20  *
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
22  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
24  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
25  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
26  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
27  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
28  *
29  */
30 
31 #include <sys/cdefs.h>
32 __KERNEL_RCSID(0, "$NetBSD: i915_irq.c,v 1.25 2021/12/19 11:45:01 riastradh Exp $");
33 
34 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35 
36 #include <linux/circ_buf.h>
37 #include <linux/slab.h>
38 #include <linux/sysrq.h>
39 
40 #include <drm/drm_drv.h>
41 #include <drm/drm_irq.h>
42 #include <drm/i915_drm.h>
43 
44 #include "display/intel_display_types.h"
45 #include "display/intel_fifo_underrun.h"
46 #include "display/intel_hotplug.h"
47 #include "display/intel_lpe_audio.h"
48 #include "display/intel_psr.h"
49 
50 #include "gt/intel_gt.h"
51 #include "gt/intel_gt_irq.h"
52 #include "gt/intel_gt_pm_irq.h"
53 #include "gt/intel_rps.h"
54 
55 #include "i915_drv.h"
56 #include "i915_irq.h"
57 #include "i915_trace.h"
58 #include "intel_pm.h"
59 
60 /**
61  * DOC: interrupt handling
62  *
63  * These functions provide the basic support for enabling and disabling
64  * interrupt handling. There's a lot more functionality in i915_irq.c
65  * and related files, but that will be described in separate chapters.
66  */
67 
68 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
69 
70 static const u32 hpd_ilk[HPD_NUM_PINS] = {
71 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
72 };
73 
74 static const u32 hpd_ivb[HPD_NUM_PINS] = {
75 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
76 };
77 
78 static const u32 hpd_bdw[HPD_NUM_PINS] = {
79 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
80 };
81 
82 static const u32 hpd_ibx[HPD_NUM_PINS] = {
83 	[HPD_CRT] = SDE_CRT_HOTPLUG,
84 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
85 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
86 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
87 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
88 };
89 
90 static const u32 hpd_cpt[HPD_NUM_PINS] = {
91 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
92 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
93 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
94 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
95 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
96 };
97 
98 static const u32 hpd_spt[HPD_NUM_PINS] = {
99 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
100 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
101 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
102 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
103 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
104 };
105 
106 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
107 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
108 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
109 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
110 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
111 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
112 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
113 };
114 
115 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
116 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
117 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
118 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
119 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
120 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
121 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
122 };
123 
124 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
125 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
126 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
127 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
128 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
129 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
130 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
131 };
132 
133 /* BXT hpd list */
134 static const u32 hpd_bxt[HPD_NUM_PINS] = {
135 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
136 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
137 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
138 };
139 
140 static const u32 hpd_gen11[HPD_NUM_PINS] = {
141 	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
142 	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
143 	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
144 	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
145 };
146 
147 static const u32 hpd_gen12[HPD_NUM_PINS] = {
148 	[HPD_PORT_D] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
149 	[HPD_PORT_E] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
150 	[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
151 	[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
152 	[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
153 	[HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
154 };
155 
156 static const u32 hpd_icp[HPD_NUM_PINS] = {
157 	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
158 	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
159 	[HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
160 	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
161 	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
162 	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
163 };
164 
165 static const u32 hpd_tgp[HPD_NUM_PINS] = {
166 	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
167 	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
168 	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
169 	[HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
170 	[HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
171 	[HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
172 	[HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
173 	[HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
174 	[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
175 };
176 
177 void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
178 		    i915_reg_t iir, i915_reg_t ier)
179 {
180 	intel_uncore_write(uncore, imr, 0xffffffff);
181 	intel_uncore_posting_read(uncore, imr);
182 
183 	intel_uncore_write(uncore, ier, 0);
184 
185 	/* IIR can theoretically queue up two events. Be paranoid. */
186 	intel_uncore_write(uncore, iir, 0xffffffff);
187 	intel_uncore_posting_read(uncore, iir);
188 	intel_uncore_write(uncore, iir, 0xffffffff);
189 	intel_uncore_posting_read(uncore, iir);
190 }
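/*
 * The double write/posting-read cycle above drains both events: IIR
 * appears to be double buffered, so clearing the visible event can
 * immediately latch a second queued one.
 */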
191 
192 void gen2_irq_reset(struct intel_uncore *uncore)
193 {
194 	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
195 	intel_uncore_posting_read16(uncore, GEN2_IMR);
196 
197 	intel_uncore_write16(uncore, GEN2_IER, 0);
198 
199 	/* IIR can theoretically queue up two events. Be paranoid. */
200 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
201 	intel_uncore_posting_read16(uncore, GEN2_IIR);
202 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
203 	intel_uncore_posting_read16(uncore, GEN2_IIR);
204 }
205 
206 /*
207  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
208  */
209 static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
210 {
211 	u32 val = intel_uncore_read(uncore, reg);
212 
213 	if (val == 0)
214 		return;
215 
216 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
217 	     i915_mmio_reg_offset(reg), val);
218 	intel_uncore_write(uncore, reg, 0xffffffff);
219 	intel_uncore_posting_read(uncore, reg);
220 	intel_uncore_write(uncore, reg, 0xffffffff);
221 	intel_uncore_posting_read(uncore, reg);
222 }
223 
224 static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
225 {
226 	u16 val = intel_uncore_read16(uncore, GEN2_IIR);
227 
228 	if (val == 0)
229 		return;
230 
231 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
232 	     i915_mmio_reg_offset(GEN2_IIR), val);
233 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
234 	intel_uncore_posting_read16(uncore, GEN2_IIR);
235 	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
236 	intel_uncore_posting_read16(uncore, GEN2_IIR);
237 }
238 
239 void gen3_irq_init(struct intel_uncore *uncore,
240 		   i915_reg_t imr, u32 imr_val,
241 		   i915_reg_t ier, u32 ier_val,
242 		   i915_reg_t iir)
243 {
244 	gen3_assert_iir_is_zero(uncore, iir);
245 
246 	intel_uncore_write(uncore, ier, ier_val);
247 	intel_uncore_write(uncore, imr, imr_val);
248 	intel_uncore_posting_read(uncore, imr);
249 }
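/*
 * The trailing posting read flushes the IMR write, so the new mask is
 * known to have reached the hardware by the time this returns;
 * gen2_irq_init() below does the same through the 16-bit accessors.
 */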
250 
251 void gen2_irq_init(struct intel_uncore *uncore,
252 		   u32 imr_val, u32 ier_val)
253 {
254 	gen2_assert_iir_is_zero(uncore);
255 
256 	intel_uncore_write16(uncore, GEN2_IER, ier_val);
257 	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
258 	intel_uncore_posting_read16(uncore, GEN2_IMR);
259 }
260 
261 /* For display hotplug interrupt */
262 static inline void
263 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
264 				     u32 mask,
265 				     u32 bits)
266 {
267 	u32 val;
268 
269 	lockdep_assert_held(&dev_priv->irq_lock);
270 	WARN_ON(bits & ~mask);
271 
272 	val = I915_READ(PORT_HOTPLUG_EN);
273 	val &= ~mask;
274 	val |= bits;
275 	I915_WRITE(PORT_HOTPLUG_EN, val);
276 }
277 
278 /**
279  * i915_hotplug_interrupt_update - update hotplug interrupt enable
280  * @dev_priv: driver private
281  * @mask: bits to update
282  * @bits: bits to enable
283  * NOTE: the HPD enable bits are modified both inside and outside
284  * of an interrupt context. To keep read-modify-write cycles from
285  * interfering, these bits are protected by a spinlock. Since this
286  * function is usually not called from a context where the lock is
287  * held already, this function acquires the lock itself. A non-locking
288  * version is also available.
289  */
290 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
291 				   u32 mask,
292 				   u32 bits)
293 {
294 	spin_lock_irq(&dev_priv->irq_lock);
295 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
296 	spin_unlock_irq(&dev_priv->irq_lock);
297 }
298 
299 /**
300  * ilk_update_display_irq - update DEIMR
301  * @dev_priv: driver private
302  * @interrupt_mask: mask of interrupt bits to update
303  * @enabled_irq_mask: mask of interrupt bits to enable
304  */
305 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
306 			    u32 interrupt_mask,
307 			    u32 enabled_irq_mask)
308 {
309 	u32 new_val;
310 
311 	lockdep_assert_held(&dev_priv->irq_lock);
312 
313 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
314 
315 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
316 		return;
317 
318 	new_val = dev_priv->irq_mask;
319 	new_val &= ~interrupt_mask;
320 	new_val |= (~enabled_irq_mask & interrupt_mask);
321 
322 	if (new_val != dev_priv->irq_mask) {
323 		dev_priv->irq_mask = new_val;
324 		I915_WRITE(DEIMR, dev_priv->irq_mask);
325 		POSTING_READ(DEIMR);
326 	}
327 }
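/*
 * A set bit in DEIMR masks (disables) the corresponding interrupt, so
 * enabling a source means clearing its mask bit.  A minimal usage
 * sketch, with DE_GSE as an arbitrary example bit (callers must hold
 * irq_lock):
 *
 *	ilk_update_display_irq(dev_priv, DE_GSE, DE_GSE);	unmask
 *	ilk_update_display_irq(dev_priv, DE_GSE, 0);		mask
 */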
328 
329 /**
330  * bdw_update_port_irq - update DE port interrupt
331  * @dev_priv: driver private
332  * @interrupt_mask: mask of interrupt bits to update
333  * @enabled_irq_mask: mask of interrupt bits to enable
334  */
335 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
336 				u32 interrupt_mask,
337 				u32 enabled_irq_mask)
338 {
339 	u32 new_val;
340 	u32 old_val;
341 
342 	lockdep_assert_held(&dev_priv->irq_lock);
343 
344 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
345 
346 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
347 		return;
348 
349 	old_val = I915_READ(GEN8_DE_PORT_IMR);
350 
351 	new_val = old_val;
352 	new_val &= ~interrupt_mask;
353 	new_val |= (~enabled_irq_mask & interrupt_mask);
354 
355 	if (new_val != old_val) {
356 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
357 		POSTING_READ(GEN8_DE_PORT_IMR);
358 	}
359 }
360 
361 /**
362  * bdw_update_pipe_irq - update DE pipe interrupt
363  * @dev_priv: driver private
364  * @pipe: pipe whose interrupt to update
365  * @interrupt_mask: mask of interrupt bits to update
366  * @enabled_irq_mask: mask of interrupt bits to enable
367  */
368 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
369 			 enum pipe pipe,
370 			 u32 interrupt_mask,
371 			 u32 enabled_irq_mask)
372 {
373 	u32 new_val;
374 
375 	lockdep_assert_held(&dev_priv->irq_lock);
376 
377 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
378 
379 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
380 		return;
381 
382 	new_val = dev_priv->de_irq_mask[pipe];
383 	new_val &= ~interrupt_mask;
384 	new_val |= (~enabled_irq_mask & interrupt_mask);
385 
386 	if (new_val != dev_priv->de_irq_mask[pipe]) {
387 		dev_priv->de_irq_mask[pipe] = new_val;
388 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
389 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
390 	}
391 }
392 
393 /**
394  * ibx_display_interrupt_update - update SDEIMR
395  * @dev_priv: driver private
396  * @interrupt_mask: mask of interrupt bits to update
397  * @enabled_irq_mask: mask of interrupt bits to enable
398  */
399 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
400 				  u32 interrupt_mask,
401 				  u32 enabled_irq_mask)
402 {
403 	u32 sdeimr = I915_READ(SDEIMR);
404 	sdeimr &= ~interrupt_mask;
405 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
406 
407 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
408 
409 	lockdep_assert_held(&dev_priv->irq_lock);
410 
411 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
412 		return;
413 
414 	I915_WRITE(SDEIMR, sdeimr);
415 	POSTING_READ(SDEIMR);
416 }
417 
418 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
419 			      enum pipe pipe)
420 {
421 	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
422 	u32 enable_mask = status_mask << 16;
423 
424 	lockdep_assert_held(&dev_priv->irq_lock);
425 
426 	if (INTEL_GEN(dev_priv) < 5)
427 		goto out;
428 
429 	/*
430 	 * On pipe A we don't support the PSR interrupt yet,
431 	 * on pipe B and C the same bit MBZ.
432 	 */
433 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
434 		return 0;
435 	/*
436 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
437 	 * A the same bit is for perf counters which we don't use either.
438 	 */
439 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
440 		return 0;
441 
442 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
443 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
444 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
445 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
446 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
447 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
448 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
449 
450 out:
451 	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
452 		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
453 		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
454 		  pipe_name(pipe), enable_mask, status_mask);
455 
456 	return enable_mask;
457 }
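/*
 * PIPESTAT packs status bits in the low 16 bits and the matching
 * enable bits 16 positions above them, hence the default enable_mask
 * of status_mask << 16; the VLV/CHV special cases above cover the
 * bits where that one-to-one mapping does not hold.
 */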
458 
459 void i915_enable_pipestat(struct drm_i915_private *dev_priv,
460 			  enum pipe pipe, u32 status_mask)
461 {
462 	i915_reg_t reg = PIPESTAT(pipe);
463 	u32 enable_mask;
464 
465 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
466 		  "pipe %c: status_mask=0x%x\n",
467 		  pipe_name(pipe), status_mask);
468 
469 	lockdep_assert_held(&dev_priv->irq_lock);
470 	WARN_ON(!intel_irqs_enabled(dev_priv));
471 
472 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
473 		return;
474 
475 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
476 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
477 
478 	I915_WRITE(reg, enable_mask | status_mask);
479 	POSTING_READ(reg);
480 }
481 
482 void i915_disable_pipestat(struct drm_i915_private *dev_priv,
483 			   enum pipe pipe, u32 status_mask)
484 {
485 	i915_reg_t reg = PIPESTAT(pipe);
486 	u32 enable_mask;
487 
488 	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
489 		  "pipe %c: status_mask=0x%x\n",
490 		  pipe_name(pipe), status_mask);
491 
492 	lockdep_assert_held(&dev_priv->irq_lock);
493 	WARN_ON(!intel_irqs_enabled(dev_priv));
494 
495 	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
496 		return;
497 
498 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
499 	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
500 
501 	I915_WRITE(reg, enable_mask | status_mask);
502 	POSTING_READ(reg);
503 }
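/*
 * In both helpers above, writing enable_mask | status_mask updates
 * the enable bits (high half) and at the same time acks any pending
 * status bits (low half), which are understood to be write-1-to-clear.
 */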
504 
505 static bool i915_has_asle(struct drm_i915_private *dev_priv)
506 {
507 	if (!dev_priv->opregion.asle)
508 		return false;
509 
510 	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
511 }
512 
513 /**
514  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
515  * @dev_priv: i915 device private
516  */
517 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
518 {
519 	if (!i915_has_asle(dev_priv))
520 		return;
521 
522 	spin_lock_irq(&dev_priv->irq_lock);
523 
524 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
525 	if (INTEL_GEN(dev_priv) >= 4)
526 		i915_enable_pipestat(dev_priv, PIPE_A,
527 				     PIPE_LEGACY_BLC_EVENT_STATUS);
528 
529 	spin_unlock_irq(&dev_priv->irq_lock);
530 }
531 
532 /*
533  * This timing diagram depicts the video signal in and
534  * around the vertical blanking period.
535  *
536  * Assumptions about the fictitious mode used in this example:
537  *  vblank_start >= 3
538  *  vsync_start = vblank_start + 1
539  *  vsync_end = vblank_start + 2
540  *  vtotal = vblank_start + 3
541  *
542  *           start of vblank:
543  *           latch double buffered registers
544  *           increment frame counter (ctg+)
545  *           generate start of vblank interrupt (gen4+)
546  *           |
547  *           |          frame start:
548  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
549  *           |          may be shifted forward 1-3 extra lines via PIPECONF
550  *           |          |
551  *           |          |  start of vsync:
552  *           |          |  generate vsync interrupt
553  *           |          |  |
554  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
555  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
556  * ----va---> <-----------------vb--------------------> <--------va-------------
557  *       |          |       <----vs----->                     |
558  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
559  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
560  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
561  *       |          |                                         |
562  *       last visible pixel                                   first visible pixel
563  *                  |                                         increment frame counter (gen3/4)
564  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
565  *
566  * x  = horizontal active
567  * _  = horizontal blanking
568  * hs = horizontal sync
569  * va = vertical active
570  * vb = vertical blanking
571  * vs = vertical sync
572  * vbs = vblank_start (number)
573  *
574  * Summary:
575  * - most events happen at the start of horizontal sync
576  * - frame start happens at the start of horizontal blank, 1-4 lines
577  *   (depending on PIPECONF settings) after the start of vblank
578  * - gen3/4 pixel and frame counter are synchronized with the start
579  *   of horizontal active on the first line of vertical active
580  */
581 
582 /* Called from drm generic code, passed a 'crtc', which
583  * we use as a pipe index
584  */
585 u32 i915_get_vblank_counter(struct drm_crtc *crtc)
586 {
587 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
588 	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
589 	const struct drm_display_mode *mode = &vblank->hwmode;
590 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
591 	i915_reg_t high_frame, low_frame;
592 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
593 	unsigned long irqflags;
594 
595 	/*
596 	 * On i965gm TV output the frame counter only works up to
597 	 * the point when we enable the TV encoder. After that the
598 	 * frame counter ceases to work and reads zero. We need a
599 	 * vblank wait before enabling the TV encoder and so we
600 	 * have to enable vblank interrupts while the frame counter
601 	 * is still in a working state. However the core vblank code
602 	 * does not like us returning non-zero frame counter values
603 	 * when we've told it that we don't have a working frame
604 	 * counter. Thus we must stop non-zero values leaking out.
605 	 */
606 	if (!vblank->max_vblank_count)
607 		return 0;
608 
609 	htotal = mode->crtc_htotal;
610 	hsync_start = mode->crtc_hsync_start;
611 	vbl_start = mode->crtc_vblank_start;
612 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
613 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
614 
615 	/* Convert to pixel count */
616 	vbl_start *= htotal;
617 
618 	/* Start of vblank event occurs at start of hsync */
619 	vbl_start -= htotal - hsync_start;
620 
621 	high_frame = PIPEFRAME(pipe);
622 	low_frame = PIPEFRAMEPIXEL(pipe);
623 
624 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
625 
626 	/*
627 	 * High & low register fields aren't synchronized, so make sure
628 	 * we get a low value that's stable across two reads of the high
629 	 * register.
630 	 */
631 	do {
632 		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
633 		low   = I915_READ_FW(low_frame);
634 		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
635 	} while (high1 != high2);
636 
637 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
638 
639 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
640 	pixel = low & PIPE_PIXEL_MASK;
641 	low >>= PIPE_FRAME_LOW_SHIFT;
642 
643 	/*
644 	 * The frame counter increments at beginning of active.
645 	 * Cook up a vblank counter by also checking the pixel
646 	 * counter against vblank start.
647 	 */
648 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
649 }
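/*
 * Worked example of the adjustment above, assuming htotal = 100,
 * hsync_start = 92 and vblank_start = 90 lines: vbl_start becomes
 * 90 * 100 - (100 - 92) = 8992 pixels.  Once the pixel counter
 * reaches 8992 the pipe has entered the vblank that ends the current
 * frame, so 1 is added to the hardware frame counter (which only
 * increments at start of active) to make the cooked-up value tick
 * over at start of vblank instead.
 */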
650 
651 u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
652 {
653 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
654 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
655 
656 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
657 }
658 
659 /*
660  * On certain encoders on certain platforms, the pipe
661  * scanline register will not work to get the scanline,
662  * either because the timings are driven from the PORT or
663  * because of issues with scanline register updates.
664  * This function instead uses the framestamp and current
665  * timestamp registers to calculate the scanline.
666  */
667 static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
668 {
669 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
670 	struct drm_vblank_crtc *vblank =
671 		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
672 	const struct drm_display_mode *mode = &vblank->hwmode;
673 	u32 vblank_start = mode->crtc_vblank_start;
674 	u32 vtotal = mode->crtc_vtotal;
675 	u32 htotal = mode->crtc_htotal;
676 	u32 clock = mode->crtc_clock;
677 	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;
678 
679 	/*
680 	 * To avoid the race condition where we might cross into the
681 	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
682 	 * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
683 	 * during the same frame.
684 	 */
685 	do {
686 		/*
687 		 * This field provides read back of the display
688 		 * pipe frame time stamp. The time stamp value
689 		 * is sampled at every start of vertical blank.
690 		 */
691 		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
692 
693 		/*
694 		 * The TIMESTAMP_CTR register has the current
695 		 * time stamp value.
696 		 */
697 		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
698 
699 		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
700 	} while (scan_post_time != scan_prev_time);
701 
702 	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
703 					clock), 1000 * htotal);
704 	scanline = min(scanline, vtotal - 1);
705 	scanline = (scanline + vblank_start) % vtotal;
706 
707 	return scanline;
708 }
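/*
 * The arithmetic above is consistent with the timestamp counters
 * ticking once per microsecond and crtc_clock being in kHz:
 * (delta_us * clock_khz) / 1000 gives pixels scanned since the last
 * vblank timestamp, dividing by htotal converts that to lines, and
 * the result is offset by vblank_start modulo vtotal.
 */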
709 
710 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
711 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
712 {
713 	struct drm_device *dev = crtc->base.dev;
714 	struct drm_i915_private *dev_priv = to_i915(dev);
715 	const struct drm_display_mode *mode;
716 	struct drm_vblank_crtc *vblank;
717 	enum pipe pipe = crtc->pipe;
718 	int position, vtotal;
719 
720 	if (!crtc->active)
721 		return -1;
722 
723 	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
724 	mode = &vblank->hwmode;
725 
726 	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
727 		return __intel_get_crtc_scanline_from_timestamp(crtc);
728 
729 	vtotal = mode->crtc_vtotal;
730 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
731 		vtotal /= 2;
732 
733 	if (IS_GEN(dev_priv, 2))
734 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
735 	else
736 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
737 
738 	/*
739 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
740 	 * read it just before the start of vblank.  So try it again
741 	 * so we don't accidentally end up spanning a vblank frame
742 	 * increment, causing the pipe_update_end() code to squawk at us.
743 	 *
744 	 * The nature of this problem means we can't simply check the ISR
745 	 * bit and return the vblank start value; nor can we use the scanline
746 	 * debug register in the transcoder as it appears to have the same
747 	 * problem.  We may need to extend this to include other platforms,
748 	 * but so far testing only shows the problem on HSW.
749 	 */
750 	if (HAS_DDI(dev_priv) && !position) {
751 		int i, temp;
752 
753 		for (i = 0; i < 100; i++) {
754 			udelay(1);
755 			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
756 			if (temp != position) {
757 				position = temp;
758 				break;
759 			}
760 		}
761 	}
762 
763 	/*
764 	 * See update_scanline_offset() for the details on the
765 	 * scanline_offset adjustment.
766 	 */
767 	return (position + crtc->scanline_offset) % vtotal;
768 }
769 
770 bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
771 			      bool in_vblank_irq, int *vpos, int *hpos,
772 			      ktime_t *stime, ktime_t *etime,
773 			      const struct drm_display_mode *mode)
774 {
775 	struct drm_i915_private *dev_priv = to_i915(dev);
776 	struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
777 	enum pipe pipe = crtc->pipe;
778 	int position;
779 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
780 	unsigned long irqflags;
781 	bool use_scanline_counter = INTEL_GEN(dev_priv) >= 5 ||
782 		IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
783 		mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
784 
785 	if (WARN_ON(!mode->crtc_clock)) {
786 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
787 				 "pipe %c\n", pipe_name(pipe));
788 		return false;
789 	}
790 
791 	htotal = mode->crtc_htotal;
792 	hsync_start = mode->crtc_hsync_start;
793 	vtotal = mode->crtc_vtotal;
794 	vbl_start = mode->crtc_vblank_start;
795 	vbl_end = mode->crtc_vblank_end;
796 
797 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
798 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
799 		vbl_end /= 2;
800 		vtotal /= 2;
801 	}
802 
803 	/*
804 	 * Lock uncore.lock, as we will do multiple timing critical raw
805 	 * register reads, potentially with preemption disabled, so the
806 	 * following code must not block on uncore.lock.
807 	 */
808 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
809 
810 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
811 
812 	/* Get optional system timestamp before query. */
813 	if (stime)
814 		*stime = ktime_get();
815 
816 	if (use_scanline_counter) {
817 		/* No obvious pixelcount register. Only query vertical
818 		 * scanout position from Display scan line register.
819 		 */
820 		position = __intel_get_crtc_scanline(crtc);
821 	} else {
822 		/* Have access to pixelcount since start of frame.
823 		 * We can split this into vertical and horizontal
824 		 * scanout position.
825 		 */
826 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
827 
828 		/* convert to pixel counts */
829 		vbl_start *= htotal;
830 		vbl_end *= htotal;
831 		vtotal *= htotal;
832 
833 		/*
834 		 * In interlaced modes, the pixel counter counts all pixels,
835 		 * so one field will have htotal more pixels. In order to avoid
836 		 * the reported position from jumping backwards when the pixel
837 		 * counter is beyond the length of the shorter field, just
838 		 * clamp the position the length of the shorter field. This
839 		 * matches how the scanline counter based position works since
840 		 * the scanline counter doesn't count the two half lines.
841 		 */
842 		if (position >= vtotal)
843 			position = vtotal - 1;
844 
845 		/*
846 		 * Start of vblank interrupt is triggered at start of hsync,
847 		 * just prior to the first active line of vblank. However we
848 		 * consider lines to start at the leading edge of horizontal
849 		 * active. So, should we get here before we've crossed into
850 		 * the horizontal active of the first line in vblank, we would
851 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
852 		 * always add htotal-hsync_start to the current pixel position.
853 		 */
854 		position = (position + htotal - hsync_start) % vtotal;
855 	}
856 
857 	/* Get optional system timestamp after query. */
858 	if (etime)
859 		*etime = ktime_get();
860 
861 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
862 
863 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
864 
865 	/*
866 	 * While in vblank, position will be negative
867 	 * counting up towards 0 at vbl_end. And outside
868 	 * vblank, position will be positive counting
869 	 * up since vbl_end.
870 	 */
871 	if (position >= vbl_start)
872 		position -= vbl_end;
873 	else
874 		position += vtotal - vbl_end;
875 
876 	if (use_scanline_counter) {
877 		*vpos = position;
878 		*hpos = 0;
879 	} else {
880 		*vpos = position / htotal;
881 		*hpos = position - (*vpos * htotal);
882 	}
883 
884 	return true;
885 }
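/*
 * A small worked example of the normalization above: with
 * vtotal = 125, vbl_start = 100 and vbl_end = 125 (scanline units),
 * scanline 110 (inside vblank) yields 110 - 125 = -15, counting up
 * to 0 at vbl_end, while scanline 10 (active) yields 10 + 0 = 10,
 * counting up from the end of vblank.
 */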
886 
887 int intel_get_crtc_scanline(struct intel_crtc *crtc)
888 {
889 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
890 	unsigned long irqflags;
891 	int position;
892 
893 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
894 	position = __intel_get_crtc_scanline(crtc);
895 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
896 
897 	return position;
898 }
899 
900 /**
901  * ivb_parity_work - Workqueue called when a parity error interrupt
902  * occurred.
903  * @work: workqueue struct
904  *
905  * Doesn't actually do anything except notify userspace. As a consequence of
906  * this event, userspace should try to remap the bad rows, since
907  * statistically the same row is more likely to go bad again.
908  */
909 static void ivb_parity_work(struct work_struct *work)
910 {
911 	struct drm_i915_private *dev_priv =
912 		container_of(work, typeof(*dev_priv), l3_parity.error_work);
913 	struct intel_gt *gt = &dev_priv->gt;
914 	u32 error_status, row, bank, subbank;
915 #ifndef __NetBSD__		/* XXX kobject uevent...? */
916 	char *parity_event[6];
917 #endif
918 	u32 misccpctl;
919 	u8 slice = 0;
920 
921 	/* We must turn off DOP level clock gating to access the L3 registers.
922 	 * In order to prevent a get/put style interface, acquire struct mutex
923 	 * any time we access those registers.
924 	 */
925 	mutex_lock(&dev_priv->drm.struct_mutex);
926 
927 	/* If we've screwed up tracking, just let the interrupt fire again */
928 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
929 		goto out;
930 
931 	misccpctl = I915_READ(GEN7_MISCCPCTL);
932 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
933 	POSTING_READ(GEN7_MISCCPCTL);
934 
935 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
936 		i915_reg_t reg;
937 
938 		slice--;
939 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
940 			break;
941 
942 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
943 
944 		reg = GEN7_L3CDERRST1(slice);
945 
946 		error_status = I915_READ(reg);
947 		row = GEN7_PARITY_ERROR_ROW(error_status);
948 		bank = GEN7_PARITY_ERROR_BANK(error_status);
949 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
950 
951 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
952 		POSTING_READ(reg);
953 
954 #ifndef __NetBSD__		/* XXX kobject uevent...? */
955 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
956 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
957 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
958 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
959 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
960 		parity_event[5] = NULL;
961 
962 		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
963 				   KOBJ_CHANGE, parity_event);
964 #endif
965 
966 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
967 			  slice, row, bank, subbank);
968 
969 #ifndef __NetBSD__		/* XXX kobject uevent...? */
970 		kfree(parity_event[4]);
971 		kfree(parity_event[3]);
972 		kfree(parity_event[2]);
973 		kfree(parity_event[1]);
974 #endif
975 	}
976 
977 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
978 
979 out:
980 	WARN_ON(dev_priv->l3_parity.which_slice);
981 	spin_lock_irq(&gt->irq_lock);
982 	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
983 	spin_unlock_irq(&gt->irq_lock);
984 
985 	mutex_unlock(&dev_priv->drm.struct_mutex);
986 }
987 
988 static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
989 {
990 	switch (pin) {
991 	case HPD_PORT_C:
992 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
993 	case HPD_PORT_D:
994 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
995 	case HPD_PORT_E:
996 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
997 	case HPD_PORT_F:
998 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
999 	default:
1000 		return false;
1001 	}
1002 }
1003 
1004 static bool gen12_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1005 {
1006 	switch (pin) {
1007 	case HPD_PORT_D:
1008 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
1009 	case HPD_PORT_E:
1010 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
1011 	case HPD_PORT_F:
1012 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
1013 	case HPD_PORT_G:
1014 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
1015 	case HPD_PORT_H:
1016 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
1017 	case HPD_PORT_I:
1018 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
1019 	default:
1020 		return false;
1021 	}
1022 }
1023 
1024 static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1025 {
1026 	switch (pin) {
1027 	case HPD_PORT_A:
1028 		return val & PORTA_HOTPLUG_LONG_DETECT;
1029 	case HPD_PORT_B:
1030 		return val & PORTB_HOTPLUG_LONG_DETECT;
1031 	case HPD_PORT_C:
1032 		return val & PORTC_HOTPLUG_LONG_DETECT;
1033 	default:
1034 		return false;
1035 	}
1036 }
1037 
1038 static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1039 {
1040 	switch (pin) {
1041 	case HPD_PORT_A:
1042 		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
1043 	case HPD_PORT_B:
1044 		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
1045 	case HPD_PORT_C:
1046 		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
1047 	default:
1048 		return false;
1049 	}
1050 }
1051 
1052 static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1053 {
1054 	switch (pin) {
1055 	case HPD_PORT_C:
1056 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1057 	case HPD_PORT_D:
1058 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1059 	case HPD_PORT_E:
1060 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1061 	case HPD_PORT_F:
1062 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1063 	default:
1064 		return false;
1065 	}
1066 }
1067 
1068 static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1069 {
1070 	switch (pin) {
1071 	case HPD_PORT_D:
1072 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
1073 	case HPD_PORT_E:
1074 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
1075 	case HPD_PORT_F:
1076 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
1077 	case HPD_PORT_G:
1078 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
1079 	case HPD_PORT_H:
1080 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
1081 	case HPD_PORT_I:
1082 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
1083 	default:
1084 		return false;
1085 	}
1086 }
1087 
1088 static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
1089 {
1090 	switch (pin) {
1091 	case HPD_PORT_E:
1092 		return val & PORTE_HOTPLUG_LONG_DETECT;
1093 	default:
1094 		return false;
1095 	}
1096 }
1097 
1098 static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1099 {
1100 	switch (pin) {
1101 	case HPD_PORT_A:
1102 		return val & PORTA_HOTPLUG_LONG_DETECT;
1103 	case HPD_PORT_B:
1104 		return val & PORTB_HOTPLUG_LONG_DETECT;
1105 	case HPD_PORT_C:
1106 		return val & PORTC_HOTPLUG_LONG_DETECT;
1107 	case HPD_PORT_D:
1108 		return val & PORTD_HOTPLUG_LONG_DETECT;
1109 	default:
1110 		return false;
1111 	}
1112 }
1113 
1114 static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1115 {
1116 	switch (pin) {
1117 	case HPD_PORT_A:
1118 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1119 	default:
1120 		return false;
1121 	}
1122 }
1123 
1124 static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1125 {
1126 	switch (pin) {
1127 	case HPD_PORT_B:
1128 		return val & PORTB_HOTPLUG_LONG_DETECT;
1129 	case HPD_PORT_C:
1130 		return val & PORTC_HOTPLUG_LONG_DETECT;
1131 	case HPD_PORT_D:
1132 		return val & PORTD_HOTPLUG_LONG_DETECT;
1133 	default:
1134 		return false;
1135 	}
1136 }
1137 
1138 static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
1139 {
1140 	switch (pin) {
1141 	case HPD_PORT_B:
1142 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1143 	case HPD_PORT_C:
1144 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1145 	case HPD_PORT_D:
1146 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1147 	default:
1148 		return false;
1149 	}
1150 }
1151 
1152 /*
1153  * Get a bit mask of pins that have triggered, and which ones may be long.
1154  * This can be called multiple times with the same masks to accumulate
1155  * hotplug detection results from several registers.
1156  *
1157  * Note that the caller is expected to zero out the masks initially.
1158  */
1159 static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
1160 			       u32 *pin_mask, u32 *long_mask,
1161 			       u32 hotplug_trigger, u32 dig_hotplug_reg,
1162 			       const u32 hpd[HPD_NUM_PINS],
1163 			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
1164 {
1165 	enum hpd_pin pin;
1166 
1167 	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);
1168 
1169 	for_each_hpd_pin(pin) {
1170 		if ((hpd[pin] & hotplug_trigger) == 0)
1171 			continue;
1172 
1173 		*pin_mask |= BIT(pin);
1174 
1175 		if (long_pulse_detect(pin, dig_hotplug_reg))
1176 			*long_mask |= BIT(pin);
1177 	}
1178 
1179 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
1180 			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
1181 
1182 }
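/*
 * Usage sketch: as in i9xx_hpd_irq_handler() below, the caller zeroes
 * pin_mask and long_mask, then calls this once per hotplug trigger
 * register, accumulating the results before passing both masks on to
 * intel_hpd_irq_handler().
 */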
1183 
1184 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1185 {
1186 
1187 	spin_lock(&dev_priv->gmbus_wait_lock);
1188 	DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
1189 	    &dev_priv->gmbus_wait_lock);
1190 	spin_unlock(&dev_priv->gmbus_wait_lock);
1191 }
1192 
1193 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1194 {
1195 
1196 	spin_lock(&dev_priv->gmbus_wait_lock);
1197 	DRM_SPIN_WAKEUP_ALL(&dev_priv->gmbus_wait_queue,
1198 	    &dev_priv->gmbus_wait_lock);
1199 	spin_unlock(&dev_priv->gmbus_wait_lock);
1200 }
1201 
1202 #if defined(CONFIG_DEBUG_FS)
1203 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1204 					 enum pipe pipe,
1205 					 u32 crc0, u32 crc1,
1206 					 u32 crc2, u32 crc3,
1207 					 u32 crc4)
1208 {
1209 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1210 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
1211 	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
1212 
1213 	trace_intel_pipe_crc(crtc, crcs);
1214 
1215 	spin_lock(&pipe_crc->lock);
1216 	/*
1217 	 * For some not yet identified reason, the first CRC is
1218 	 * bonkers. So let's just wait for the next vblank and read
1219 	 * out the buggy result.
1220 	 *
1221 	 * On GEN8+ sometimes the second CRC is bonkers as well, so
1222 	 * don't trust that one either.
1223 	 */
1224 	if (pipe_crc->skipped <= 0 ||
1225 	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
1226 		pipe_crc->skipped++;
1227 		spin_unlock(&pipe_crc->lock);
1228 		return;
1229 	}
1230 	spin_unlock(&pipe_crc->lock);
1231 
1232 	drm_crtc_add_crc_entry(&crtc->base, true,
1233 				drm_crtc_accurate_vblank_count(&crtc->base),
1234 				crcs);
1235 }
1236 #else
1237 static inline void
1238 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1239 			     enum pipe pipe,
1240 			     u32 crc0, u32 crc1,
1241 			     u32 crc2, u32 crc3,
1242 			     u32 crc4) {}
1243 #endif
1244 
1245 
1246 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1247 				     enum pipe pipe)
1248 {
1249 	display_pipe_crc_irq_handler(dev_priv, pipe,
1250 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1251 				     0, 0, 0, 0);
1252 }
1253 
1254 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1255 				     enum pipe pipe)
1256 {
1257 	display_pipe_crc_irq_handler(dev_priv, pipe,
1258 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1259 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1260 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1261 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1262 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1263 }
1264 
1265 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1266 				      enum pipe pipe)
1267 {
1268 	u32 res1, res2;
1269 
1270 	if (INTEL_GEN(dev_priv) >= 3)
1271 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1272 	else
1273 		res1 = 0;
1274 
1275 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1276 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1277 	else
1278 		res2 = 0;
1279 
1280 	display_pipe_crc_irq_handler(dev_priv, pipe,
1281 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1282 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1283 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1284 				     res1, res2);
1285 }
1286 
1287 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
1288 {
1289 	enum pipe pipe;
1290 
1291 	for_each_pipe(dev_priv, pipe) {
1292 		I915_WRITE(PIPESTAT(pipe),
1293 			   PIPESTAT_INT_STATUS_MASK |
1294 			   PIPE_FIFO_UNDERRUN_STATUS);
1295 
1296 		dev_priv->pipestat_irq_mask[pipe] = 0;
1297 	}
1298 }
1299 
1300 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1301 				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1302 {
1303 	enum pipe pipe;
1304 
1305 	spin_lock(&dev_priv->irq_lock);
1306 
1307 	if (!dev_priv->display_irqs_enabled) {
1308 		spin_unlock(&dev_priv->irq_lock);
1309 		return;
1310 	}
1311 
1312 	for_each_pipe(dev_priv, pipe) {
1313 		i915_reg_t reg;
1314 		u32 status_mask, enable_mask, iir_bit = 0;
1315 
1316 		/*
1317 		 * PIPESTAT bits get signalled even when the interrupt is
1318 		 * disabled with the mask bits, and some of the status bits do
1319 		 * not generate interrupts at all (like the underrun bit). Hence
1320 		 * we need to be careful that we only handle what we want to
1321 		 * handle.
1322 		 */
1323 
1324 		/* fifo underruns are filtered in the underrun handler. */
1325 		status_mask = PIPE_FIFO_UNDERRUN_STATUS;
1326 
1327 		switch (pipe) {
1328 		default:
1329 		case PIPE_A:
1330 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1331 			break;
1332 		case PIPE_B:
1333 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1334 			break;
1335 		case PIPE_C:
1336 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1337 			break;
1338 		}
1339 		if (iir & iir_bit)
1340 			status_mask |= dev_priv->pipestat_irq_mask[pipe];
1341 
1342 		if (!status_mask)
1343 			continue;
1344 
1345 		reg = PIPESTAT(pipe);
1346 		pipe_stats[pipe] = I915_READ(reg) & status_mask;
1347 		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
1348 
1349 		/*
1350 		 * Clear the PIPE*STAT regs before the IIR
1351 		 *
1352 		 * Toggle the enable bits to make sure we get an
1353 		 * edge in the ISR pipe event bit if we don't clear
1354 		 * all the enabled status bits. Otherwise the edge
1355 		 * triggered IIR on i965/g4x wouldn't notice that
1356 		 * an interrupt is still pending.
1357 		 */
1358 		if (pipe_stats[pipe]) {
1359 			I915_WRITE(reg, pipe_stats[pipe]);
1360 			I915_WRITE(reg, enable_mask);
1361 		}
1362 	}
1363 	spin_unlock(&dev_priv->irq_lock);
1364 }
1365 
1366 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1367 				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
1368 {
1369 	enum pipe pipe;
1370 
1371 	for_each_pipe(dev_priv, pipe) {
1372 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1373 			drm_handle_vblank(&dev_priv->drm, pipe);
1374 
1375 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1376 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1377 
1378 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1379 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1380 	}
1381 }
1382 
1383 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1384 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1385 {
1386 	bool blc_event = false;
1387 	enum pipe pipe;
1388 
1389 	for_each_pipe(dev_priv, pipe) {
1390 		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
1391 			drm_handle_vblank(&dev_priv->drm, pipe);
1392 
1393 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1394 			blc_event = true;
1395 
1396 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1397 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1398 
1399 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1400 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1401 	}
1402 
1403 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1404 		intel_opregion_asle_intr(dev_priv);
1405 }
1406 
1407 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1408 				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1409 {
1410 	bool blc_event = false;
1411 	enum pipe pipe;
1412 
1413 	for_each_pipe(dev_priv, pipe) {
1414 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1415 			drm_handle_vblank(&dev_priv->drm, pipe);
1416 
1417 		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
1418 			blc_event = true;
1419 
1420 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1421 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1422 
1423 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1424 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1425 	}
1426 
1427 	if (blc_event || (iir & I915_ASLE_INTERRUPT))
1428 		intel_opregion_asle_intr(dev_priv);
1429 
1430 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1431 		gmbus_irq_handler(dev_priv);
1432 }
1433 
1434 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1435 					    u32 pipe_stats[I915_MAX_PIPES])
1436 {
1437 	enum pipe pipe;
1438 
1439 	for_each_pipe(dev_priv, pipe) {
1440 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
1441 			drm_handle_vblank(&dev_priv->drm, pipe);
1442 
1443 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1444 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1445 
1446 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1447 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1448 	}
1449 
1450 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1451 		gmbus_irq_handler(dev_priv);
1452 }
1453 
1454 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1455 {
1456 	u32 hotplug_status = 0, hotplug_status_mask;
1457 	int i;
1458 
1459 	if (IS_G4X(dev_priv) ||
1460 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1461 		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1462 			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1463 	else
1464 		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1465 
1466 	/*
1467 	 * We absolutely have to clear all the pending interrupt
1468 	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1469 	 * interrupt bit won't have an edge, and the i965/g4x
1470 	 * edge triggered IIR will not notice that an interrupt
1471 	 * is still pending. We can't use PORT_HOTPLUG_EN to
1472 	 * guarantee the edge as the act of toggling the enable
1473 	 * bits can itself generate a new hotplug interrupt :(
1474 	 */
1475 	for (i = 0; i < 10; i++) {
1476 		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
1477 
1478 		if (tmp == 0)
1479 			return hotplug_status;
1480 
1481 		hotplug_status |= tmp;
1482 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1483 	}
1484 
1485 	WARN_ONCE(1,
1486 		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1487 		  I915_READ(PORT_HOTPLUG_STAT));
1488 
1489 	return hotplug_status;
1490 }
1491 
1492 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1493 				 u32 hotplug_status)
1494 {
1495 	u32 pin_mask = 0, long_mask = 0;
1496 
1497 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1498 	    IS_CHERRYVIEW(dev_priv)) {
1499 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1500 
1501 		if (hotplug_trigger) {
1502 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1503 					   hotplug_trigger, hotplug_trigger,
1504 					   hpd_status_g4x,
1505 					   i9xx_port_hotplug_long_detect);
1506 
1507 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1508 		}
1509 
1510 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1511 			dp_aux_irq_handler(dev_priv);
1512 	} else {
1513 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1514 
1515 		if (hotplug_trigger) {
1516 			intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1517 					   hotplug_trigger, hotplug_trigger,
1518 					   hpd_status_i915,
1519 					   i9xx_port_hotplug_long_detect);
1520 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1521 		}
1522 	}
1523 }
1524 
1525 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
1526 {
1527 	struct drm_i915_private *dev_priv = arg;
1528 	irqreturn_t ret = IRQ_NONE;
1529 
1530 	if (!intel_irqs_enabled(dev_priv))
1531 		return IRQ_NONE;
1532 
1533 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
1534 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1535 
1536 	do {
1537 		u32 iir, gt_iir, pm_iir;
1538 		u32 pipe_stats[I915_MAX_PIPES] = {};
1539 		u32 hotplug_status = 0;
1540 		u32 ier = 0;
1541 
1542 		gt_iir = I915_READ(GTIIR);
1543 		pm_iir = I915_READ(GEN6_PMIIR);
1544 		iir = I915_READ(VLV_IIR);
1545 
1546 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1547 			break;
1548 
1549 		ret = IRQ_HANDLED;
1550 
1551 		/*
1552 		 * Theory on interrupt generation, based on empirical evidence:
1553 		 *
1554 		 * x = ((VLV_IIR & VLV_IER) ||
1555 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1556 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1557 		 *
1558 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1559 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1560 		 * guarantee the CPU interrupt will be raised again even if we
1561 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1562 		 * bits this time around.
1563 		 */
1564 		I915_WRITE(VLV_MASTER_IER, 0);
1565 		ier = I915_READ(VLV_IER);
1566 		I915_WRITE(VLV_IER, 0);
1567 
1568 		if (gt_iir)
1569 			I915_WRITE(GTIIR, gt_iir);
1570 		if (pm_iir)
1571 			I915_WRITE(GEN6_PMIIR, pm_iir);
1572 
1573 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1574 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1575 
1576 		/* Call regardless, as some status bits might not be
1577 		 * signalled in iir */
1578 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1579 
1580 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1581 			   I915_LPE_PIPE_B_INTERRUPT))
1582 			intel_lpe_audio_irq_handler(dev_priv);
1583 
1584 		/*
1585 		 * VLV_IIR is single buffered, and reflects the level
1586 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1587 		 */
1588 		if (iir)
1589 			I915_WRITE(VLV_IIR, iir);
1590 
1591 		I915_WRITE(VLV_IER, ier);
1592 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1593 
1594 		if (gt_iir)
1595 			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
1596 		if (pm_iir)
1597 			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
1598 
1599 		if (hotplug_status)
1600 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1601 
1602 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1603 	} while (0);
1604 
1605 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1606 
1607 	return ret;
1608 }
1609 
1610 static irqreturn_t cherryview_irq_handler(DRM_IRQ_ARGS)
1611 {
1612 	struct drm_i915_private *dev_priv = arg;
1613 	irqreturn_t ret = IRQ_NONE;
1614 
1615 	if (!intel_irqs_enabled(dev_priv))
1616 		return IRQ_NONE;
1617 
1618 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1619 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1620 
1621 	do {
1622 		u32 master_ctl, iir;
1623 		u32 pipe_stats[I915_MAX_PIPES] = {};
1624 		u32 hotplug_status = 0;
1625 		u32 gt_iir[4];
1626 		u32 ier = 0;
1627 
1628 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1629 		iir = I915_READ(VLV_IIR);
1630 
1631 		if (master_ctl == 0 && iir == 0)
1632 			break;
1633 
1634 		ret = IRQ_HANDLED;
1635 
1636 		/*
1637 		 * Theory on interrupt generation, based on empirical evidence:
1638 		 *
1639 		 * x = ((VLV_IIR & VLV_IER) ||
1640 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1641 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1642 		 *
1643 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1644 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1645 		 * guarantee the CPU interrupt will be raised again even if we
1646 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1647 		 * bits this time around.
1648 		 */
1649 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1650 		ier = I915_READ(VLV_IER);
1651 		I915_WRITE(VLV_IER, 0);
1652 
1653 		gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
1654 
1655 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1656 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1657 
1658 		/* Call regardless, as some status bits might not be
1659 		 * signalled in iir */
1660 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1661 
1662 		if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1663 			   I915_LPE_PIPE_B_INTERRUPT |
1664 			   I915_LPE_PIPE_C_INTERRUPT))
1665 			intel_lpe_audio_irq_handler(dev_priv);
1666 
1667 		/*
1668 		 * VLV_IIR is single buffered, and reflects the level
1669 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1670 		 */
1671 		if (iir)
1672 			I915_WRITE(VLV_IIR, iir);
1673 
1674 		I915_WRITE(VLV_IER, ier);
1675 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1676 
1677 		gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
1678 
1679 		if (hotplug_status)
1680 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1681 
1682 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1683 	} while (0);
1684 
1685 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1686 
1687 	return ret;
1688 }
1689 
1690 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1691 				u32 hotplug_trigger,
1692 				const u32 hpd[HPD_NUM_PINS])
1693 {
1694 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1695 
1696 	/*
1697 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1698 	 * unless we touch the hotplug register, even if hotplug_trigger is
1699 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1700 	 * errors.
1701 	 */
1702 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1703 	if (!hotplug_trigger) {
1704 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1705 			PORTD_HOTPLUG_STATUS_MASK |
1706 			PORTC_HOTPLUG_STATUS_MASK |
1707 			PORTB_HOTPLUG_STATUS_MASK;
1708 		dig_hotplug_reg &= ~mask;
1709 	}
1710 
1711 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1712 	if (!hotplug_trigger)
1713 		return;
1714 
1715 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
1716 			   dig_hotplug_reg, hpd,
1717 			   pch_port_hotplug_long_detect);
1718 
1719 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1720 }
1721 
1722 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1723 {
1724 	enum pipe pipe;
1725 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1726 
1727 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1728 
1729 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1730 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1731 			       SDE_AUDIO_POWER_SHIFT);
1732 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1733 				 port_name(port));
1734 	}
1735 
1736 	if (pch_iir & SDE_AUX_MASK)
1737 		dp_aux_irq_handler(dev_priv);
1738 
1739 	if (pch_iir & SDE_GMBUS)
1740 		gmbus_irq_handler(dev_priv);
1741 
1742 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1743 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1744 
1745 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1746 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1747 
1748 	if (pch_iir & SDE_POISON)
1749 		DRM_ERROR("PCH poison interrupt\n");
1750 
1751 	if (pch_iir & SDE_FDI_MASK)
1752 		for_each_pipe(dev_priv, pipe)
1753 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1754 					 pipe_name(pipe),
1755 					 I915_READ(FDI_RX_IIR(pipe)));
1756 
1757 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1758 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1759 
1760 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1761 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1762 
1763 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1764 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1765 
1766 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1767 		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1768 }
1769 
1770 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1771 {
1772 	u32 err_int = I915_READ(GEN7_ERR_INT);
1773 	enum pipe pipe;
1774 
1775 	if (err_int & ERR_INT_POISON)
1776 		DRM_ERROR("Poison interrupt\n");
1777 
1778 	for_each_pipe(dev_priv, pipe) {
1779 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1780 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1781 
1782 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1783 			if (IS_IVYBRIDGE(dev_priv))
1784 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
1785 			else
1786 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
1787 		}
1788 	}
1789 
1790 	I915_WRITE(GEN7_ERR_INT, err_int);
1791 }
1792 
1793 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1794 {
1795 	u32 serr_int = I915_READ(SERR_INT);
1796 	enum pipe pipe;
1797 
1798 	if (serr_int & SERR_INT_POISON)
1799 		DRM_ERROR("PCH poison interrupt\n");
1800 
1801 	for_each_pipe(dev_priv, pipe)
1802 		if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1803 			intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1804 
1805 	I915_WRITE(SERR_INT, serr_int);
1806 }
1807 
1808 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1809 {
1810 	enum pipe pipe;
1811 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1812 
1813 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
1814 
1815 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1816 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1817 			       SDE_AUDIO_POWER_SHIFT_CPT);
1818 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1819 				 port_name(port));
1820 	}
1821 
1822 	if (pch_iir & SDE_AUX_MASK_CPT)
1823 		dp_aux_irq_handler(dev_priv);
1824 
1825 	if (pch_iir & SDE_GMBUS_CPT)
1826 		gmbus_irq_handler(dev_priv);
1827 
1828 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1829 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1830 
1831 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1832 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1833 
1834 	if (pch_iir & SDE_FDI_MASK_CPT)
1835 		for_each_pipe(dev_priv, pipe)
1836 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1837 					 pipe_name(pipe),
1838 					 I915_READ(FDI_RX_IIR(pipe)));
1839 
1840 	if (pch_iir & SDE_ERROR_CPT)
1841 		cpt_serr_int_handler(dev_priv);
1842 }
1843 
1844 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1845 {
1846 	u32 ddi_hotplug_trigger, tc_hotplug_trigger;
1847 	u32 pin_mask = 0, long_mask = 0;
1848 	bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val);
1849 	const u32 *pins;
1850 
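	/*
	 * Each PCH variant exposes different DDI/TC trigger bits and pin
	 * tables; pick the masks, pin map and TC long-pulse detector to
	 * match before decoding the hotplug status below.
	 */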
1851 	if (HAS_PCH_TGP(dev_priv)) {
1852 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1853 		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
1854 		tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect;
1855 		pins = hpd_tgp;
1856 	} else if (HAS_PCH_JSP(dev_priv)) {
1857 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
1858 		tc_hotplug_trigger = 0;
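		/* JSP has no TC hotplug triggers, so the TC long-pulse
		 * detector is never consulted and can stay unset. */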
1859 		pins = hpd_tgp;
1860 	} else if (HAS_PCH_MCC(dev_priv)) {
1861 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1862 		tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
1863 		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1864 		pins = hpd_icp;
1865 	} else {
1866 		WARN(!HAS_PCH_ICP(dev_priv),
1867 		     "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
1868 
1869 		ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
1870 		tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
1871 		tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
1872 		pins = hpd_icp;
1873 	}
1874 
1875 	if (ddi_hotplug_trigger) {
1876 		u32 dig_hotplug_reg;
1877 
1878 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI);
1879 		I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1880 
1881 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1882 				   ddi_hotplug_trigger,
1883 				   dig_hotplug_reg, pins,
1884 				   icp_ddi_port_hotplug_long_detect);
1885 	}
1886 
1887 	if (tc_hotplug_trigger) {
1888 		u32 dig_hotplug_reg;
1889 
1890 		dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC);
1891 		I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg);
1892 
1893 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1894 				   tc_hotplug_trigger,
1895 				   dig_hotplug_reg, pins,
1896 				   tc_port_hotplug_long_detect);
1897 	}
1898 
1899 	if (pin_mask)
1900 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1901 
1902 	if (pch_iir & SDE_GMBUS_ICP)
1903 		gmbus_irq_handler(dev_priv);
1904 }
1905 
1906 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1907 {
1908 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1909 		~SDE_PORTE_HOTPLUG_SPT;
1910 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1911 	u32 pin_mask = 0, long_mask = 0;
1912 
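	/* Port E hotplug status lives in PCH_PORT_HOTPLUG2, so the two
	 * trigger sets are acked from separate registers below. */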
1913 	if (hotplug_trigger) {
1914 		u32 dig_hotplug_reg;
1915 
1916 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1917 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1918 
1919 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1920 				   hotplug_trigger, dig_hotplug_reg, hpd_spt,
1921 				   spt_port_hotplug_long_detect);
1922 	}
1923 
1924 	if (hotplug2_trigger) {
1925 		u32 dig_hotplug_reg;
1926 
1927 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1928 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1929 
1930 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1931 				   hotplug2_trigger, dig_hotplug_reg, hpd_spt,
1932 				   spt_port_hotplug2_long_detect);
1933 	}
1934 
1935 	if (pin_mask)
1936 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1937 
1938 	if (pch_iir & SDE_GMBUS_CPT)
1939 		gmbus_irq_handler(dev_priv);
1940 }
1941 
1942 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
1943 				u32 hotplug_trigger,
1944 				const u32 hpd[HPD_NUM_PINS])
1945 {
1946 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1947 
1948 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
1949 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
1950 
1951 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
1952 			   dig_hotplug_reg, hpd,
1953 			   ilk_port_hotplug_long_detect);
1954 
1955 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1956 }
1957 
1958 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
1959 				    u32 de_iir)
1960 {
1961 	enum pipe pipe;
1962 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
1963 
1964 	if (hotplug_trigger)
1965 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
1966 
1967 	if (de_iir & DE_AUX_CHANNEL_A)
1968 		dp_aux_irq_handler(dev_priv);
1969 
1970 	if (de_iir & DE_GSE)
1971 		intel_opregion_asle_intr(dev_priv);
1972 
1973 	if (de_iir & DE_POISON)
1974 		DRM_ERROR("Poison interrupt\n");
1975 
1976 	for_each_pipe(dev_priv, pipe) {
1977 		if (de_iir & DE_PIPE_VBLANK(pipe))
1978 			drm_handle_vblank(&dev_priv->drm, pipe);
1979 
1980 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
1981 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1982 
1983 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
1984 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1985 	}
1986 
1987 	/* check event from PCH */
1988 	if (de_iir & DE_PCH_EVENT) {
1989 		u32 pch_iir = I915_READ(SDEIIR);
1990 
1991 		if (HAS_PCH_CPT(dev_priv))
1992 			cpt_irq_handler(dev_priv, pch_iir);
1993 		else
1994 			ibx_irq_handler(dev_priv, pch_iir);
1995 
1996 		/* should clear PCH hotplug event before clearing CPU irq */
1997 		I915_WRITE(SDEIIR, pch_iir);
1998 	}
1999 
2000 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
2001 		gen5_rps_irq_handler(&dev_priv->gt.rps);
2002 }
2003 
2004 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2005 				    u32 de_iir)
2006 {
2007 	enum pipe pipe;
2008 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2009 
2010 	if (hotplug_trigger)
2011 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2012 
2013 	if (de_iir & DE_ERR_INT_IVB)
2014 		ivb_err_int_handler(dev_priv);
2015 
2016 	if (de_iir & DE_EDP_PSR_INT_HSW) {
2017 		u32 psr_iir = I915_READ(EDP_PSR_IIR);
2018 
2019 		intel_psr_irq_handler(dev_priv, psr_iir);
2020 		I915_WRITE(EDP_PSR_IIR, psr_iir);
2021 	}
2022 
2023 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2024 		dp_aux_irq_handler(dev_priv);
2025 
2026 	if (de_iir & DE_GSE_IVB)
2027 		intel_opregion_asle_intr(dev_priv);
2028 
2029 	for_each_pipe(dev_priv, pipe) {
2030 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2031 			drm_handle_vblank(&dev_priv->drm, pipe);
2032 	}
2033 
2034 	/* check event from PCH */
2035 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2036 		u32 pch_iir = I915_READ(SDEIIR);
2037 
2038 		cpt_irq_handler(dev_priv, pch_iir);
2039 
2040 		/* clear PCH hotplug event before clearing CPU irq */
2041 		I915_WRITE(SDEIIR, pch_iir);
2042 	}
2043 }
2044 
2045 /*
2046  * To handle irqs with the minimum potential races with fresh interrupts, we:
2047  * 1 - Disable Master Interrupt Control.
2048  * 2 - Find the source(s) of the interrupt.
2049  * 3 - Clear the Interrupt Identity bits (IIR).
2050  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2051  * 5 - Re-enable Master Interrupt Control.
2052  */
2053 static irqreturn_t ilk_irq_handler(DRM_IRQ_ARGS)
2054 {
2055 	struct drm_i915_private *dev_priv = arg;
2056 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2057 	irqreturn_t ret = IRQ_NONE;
2058 
2059 	if (!intel_irqs_enabled(dev_priv))
2060 		return IRQ_NONE;
2061 
2062 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2063 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2064 
2065 	/* disable master interrupt before clearing iir  */
2066 	de_ier = I915_READ(DEIER);
2067 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2068 
2069 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2070 	 * interrupts will be stored on its back queue, and then we'll be
2071 	 * able to process them after we restore SDEIER (as soon as we restore
2072 	 * it, we'll get an interrupt if SDEIIR still has something to process
2073 	 * due to its back queue). */
2074 	if (!HAS_PCH_NOP(dev_priv)) {
2075 		sde_ier = I915_READ(SDEIER);
2076 		I915_WRITE(SDEIER, 0);
2077 	}
2078 
2079 	/* Find, clear, then process each source of interrupt */
2080 
2081 	gt_iir = I915_READ(GTIIR);
2082 	if (gt_iir) {
2083 		I915_WRITE(GTIIR, gt_iir);
2084 		ret = IRQ_HANDLED;
2085 		if (INTEL_GEN(dev_priv) >= 6)
2086 			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
2087 		else
2088 			gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
2089 	}
2090 
2091 	de_iir = I915_READ(DEIIR);
2092 	if (de_iir) {
2093 		I915_WRITE(DEIIR, de_iir);
2094 		ret = IRQ_HANDLED;
2095 		if (INTEL_GEN(dev_priv) >= 7)
2096 			ivb_display_irq_handler(dev_priv, de_iir);
2097 		else
2098 			ilk_display_irq_handler(dev_priv, de_iir);
2099 	}
2100 
2101 	if (INTEL_GEN(dev_priv) >= 6) {
2102 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2103 		if (pm_iir) {
2104 			I915_WRITE(GEN6_PMIIR, pm_iir);
2105 			ret = IRQ_HANDLED;
2106 			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
2107 		}
2108 	}
2109 
2110 	I915_WRITE(DEIER, de_ier);
2111 	if (!HAS_PCH_NOP(dev_priv))
2112 		I915_WRITE(SDEIER, sde_ier);
2113 
2114 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2115 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2116 
2117 	return ret;
2118 }
2119 
2120 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2121 				u32 hotplug_trigger,
2122 				const u32 hpd[HPD_NUM_PINS])
2123 {
2124 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2125 
2126 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2127 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2128 
2129 	intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger,
2130 			   dig_hotplug_reg, hpd,
2131 			   bxt_port_hotplug_long_detect);
2132 
2133 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2134 }
2135 
2136 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2137 {
2138 	u32 pin_mask = 0, long_mask = 0;
2139 	u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2140 	u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2141 	long_pulse_detect_func long_pulse_detect;
2142 	const u32 *hpd;
2143 
2144 	if (INTEL_GEN(dev_priv) >= 12) {
2145 		long_pulse_detect = gen12_port_hotplug_long_detect;
2146 		hpd = hpd_gen12;
2147 	} else {
2148 		long_pulse_detect = gen11_port_hotplug_long_detect;
2149 		hpd = hpd_gen11;
2150 	}
2151 
2152 	if (trigger_tc) {
2153 		u32 dig_hotplug_reg;
2154 
2155 		dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL);
2156 		I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2157 
2158 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc,
2159 				   dig_hotplug_reg, hpd, long_pulse_detect);
2160 	}
2161 
2162 	if (trigger_tbt) {
2163 		u32 dig_hotplug_reg;
2164 
2165 		dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL);
2166 		I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2167 
2168 		intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt,
2169 				   dig_hotplug_reg, hpd, long_pulse_detect);
2170 	}
2171 
2172 	if (pin_mask)
2173 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2174 	else
2175 		DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
2176 }
2177 
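/* The AUX channel bits in the DE port IIR differ per generation. */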
2178 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2179 {
2180 	u32 mask;
2181 
2182 	if (INTEL_GEN(dev_priv) >= 12)
2183 		return TGL_DE_PORT_AUX_DDIA |
2184 			TGL_DE_PORT_AUX_DDIB |
2185 			TGL_DE_PORT_AUX_DDIC |
2186 			TGL_DE_PORT_AUX_USBC1 |
2187 			TGL_DE_PORT_AUX_USBC2 |
2188 			TGL_DE_PORT_AUX_USBC3 |
2189 			TGL_DE_PORT_AUX_USBC4 |
2190 			TGL_DE_PORT_AUX_USBC5 |
2191 			TGL_DE_PORT_AUX_USBC6;
2192 
2193 
2194 	mask = GEN8_AUX_CHANNEL_A;
2195 	if (INTEL_GEN(dev_priv) >= 9)
2196 		mask |= GEN9_AUX_CHANNEL_B |
2197 			GEN9_AUX_CHANNEL_C |
2198 			GEN9_AUX_CHANNEL_D;
2199 
2200 	if (IS_CNL_WITH_PORT_F(dev_priv) || IS_GEN(dev_priv, 11))
2201 		mask |= CNL_AUX_CHANNEL_F;
2202 
2203 	if (IS_GEN(dev_priv, 11))
2204 		mask |= ICL_AUX_CHANNEL_E;
2205 
2206 	return mask;
2207 }
2208 
2209 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2210 {
2211 	if (INTEL_GEN(dev_priv) >= 11)
2212 		return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2213 	else if (INTEL_GEN(dev_priv) >= 9)
2214 		return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2215 	else
2216 		return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2217 }
2218 
2219 static void
2220 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2221 {
2222 	bool found = false;
2223 
2224 	if (iir & GEN8_DE_MISC_GSE) {
2225 		intel_opregion_asle_intr(dev_priv);
2226 		found = true;
2227 	}
2228 
2229 	if (iir & GEN8_DE_EDP_PSR) {
2230 		u32 psr_iir;
2231 		i915_reg_t iir_reg;
2232 
2233 		if (INTEL_GEN(dev_priv) >= 12)
2234 			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
2235 		else
2236 			iir_reg = EDP_PSR_IIR;
2237 
2238 		psr_iir = I915_READ(iir_reg);
2239 		I915_WRITE(iir_reg, psr_iir);
2240 
2241 		if (psr_iir)
2242 			found = true;
2243 
2244 		intel_psr_irq_handler(dev_priv, psr_iir);
2245 	}
2246 
2247 	if (!found)
2248 		DRM_ERROR("Unexpected DE Misc interrupt\n");
2249 }
2250 
2251 static irqreturn_t
2252 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2253 {
2254 	irqreturn_t ret = IRQ_NONE;
2255 	u32 iir;
2256 	enum pipe pipe;
2257 
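	/* Find, clear, then process each source of interrupt. */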
2258 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2259 		iir = I915_READ(GEN8_DE_MISC_IIR);
2260 		if (iir) {
2261 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2262 			ret = IRQ_HANDLED;
2263 			gen8_de_misc_irq_handler(dev_priv, iir);
2264 		} else {
2265 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2266 		}
2267 	}
2268 
2269 	if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2270 		iir = I915_READ(GEN11_DE_HPD_IIR);
2271 		if (iir) {
2272 			I915_WRITE(GEN11_DE_HPD_IIR, iir);
2273 			ret = IRQ_HANDLED;
2274 			gen11_hpd_irq_handler(dev_priv, iir);
2275 		} else {
2276 			DRM_ERROR("The master control interrupt lied (DE HPD)!\n");
2277 		}
2278 	}
2279 
2280 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2281 		iir = I915_READ(GEN8_DE_PORT_IIR);
2282 		if (iir) {
2283 			u32 tmp_mask;
2284 			bool found = false;
2285 
2286 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2287 			ret = IRQ_HANDLED;
2288 
2289 			if (iir & gen8_de_port_aux_mask(dev_priv)) {
2290 				dp_aux_irq_handler(dev_priv);
2291 				found = true;
2292 			}
2293 
2294 			if (IS_GEN9_LP(dev_priv)) {
2295 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2296 				if (tmp_mask) {
2297 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
2298 							    hpd_bxt);
2299 					found = true;
2300 				}
2301 			} else if (IS_BROADWELL(dev_priv)) {
2302 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2303 				if (tmp_mask) {
2304 					ilk_hpd_irq_handler(dev_priv,
2305 							    tmp_mask, hpd_bdw);
2306 					found = true;
2307 				}
2308 			}
2309 
2310 			if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2311 				gmbus_irq_handler(dev_priv);
2312 				found = true;
2313 			}
2314 
2315 			if (!found)
2316 				DRM_ERROR("Unexpected DE Port interrupt\n");
2317 		} else {
2318 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2319 		}
2320 	}
2321 
2322 	for_each_pipe(dev_priv, pipe) {
2323 		u32 fault_errors;
2324 
2325 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2326 			continue;
2327 
2328 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2329 		if (!iir) {
2330 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2331 			continue;
2332 		}
2333 
2334 		ret = IRQ_HANDLED;
2335 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2336 
2337 		if (iir & GEN8_PIPE_VBLANK)
2338 			drm_handle_vblank(&dev_priv->drm, pipe);
2339 
2340 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2341 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2342 
2343 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2344 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2345 
2346 		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2347 		if (fault_errors)
2348 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2349 				  pipe_name(pipe),
2350 				  fault_errors);
2351 	}
2352 
2353 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2354 	    master_ctl & GEN8_DE_PCH_IRQ) {
2355 		/*
2356 		 * FIXME(BDW): Assume for now that the new interrupt handling
2357 		 * scheme also closed the SDE interrupt handling race we've seen
2358 		 * on older pch-split platforms. But this needs testing.
2359 		 */
2360 		iir = I915_READ(SDEIIR);
2361 		if (iir) {
2362 			I915_WRITE(SDEIIR, iir);
2363 			ret = IRQ_HANDLED;
2364 
2365 			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2366 				icp_irq_handler(dev_priv, iir);
2367 			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2368 				spt_irq_handler(dev_priv, iir);
2369 			else
2370 				cpt_irq_handler(dev_priv, iir);
2371 		} else {
2372 			/*
2373 			 * Like on previous PCH there seems to be something
2374 			 * fishy going on with forwarding PCH interrupts.
2375 			 */
2376 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2377 		}
2378 	}
2379 
2380 	return ret;
2381 }
2382 
2383 static inline u32 gen8_master_intr_disable(struct intel_uncore *regs)
2384 {
2385 	raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2386 
2387 	/*
2388 	 * Now with master disabled, get a sample of level indications
2389 	 * for this interrupt. Indications will be cleared on related acks.
2390 	 * New indications can and will light up during processing,
2391 	 * and will generate new interrupt after enabling master.
2392 	 */
2393 	return raw_reg_read(regs, GEN8_MASTER_IRQ);
2394 }
2395 
2396 static inline void gen8_master_intr_enable(struct intel_uncore *regs)
2397 {
2398 	raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2399 }
2400 
2401 static irqreturn_t gen8_irq_handler(DRM_IRQ_ARGS)
2402 {
2403 	struct drm_i915_private *dev_priv = arg;
2404 	u32 master_ctl;
2405 	u32 gt_iir[4];
2406 
2407 	if (!intel_irqs_enabled(dev_priv))
2408 		return IRQ_NONE;
2409 
2410 	master_ctl = gen8_master_intr_disable(&dev_priv->uncore);
2411 	if (!master_ctl) {
2412 		gen8_master_intr_enable(&dev_priv->uncore);
2413 		return IRQ_NONE;
2414 	}
2415 
2416 	/* Find, clear, then process each source of interrupt */
2417 	gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
2418 
2419 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2420 	if (master_ctl & ~GEN8_GT_IRQS) {
2421 		disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2422 		gen8_de_irq_handler(dev_priv, master_ctl);
2423 		enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2424 	}
2425 
2426 	gen8_master_intr_enable(&dev_priv->uncore);
2427 
2428 	gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
2429 
2430 	return IRQ_HANDLED;
2431 }
2432 
2433 static u32
2434 gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
2435 {
2436 	u32 iir;
2437 
2438 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
2439 		return 0;
2440 
2441 	iir = raw_reg_read(gt->uncore, GEN11_GU_MISC_IIR);
2442 	if (likely(iir))
2443 		raw_reg_write(gt->uncore, GEN11_GU_MISC_IIR, iir);
2444 
2445 	return iir;
2446 }
2447 
2448 static void
2449 gen11_gu_misc_irq_handler(struct intel_gt *gt, const u32 iir)
2450 {
2451 	if (iir & GEN11_GU_MISC_GSE)
2452 		intel_opregion_asle_intr(gt->i915);
2453 }
2454 
2455 static inline u32 gen11_master_intr_disable(struct intel_uncore *regs)
2456 {
2457 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2458 
2459 	/*
2460 	 * Now with master disabled, get a sample of level indications
2461 	 * for this interrupt. Indications will be cleared on related acks.
2462 	 * New indications can and will light up during processing,
2463 	 * and will generate new interrupt after enabling master.
2464 	 */
2465 	return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2466 }
2467 
2468 static inline void gen11_master_intr_enable(struct intel_uncore *regs)
2469 {
2470 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2471 }
2472 
2473 static void
2474 gen11_display_irq_handler(struct drm_i915_private *i915)
2475 {
2476 	const u32 disp_ctl = raw_reg_read(&i915->uncore, GEN11_DISPLAY_INT_CTL);
2477 
2478 	disable_rpm_wakeref_asserts(&i915->runtime_pm);
2479 	/*
2480 	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2481 	 * for the display related bits.
2482 	 */
2483 	raw_reg_write(&i915->uncore, GEN11_DISPLAY_INT_CTL, 0x0);
2484 	gen8_de_irq_handler(i915, disp_ctl);
2485 	raw_reg_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
2486 		      GEN11_DISPLAY_IRQ_ENABLE);
2487 
2488 	enable_rpm_wakeref_asserts(&i915->runtime_pm);
2489 }
2490 
2491 static __always_inline irqreturn_t
2492 __gen11_irq_handler(struct drm_i915_private * const i915,
2493 		    u32 (*intr_disable)(struct intel_uncore *regs),
2494 		    void (*intr_enable)(struct intel_uncore *regs))
2495 {
2496 	struct intel_gt *gt = &i915->gt;
2497 	u32 master_ctl;
2498 	u32 gu_misc_iir;
2499 
2500 	if (!intel_irqs_enabled(i915))
2501 		return IRQ_NONE;
2502 
2503 	master_ctl = intr_disable(&i915->uncore);
2504 	if (!master_ctl) {
2505 		intr_enable(&i915->uncore);
2506 		return IRQ_NONE;
2507 	}
2508 
2509 	/* Find, clear, then process each source of interrupt. */
2510 	gen11_gt_irq_handler(gt, master_ctl);
2511 
2512 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2513 	if (master_ctl & GEN11_DISPLAY_IRQ)
2514 		gen11_display_irq_handler(i915);
2515 
2516 	gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
2517 
2518 	intr_enable(&i915->uncore);
2519 
2520 	gen11_gu_misc_irq_handler(gt, gu_misc_iir);
2521 
2522 	return IRQ_HANDLED;
2523 }
2524 
2525 static irqreturn_t gen11_irq_handler(DRM_IRQ_ARGS)
2526 {
2527 	return __gen11_irq_handler(arg,
2528 				   gen11_master_intr_disable,
2529 				   gen11_master_intr_enable);
2530 }
2531 
2532 /* Called from drm generic code, passed 'crtc' which
2533  * we use as a pipe index
2534  */
2535 int i8xx_enable_vblank(struct drm_crtc *crtc)
2536 {
2537 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2538 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2539 	unsigned long irqflags;
2540 
2541 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2542 	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2543 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2544 
2545 	return 0;
2546 }
2547 
2548 int i915gm_enable_vblank(struct drm_crtc *crtc)
2549 {
2550 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2551 
2552 	/*
2553 	 * Vblank interrupts fail to wake the device up from C2+.
2554 	 * Disabling render clock gating during C-states avoids
2555 	 * the problem. There is a small power cost so we do this
2556 	 * only when vblank interrupts are actually enabled.
2557 	 */
2558 	if (dev_priv->vblank_enabled++ == 0)
2559 		I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2560 
2561 	return i8xx_enable_vblank(crtc);
2562 }
2563 
2564 int i965_enable_vblank(struct drm_crtc *crtc)
2565 {
2566 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2567 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2568 	unsigned long irqflags;
2569 
2570 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2571 	i915_enable_pipestat(dev_priv, pipe,
2572 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2573 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2574 
2575 	return 0;
2576 }
2577 
2578 int ilk_enable_vblank(struct drm_crtc *crtc)
2579 {
2580 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2581 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2582 	unsigned long irqflags;
2583 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2584 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2585 
2586 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2587 	ilk_enable_display_irq(dev_priv, bit);
2588 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2589 
2590 	/* Even though there is no DMC, frame counter can get stuck when
2591 	 * PSR is active as no frames are generated.
2592 	 */
2593 	if (HAS_PSR(dev_priv))
2594 		drm_crtc_vblank_restore(crtc);
2595 
2596 	return 0;
2597 }
2598 
2599 int bdw_enable_vblank(struct drm_crtc *crtc)
2600 {
2601 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2602 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2603 	unsigned long irqflags;
2604 
2605 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2606 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2607 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2608 
2609 	/* Even if there is no DMC, frame counter can get stuck when
2610 	 * PSR is active as no frames are generated, so check only for PSR.
2611 	 */
2612 	if (HAS_PSR(dev_priv))
2613 		drm_crtc_vblank_restore(crtc);
2614 
2615 	return 0;
2616 }
2617 
2618 /* Called from drm generic code, passed 'crtc' which
2619  * we use as a pipe index
2620  */
2621 void i8xx_disable_vblank(struct drm_crtc *crtc)
2622 {
2623 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2624 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2625 	unsigned long irqflags;
2626 
2627 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2628 	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2629 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2630 }
2631 
2632 void i915gm_disable_vblank(struct drm_crtc *crtc)
2633 {
2634 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2635 
2636 	i8xx_disable_vblank(crtc);
2637 
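	/* Re-enable render clock gating once the last vblank user is gone. */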
2638 	if (--dev_priv->vblank_enabled == 0)
2639 		I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2640 }
2641 
2642 void i965_disable_vblank(struct drm_crtc *crtc)
2643 {
2644 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2645 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2646 	unsigned long irqflags;
2647 
2648 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2649 	i915_disable_pipestat(dev_priv, pipe,
2650 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2651 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2652 }
2653 
2654 void ilk_disable_vblank(struct drm_crtc *crtc)
2655 {
2656 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2657 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2658 	unsigned long irqflags;
2659 	u32 bit = INTEL_GEN(dev_priv) >= 7 ?
2660 		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2661 
2662 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2663 	ilk_disable_display_irq(dev_priv, bit);
2664 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2665 }
2666 
2667 void bdw_disable_vblank(struct drm_crtc *crtc)
2668 {
2669 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2670 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
2671 	unsigned long irqflags;
2672 
2673 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2674 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2675 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2676 }
2677 
2678 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
2679 {
2680 	struct intel_uncore *uncore = &dev_priv->uncore;
2681 
2682 	if (HAS_PCH_NOP(dev_priv))
2683 		return;
2684 
2685 	GEN3_IRQ_RESET(uncore, SDE);
2686 
2687 	if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
2688 		I915_WRITE(SERR_INT, 0xffffffff);
2689 }
2690 
2691 /*
2692  * SDEIER is also touched by the interrupt handler to work around missed PCH
2693  * interrupts. Hence we can't update it after the interrupt handler is enabled -
2694  * instead we unconditionally enable all PCH interrupt sources here, but then
2695  * only unmask them as needed with SDEIMR.
2696  *
2697  * This function needs to be called before interrupts are enabled.
2698  */
2699 static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
2700 {
2701 	if (HAS_PCH_NOP(dev_priv))
2702 		return;
2703 
2704 	WARN_ON(I915_READ(SDEIER) != 0);
2705 	I915_WRITE(SDEIER, 0xffffffff);
2706 	POSTING_READ(SDEIER);
2707 }
2708 
2709 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
2710 {
2711 	struct intel_uncore *uncore = &dev_priv->uncore;
2712 
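	/* Ack stale GTT-fault status, then disable hotplug detection and
	 * ack any latched hotplug status bits before masking everything. */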
2713 	if (IS_CHERRYVIEW(dev_priv))
2714 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
2715 	else
2716 		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
2717 
2718 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
2719 	intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2720 
2721 	i9xx_pipestat_irq_reset(dev_priv);
2722 
2723 	GEN3_IRQ_RESET(uncore, VLV_);
2724 	dev_priv->irq_mask = ~0u;
2725 }
2726 
2727 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
2728 {
2729 	struct intel_uncore *uncore = &dev_priv->uncore;
2730 
2731 	u32 pipestat_mask;
2732 	u32 enable_mask;
2733 	enum pipe pipe;
2734 
2735 	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
2736 
2737 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
2738 	for_each_pipe(dev_priv, pipe)
2739 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
2740 
2741 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
2742 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2743 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2744 		I915_LPE_PIPE_A_INTERRUPT |
2745 		I915_LPE_PIPE_B_INTERRUPT;
2746 
2747 	if (IS_CHERRYVIEW(dev_priv))
2748 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
2749 			I915_LPE_PIPE_C_INTERRUPT;
2750 
2751 	WARN_ON(dev_priv->irq_mask != ~0u);
2752 
2753 	dev_priv->irq_mask = ~enable_mask;
2754 
2755 	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
2756 }
2757 
2758 /* drm_dma.h hooks */
2760 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
2761 {
2762 	struct intel_uncore *uncore = &dev_priv->uncore;
2763 
2764 	GEN3_IRQ_RESET(uncore, DE);
2765 	if (IS_GEN(dev_priv, 7))
2766 		intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
2767 
2768 	if (IS_HASWELL(dev_priv)) {
2769 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2770 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2771 	}
2772 
2773 	gen5_gt_irq_reset(&dev_priv->gt);
2774 
2775 	ibx_irq_reset(dev_priv);
2776 }
2777 
2778 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
2779 {
2780 	I915_WRITE(VLV_MASTER_IER, 0);
2781 	POSTING_READ(VLV_MASTER_IER);
2782 
2783 	gen5_gt_irq_reset(&dev_priv->gt);
2784 
2785 	spin_lock_irq(&dev_priv->irq_lock);
2786 	if (dev_priv->display_irqs_enabled)
2787 		vlv_display_irq_reset(dev_priv);
2788 	spin_unlock_irq(&dev_priv->irq_lock);
2789 }
2790 
2791 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
2792 {
2793 	struct intel_uncore *uncore = &dev_priv->uncore;
2794 	enum pipe pipe;
2795 
2796 	gen8_master_intr_disable(&dev_priv->uncore);
2797 
2798 	gen8_gt_irq_reset(&dev_priv->gt);
2799 
2800 	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2801 	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2802 
2803 	for_each_pipe(dev_priv, pipe)
2804 		if (intel_display_power_is_enabled(dev_priv,
2805 						   POWER_DOMAIN_PIPE(pipe)))
2806 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2807 
2808 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2809 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2810 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2811 
2812 	if (HAS_PCH_SPLIT(dev_priv))
2813 		ibx_irq_reset(dev_priv);
2814 }
2815 
2816 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
2817 {
2818 	struct intel_uncore *uncore = &dev_priv->uncore;
2819 	enum pipe pipe;
2820 
2821 	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
2822 
2823 	if (INTEL_GEN(dev_priv) >= 12) {
2824 		enum transcoder trans;
2825 
2826 		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
2827 			enum intel_display_power_domain domain;
2828 
2829 			domain = POWER_DOMAIN_TRANSCODER(trans);
2830 			if (!intel_display_power_is_enabled(dev_priv, domain))
2831 				continue;
2832 
2833 			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
2834 			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
2835 		}
2836 	} else {
2837 		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
2838 		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
2839 	}
2840 
2841 	for_each_pipe(dev_priv, pipe)
2842 		if (intel_display_power_is_enabled(dev_priv,
2843 						   POWER_DOMAIN_PIPE(pipe)))
2844 			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2845 
2846 	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
2847 	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
2848 	GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
2849 
2850 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2851 		GEN3_IRQ_RESET(uncore, SDE);
2852 }
2853 
2854 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
2855 {
2856 	struct intel_uncore *uncore = &dev_priv->uncore;
2857 
2858 	gen11_master_intr_disable(&dev_priv->uncore);
2859 
2860 	gen11_gt_irq_reset(&dev_priv->gt);
2861 	gen11_display_irq_reset(dev_priv);
2862 
2863 	GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
2864 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2865 }
2866 
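/*
 * Pipe interrupt registers sit in the display power wells and lose their
 * contents while a well is off, so reinitialize them for the pipes in
 * pipe_mask once their well has been enabled.
 */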
2867 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
2868 				     u8 pipe_mask)
2869 {
2870 	struct intel_uncore *uncore = &dev_priv->uncore;
2871 
2872 	u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
2873 	enum pipe pipe;
2874 
2875 	spin_lock_irq(&dev_priv->irq_lock);
2876 
2877 	if (!intel_irqs_enabled(dev_priv)) {
2878 		spin_unlock_irq(&dev_priv->irq_lock);
2879 		return;
2880 	}
2881 
2882 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2883 		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
2884 				  dev_priv->de_irq_mask[pipe],
2885 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
2886 
2887 	spin_unlock_irq(&dev_priv->irq_lock);
2888 }
2889 
2890 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
2891 				     u8 pipe_mask)
2892 {
2893 	struct intel_uncore *uncore = &dev_priv->uncore;
2894 	enum pipe pipe;
2895 
2896 	spin_lock_irq(&dev_priv->irq_lock);
2897 
2898 	if (!intel_irqs_enabled(dev_priv)) {
2899 		spin_unlock_irq(&dev_priv->irq_lock);
2900 		return;
2901 	}
2902 
2903 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
2904 		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
2905 
2906 	spin_unlock_irq(&dev_priv->irq_lock);
2907 
2908 	/* make sure we're done processing display irqs */
2909 	intel_synchronize_irq(dev_priv);
2910 }
2911 
2912 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
2913 {
2914 	struct intel_uncore *uncore = &dev_priv->uncore;
2915 
2916 	I915_WRITE(GEN8_MASTER_IRQ, 0);
2917 	POSTING_READ(GEN8_MASTER_IRQ);
2918 
2919 	gen8_gt_irq_reset(&dev_priv->gt);
2920 
2921 	GEN3_IRQ_RESET(uncore, GEN8_PCU_);
2922 
2923 	spin_lock_irq(&dev_priv->irq_lock);
2924 	if (dev_priv->display_irqs_enabled)
2925 		vlv_display_irq_reset(dev_priv);
2926 	spin_unlock_irq(&dev_priv->irq_lock);
2927 }
2928 
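/* Collect the HPD IRQ bits for every encoder whose pin is enabled. */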
2929 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
2930 				  const u32 hpd[HPD_NUM_PINS])
2931 {
2932 	struct intel_encoder *encoder;
2933 	u32 enabled_irqs = 0;
2934 
2935 	for_each_intel_encoder(&dev_priv->drm, encoder)
2936 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
2937 			enabled_irqs |= hpd[encoder->hpd_pin];
2938 
2939 	return enabled_irqs;
2940 }
2941 
2942 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
2943 {
2944 	u32 hotplug;
2945 
2946 	/*
2947 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
2948 	 * duration to 2ms (which is the minimum in the Display Port spec).
2949 	 * The pulse duration bits are reserved on LPT+.
2950 	 */
2951 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
2952 	hotplug &= ~(PORTB_PULSE_DURATION_MASK |
2953 		     PORTC_PULSE_DURATION_MASK |
2954 		     PORTD_PULSE_DURATION_MASK);
2955 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2956 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2957 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2958 	/*
2959 	 * When CPU and PCH are on the same package, port A
2960 	 * HPD must be enabled in both north and south.
2961 	 */
2962 	if (HAS_PCH_LPT_LP(dev_priv))
2963 		hotplug |= PORTA_HOTPLUG_ENABLE;
2964 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2965 }
2966 
2967 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
2968 {
2969 	u32 hotplug_irqs, enabled_irqs;
2970 
2971 	if (HAS_PCH_IBX(dev_priv)) {
2972 		hotplug_irqs = SDE_HOTPLUG_MASK;
2973 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
2974 	} else {
2975 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
2976 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
2977 	}
2978 
2979 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
2980 
2981 	ibx_hpd_detection_setup(dev_priv);
2982 }
2983 
2984 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv,
2985 				    u32 ddi_hotplug_enable_mask,
2986 				    u32 tc_hotplug_enable_mask)
2987 {
2988 	u32 hotplug;
2989 
2990 	hotplug = I915_READ(SHOTPLUG_CTL_DDI);
2991 	hotplug |= ddi_hotplug_enable_mask;
2992 	I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
2993 
2994 	if (tc_hotplug_enable_mask) {
2995 		hotplug = I915_READ(SHOTPLUG_CTL_TC);
2996 		hotplug |= tc_hotplug_enable_mask;
2997 		I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
2998 	}
2999 }
3000 
3001 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
3002 			      u32 sde_ddi_mask, u32 sde_tc_mask,
3003 			      u32 ddi_enable_mask, u32 tc_enable_mask,
3004 			      const u32 *pins)
3005 {
3006 	u32 hotplug_irqs, enabled_irqs;
3007 
3008 	hotplug_irqs = sde_ddi_mask | sde_tc_mask;
3009 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins);
3010 
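	/* Set the south HPD pin filter duration (500us adjusted count,
	 * per the SHPD_FILTER_CNT_500_ADJ definition). */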
3011 	I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3012 
3013 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3014 
3015 	icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask);
3016 }
3017 
3018 /*
3019  * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
3020  * equivalent of SDE.
3021  */
3022 static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
3023 {
3024 	icp_hpd_irq_setup(dev_priv,
3025 			  SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1),
3026 			  ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1),
3027 			  hpd_icp);
3028 }
3029 
3030 /*
3031  * JSP behaves exactly the same as MCC above except that port C is mapped to
3032  * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
3033  * masks & tables rather than ICP's masks & tables.
3034  */
3035 static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
3036 {
3037 	icp_hpd_irq_setup(dev_priv,
3038 			  SDE_DDI_MASK_TGP, 0,
3039 			  TGP_DDI_HPD_ENABLE_MASK, 0,
3040 			  hpd_tgp);
3041 }
3042 
3043 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
3044 {
3045 	u32 hotplug;
3046 
3047 	hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
3048 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3049 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3050 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3051 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3052 	I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
3053 
3054 	hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
3055 	hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
3056 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
3057 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
3058 		   GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4);
3059 	I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
3060 }
3061 
3062 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
3063 {
3064 	u32 hotplug_irqs, enabled_irqs __unused;
3065 	const u32 *hpd;
3066 	u32 val;
3067 
3068 	hpd = INTEL_GEN(dev_priv) >= 12 ? hpd_gen12 : hpd_gen11;
3069 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd);
3070 	hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK;
3071 
3072 	val = I915_READ(GEN11_DE_HPD_IMR);
3073 	val &= ~hotplug_irqs;
3074 	I915_WRITE(GEN11_DE_HPD_IMR, val);
3075 	POSTING_READ(GEN11_DE_HPD_IMR);
3076 
3077 	gen11_hpd_detection_setup(dev_priv);
3078 
3079 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
3080 		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP,
3081 				  TGP_DDI_HPD_ENABLE_MASK,
3082 				  TGP_TC_HPD_ENABLE_MASK, hpd_tgp);
3083 	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3084 		icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP,
3085 				  ICP_DDI_HPD_ENABLE_MASK,
3086 				  ICP_TC_HPD_ENABLE_MASK, hpd_icp);
3087 }
3088 
3089 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3090 {
3091 	u32 val, hotplug;
3092 
3093 	/* Display WA #1179 WaHardHangonHotPlug: cnp */
3094 	if (HAS_PCH_CNP(dev_priv)) {
3095 		val = I915_READ(SOUTH_CHICKEN1);
3096 		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
3097 		val |= CHASSIS_CLK_REQ_DURATION(0xf);
3098 		I915_WRITE(SOUTH_CHICKEN1, val);
3099 	}
3100 
3101 	/* Enable digital hotplug on the PCH */
3102 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3103 	hotplug |= PORTA_HOTPLUG_ENABLE |
3104 		   PORTB_HOTPLUG_ENABLE |
3105 		   PORTC_HOTPLUG_ENABLE |
3106 		   PORTD_HOTPLUG_ENABLE;
3107 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3108 
3109 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3110 	hotplug |= PORTE_HOTPLUG_ENABLE;
3111 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3112 }
3113 
3114 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3115 {
3116 	u32 hotplug_irqs, enabled_irqs;
3117 
3118 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
3119 		I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);
3120 
3121 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3122 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3123 
3124 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3125 
3126 	spt_hpd_detection_setup(dev_priv);
3127 }
3128 
3129 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
3130 {
3131 	u32 hotplug;
3132 
3133 	/*
3134 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3135 	 * duration to 2ms (which is the minimum in the Display Port spec)
3136 	 * The pulse duration bits are reserved on HSW+.
3137 	 */
3138 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3139 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3140 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
3141 		   DIGITAL_PORTA_PULSE_DURATION_2ms;
3142 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3143 }
3144 
3145 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3146 {
3147 	u32 hotplug_irqs, enabled_irqs;
3148 
3149 	if (INTEL_GEN(dev_priv) >= 8) {
3150 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3151 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3152 
3153 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3154 	} else if (INTEL_GEN(dev_priv) >= 7) {
3155 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3156 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3157 
3158 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3159 	} else {
3160 		hotplug_irqs = DE_DP_A_HOTPLUG;
3161 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3162 
3163 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3164 	}
3165 
3166 	ilk_hpd_detection_setup(dev_priv);
3167 
3168 	ibx_hpd_irq_setup(dev_priv);
3169 }
3170 
3171 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
3172 				      u32 enabled_irqs)
3173 {
3174 	u32 hotplug;
3175 
3176 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3177 	hotplug |= PORTA_HOTPLUG_ENABLE |
3178 		   PORTB_HOTPLUG_ENABLE |
3179 		   PORTC_HOTPLUG_ENABLE;
3180 
3181 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3182 		      hotplug, enabled_irqs);
3183 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3184 
3185 	/*
3186 	 * For BXT invert bit has to be set based on AOB design
3187 	 * for HPD detection logic, update it based on VBT fields.
3188 	 */
3189 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3190 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3191 		hotplug |= BXT_DDIA_HPD_INVERT;
3192 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3193 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3194 		hotplug |= BXT_DDIB_HPD_INVERT;
3195 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3196 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3197 		hotplug |= BXT_DDIC_HPD_INVERT;
3198 
3199 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3200 }
3201 
3202 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
3203 {
3204 	__bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
3205 }
3206 
3207 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3208 {
3209 	u32 hotplug_irqs, enabled_irqs;
3210 
3211 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3212 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3213 
3214 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3215 
3216 	__bxt_hpd_detection_setup(dev_priv, enabled_irqs);
3217 }
3218 
3219 static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
3220 {
3221 	u32 mask;
3222 
3223 	if (HAS_PCH_NOP(dev_priv))
3224 		return;
3225 
3226 	if (HAS_PCH_IBX(dev_priv))
3227 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3228 	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3229 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3230 	else
3231 		mask = SDE_GMBUS_CPT;
3232 
3233 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3234 	I915_WRITE(SDEIMR, ~mask);
3235 
3236 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
3237 	    HAS_PCH_LPT(dev_priv))
3238 		ibx_hpd_detection_setup(dev_priv);
3239 	else
3240 		spt_hpd_detection_setup(dev_priv);
3241 }
3242 
3243 static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
3244 {
3245 	struct intel_uncore *uncore = &dev_priv->uncore;
3246 	u32 display_mask, extra_mask;
3247 
3248 	if (INTEL_GEN(dev_priv) >= 7) {
3249 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3250 				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
3251 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3252 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3253 			      DE_DP_A_HOTPLUG_IVB);
3254 	} else {
3255 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3256 				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
3257 				DE_PIPEA_CRC_DONE | DE_POISON);
3258 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3259 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3260 			      DE_DP_A_HOTPLUG);
3261 	}
3262 
3263 	if (IS_HASWELL(dev_priv)) {
3264 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3265 		display_mask |= DE_EDP_PSR_INT_HSW;
3266 	}
3267 
3268 	dev_priv->irq_mask = ~display_mask;
3269 
3270 	ibx_irq_pre_postinstall(dev_priv);
3271 
3272 	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
3273 		      display_mask | extra_mask);
3274 
3275 	gen5_gt_irq_postinstall(&dev_priv->gt);
3276 
3277 	ilk_hpd_detection_setup(dev_priv);
3278 
3279 	ibx_irq_postinstall(dev_priv);
3280 
3281 	if (IS_IRONLAKE_M(dev_priv)) {
3282 		/* Enable PCU event interrupts
3283 		 *
3284 		 * spinlocking not required here for correctness since interrupt
3285 		 * setup is guaranteed to run in single-threaded context. But we
3286 		 * need it to make the assert_spin_locked happy. */
3287 		spin_lock_irq(&dev_priv->irq_lock);
3288 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3289 		spin_unlock_irq(&dev_priv->irq_lock);
3290 	}
3291 }
3292 
3293 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3294 {
3295 	lockdep_assert_held(&dev_priv->irq_lock);
3296 
3297 	if (dev_priv->display_irqs_enabled)
3298 		return;
3299 
3300 	dev_priv->display_irqs_enabled = true;
3301 
3302 	if (intel_irqs_enabled(dev_priv)) {
3303 		vlv_display_irq_reset(dev_priv);
3304 		vlv_display_irq_postinstall(dev_priv);
3305 	}
3306 }
3307 
3308 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3309 {
3310 	lockdep_assert_held(&dev_priv->irq_lock);
3311 
3312 	if (!dev_priv->display_irqs_enabled)
3313 		return;
3314 
3315 	dev_priv->display_irqs_enabled = false;
3316 
3317 	if (intel_irqs_enabled(dev_priv))
3318 		vlv_display_irq_reset(dev_priv);
3319 }
3320 
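/*
 * A minimal caller sketch, assuming the same dev_priv: both helpers above
 * assert via lockdep that dev_priv->irq_lock is held, so a caller brackets
 * them like this:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	valleyview_enable_display_irqs(dev_priv);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */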
3321 
3322 static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
3323 {
3324 	gen5_gt_irq_postinstall(&dev_priv->gt);
3325 
3326 	spin_lock_irq(&dev_priv->irq_lock);
3327 	if (dev_priv->display_irqs_enabled)
3328 		vlv_display_irq_postinstall(dev_priv);
3329 	spin_unlock_irq(&dev_priv->irq_lock);
3330 
3331 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3332 	POSTING_READ(VLV_MASTER_IER);
3333 }
3334 
3335 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3336 {
3337 	struct intel_uncore *uncore = &dev_priv->uncore;
3338 
3339 	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3340 	u32 de_pipe_enables;
3341 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3342 	u32 de_port_enables;
3343 	u32 de_misc_masked = GEN8_DE_EDP_PSR;
3344 	enum pipe pipe;
3345 
3346 	if (INTEL_GEN(dev_priv) <= 10)
3347 		de_misc_masked |= GEN8_DE_MISC_GSE;
3348 
3349 	if (INTEL_GEN(dev_priv) >= 9) {
3350 		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3351 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3352 				  GEN9_AUX_CHANNEL_D;
3353 		if (IS_GEN9_LP(dev_priv))
3354 			de_port_masked |= BXT_DE_PORT_GMBUS;
3355 	} else {
3356 		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3357 	}
3358 
3359 	if (INTEL_GEN(dev_priv) >= 11)
3360 		de_port_masked |= ICL_AUX_CHANNEL_E;
3361 
3362 	if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11)
3363 		de_port_masked |= CNL_AUX_CHANNEL_F;
3364 
3365 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3366 					   GEN8_PIPE_FIFO_UNDERRUN;
3367 
3368 	de_port_enables = de_port_masked;
3369 	if (IS_GEN9_LP(dev_priv))
3370 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3371 	else if (IS_BROADWELL(dev_priv))
3372 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3373 
3374 	if (INTEL_GEN(dev_priv) >= 12) {
3375 		enum transcoder trans;
3376 
3377 		for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) {
3378 			enum intel_display_power_domain domain;
3379 
3380 			domain = POWER_DOMAIN_TRANSCODER(trans);
3381 			if (!intel_display_power_is_enabled(dev_priv, domain))
3382 				continue;
3383 
3384 			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
3385 		}
3386 	} else {
3387 		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
3388 	}
3389 
3390 	for_each_pipe(dev_priv, pipe) {
3391 		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
3392 
3393 		if (intel_display_power_is_enabled(dev_priv,
3394 				POWER_DOMAIN_PIPE(pipe)))
3395 			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3396 					  dev_priv->de_irq_mask[pipe],
3397 					  de_pipe_enables);
3398 	}
3399 
3400 	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3401 	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3402 
3403 	if (INTEL_GEN(dev_priv) >= 11) {
3404 		u32 de_hpd_masked = 0;
3405 		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
3406 				     GEN11_DE_TBT_HOTPLUG_MASK;
3407 
3408 		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
3409 			      de_hpd_enables);
3410 		gen11_hpd_detection_setup(dev_priv);
3411 	} else if (IS_GEN9_LP(dev_priv)) {
3412 		bxt_hpd_detection_setup(dev_priv);
3413 	} else if (IS_BROADWELL(dev_priv)) {
3414 		ilk_hpd_detection_setup(dev_priv);
3415 	}
3416 }
3417 
3418 static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
3419 {
3420 	if (HAS_PCH_SPLIT(dev_priv))
3421 		ibx_irq_pre_postinstall(dev_priv);
3422 
3423 	gen8_gt_irq_postinstall(&dev_priv->gt);
3424 	gen8_de_irq_postinstall(dev_priv);
3425 
3426 	if (HAS_PCH_SPLIT(dev_priv))
3427 		ibx_irq_postinstall(dev_priv);
3428 
3429 	gen8_master_intr_enable(&dev_priv->uncore);
3430 }
3431 
3432 static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
3433 {
3434 	u32 mask = SDE_GMBUS_ICP;
3435 
3436 	WARN_ON(I915_READ(SDEIER) != 0);
3437 	I915_WRITE(SDEIER, 0xffffffff);
3438 	POSTING_READ(SDEIER);
3439 
3440 	gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
3441 	I915_WRITE(SDEIMR, ~mask);
3442 
3443 	if (HAS_PCH_TGP(dev_priv))
3444 		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK,
3445 					TGP_TC_HPD_ENABLE_MASK);
3446 	else if (HAS_PCH_JSP(dev_priv))
3447 		icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0);
3448 	else if (HAS_PCH_MCC(dev_priv))
3449 		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
3450 					ICP_TC_HPD_ENABLE(PORT_TC1));
3451 	else
3452 		icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK,
3453 					ICP_TC_HPD_ENABLE_MASK);
3454 }
3455 
3456 static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
3457 {
3458 	struct intel_uncore *uncore = &dev_priv->uncore;
3459 	u32 gu_misc_masked = GEN11_GU_MISC_GSE;
3460 
3461 	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3462 		icp_irq_postinstall(dev_priv);
3463 
3464 	gen11_gt_irq_postinstall(&dev_priv->gt);
3465 	gen8_de_irq_postinstall(dev_priv);
3466 
3467 	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
3468 
3469 	I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
3470 
3471 	gen11_master_intr_enable(uncore);
3472 	POSTING_READ(GEN11_GFX_MSTR_IRQ);
3473 }
3474 
3475 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
3476 {
3477 	gen8_gt_irq_postinstall(&dev_priv->gt);
3478 
3479 	spin_lock_irq(&dev_priv->irq_lock);
3480 	if (dev_priv->display_irqs_enabled)
3481 		vlv_display_irq_postinstall(dev_priv);
3482 	spin_unlock_irq(&dev_priv->irq_lock);
3483 
3484 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3485 	POSTING_READ(GEN8_MASTER_IRQ);
3486 }
3487 
3488 static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
3489 {
3490 	struct intel_uncore *uncore = &dev_priv->uncore;
3491 
3492 	i9xx_pipestat_irq_reset(dev_priv);
3493 
3494 	GEN2_IRQ_RESET(uncore);
3495 }
3496 
3497 static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
3498 {
3499 	struct intel_uncore *uncore = &dev_priv->uncore;
3500 	u16 enable_mask;
3501 
3502 	intel_uncore_write16(uncore,
3503 			     EMR,
3504 			     ~(I915_ERROR_PAGE_TABLE |
3505 			       I915_ERROR_MEMORY_REFRESH));
3506 
3507 	/* Unmask the interrupts that we always want on. */
3508 	dev_priv->irq_mask =
3509 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3510 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3511 		  I915_MASTER_ERROR_INTERRUPT);
3512 
3513 	enable_mask =
3514 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3515 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3516 		I915_MASTER_ERROR_INTERRUPT |
3517 		I915_USER_INTERRUPT;
3518 
3519 	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
3520 
3521 	/* Interrupt setup is already guaranteed to be single-threaded; this
3522 	 * is just to make the assert_spin_locked check happy. */
3523 	spin_lock_irq(&dev_priv->irq_lock);
3524 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3525 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3526 	spin_unlock_irq(&dev_priv->irq_lock);
3527 }
3528 
3529 static void i8xx_error_irq_ack(struct drm_i915_private *i915,
3530 			       u16 *eir, u16 *eir_stuck)
3531 {
3532 	struct intel_uncore *uncore = &i915->uncore;
3533 	u16 emr;
3534 
3535 	*eir = intel_uncore_read16(uncore, EIR);
3536 
3537 	if (*eir)
3538 		intel_uncore_write16(uncore, EIR, *eir);
3539 
3540 	*eir_stuck = intel_uncore_read16(uncore, EIR);
3541 	if (*eir_stuck == 0)
3542 		return;
3543 
3544 	/*
3545 	 * Toggle all EMR bits to make sure we get an edge
3546 	 * in the ISR master error bit if we don't clear
3547 	 * all the EIR bits. Otherwise the edge triggered
3548 	 * IIR on i965/g4x wouldn't notice that an interrupt
3549 	 * is still pending. Also some EIR bits can't be
3550 	 * cleared except by handling the underlying error
3551 	 * (or by a GPU reset) so we mask any bit that
3552 	 * remains set.
3553 	 */
3554 	emr = intel_uncore_read16(uncore, EMR);
3555 	intel_uncore_write16(uncore, EMR, 0xffff);
3556 	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
3557 }
3558 
3559 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
3560 				   u16 eir, u16 eir_stuck)
3561 {
3562 	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
3563 
3564 	if (eir_stuck)
3565 		DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
3566 }
3567 
3568 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
3569 			       u32 *eir, u32 *eir_stuck)
3570 {
3571 	u32 emr;
3572 
3573 	*eir = I915_READ(EIR);
3574 
3575 	I915_WRITE(EIR, *eir);
3576 
3577 	*eir_stuck = I915_READ(EIR);
3578 	if (*eir_stuck == 0)
3579 		return;
3580 
3581 	/*
3582 	 * Toggle all EMR bits to make sure we get an edge
3583 	 * in the ISR master error bit if we don't clear
3584 	 * all the EIR bits. Otherwise the edge triggered
3585 	 * IIR on i965/g4x wouldn't notice that an interrupt
3586 	 * is still pending. Also some EIR bits can't be
3587 	 * cleared except by handling the underlying error
3588 	 * (or by a GPU reset) so we mask any bit that
3589 	 * remains set.
3590 	 */
3591 	emr = I915_READ(EMR);
3592 	I915_WRITE(EMR, 0xffffffff);
3593 	I915_WRITE(EMR, emr | *eir_stuck);
3594 }
3595 
3596 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
3597 				   u32 eir, u32 eir_stuck)
3598 {
3599 	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
3600 
3601 	if (eir_stuck)
3602 		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
3603 }
3604 
3605 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
3606 {
3607 	struct drm_i915_private *dev_priv = arg;
3608 	irqreturn_t ret = IRQ_NONE;
3609 
3610 	if (!intel_irqs_enabled(dev_priv))
3611 		return IRQ_NONE;
3612 
3613 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3614 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3615 
3616 	do {
3617 		u32 pipe_stats[I915_MAX_PIPES] = {};
3618 		u16 eir = 0, eir_stuck = 0;
3619 		u16 iir;
3620 
3621 		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
3622 		if (iir == 0)
3623 			break;
3624 
3625 		ret = IRQ_HANDLED;
3626 
3627 		/* Call regardless, as some status bits might not be
3628 		 * signalled in iir */
3629 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3630 
3631 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3632 			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3633 
3634 		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
3635 
3636 		if (iir & I915_USER_INTERRUPT)
3637 			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
3638 
3639 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3640 			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
3641 
3642 		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3643 	} while (0);
3644 
3645 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3646 
3647 	return ret;
3648 }
3649 
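/*
 * The handler above follows the usual ack-then-process order: read IIR,
 * latch the secondary status it gates (pipestat, EIR), clear IIR, and only
 * then run the handlers, so an event arriving mid-handler raises a fresh
 * interrupt rather than being lost. A condensed, illustrative sketch:
 *
 *	iir = read(IIR);
 *	ack_secondary_status(iir);	// pipestat, error bits
 *	write(IIR, iir);		// clear before handling
 *	handle_events(iir);		// breadcrumbs, errors, pipestats
 */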
3650 static void i915_irq_reset(struct drm_i915_private *dev_priv)
3651 {
3652 	struct intel_uncore *uncore = &dev_priv->uncore;
3653 
3654 	if (I915_HAS_HOTPLUG(dev_priv)) {
3655 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3656 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3657 	}
3658 
3659 	i9xx_pipestat_irq_reset(dev_priv);
3660 
3661 	GEN3_IRQ_RESET(uncore, GEN2_);
3662 }
3663 
3664 static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
3665 {
3666 	struct intel_uncore *uncore = &dev_priv->uncore;
3667 	u32 enable_mask;
3668 
3669 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
3670 			  I915_ERROR_MEMORY_REFRESH));
3671 
3672 	/* Unmask the interrupts that we always want on. */
3673 	dev_priv->irq_mask =
3674 		~(I915_ASLE_INTERRUPT |
3675 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3676 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3677 		  I915_MASTER_ERROR_INTERRUPT);
3678 
3679 	enable_mask =
3680 		I915_ASLE_INTERRUPT |
3681 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3682 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3683 		I915_MASTER_ERROR_INTERRUPT |
3684 		I915_USER_INTERRUPT;
3685 
3686 	if (I915_HAS_HOTPLUG(dev_priv)) {
3687 		/* Enable in IER... */
3688 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3689 		/* and unmask in IMR */
3690 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3691 	}
3692 
3693 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3694 
3695 	/* Interrupt setup is already guaranteed to be single-threaded; this
3696 	 * is just to make the assert_spin_locked check happy. */
3697 	spin_lock_irq(&dev_priv->irq_lock);
3698 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3699 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3700 	spin_unlock_irq(&dev_priv->irq_lock);
3701 
3702 	i915_enable_asle_pipestat(dev_priv);
3703 }
3704 
3705 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
3706 {
3707 	struct drm_i915_private *dev_priv = arg;
3708 	irqreturn_t ret = IRQ_NONE;
3709 
3710 	if (!intel_irqs_enabled(dev_priv))
3711 		return IRQ_NONE;
3712 
3713 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3714 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3715 
3716 	do {
3717 		u32 pipe_stats[I915_MAX_PIPES] = {};
3718 		u32 eir = 0, eir_stuck = 0;
3719 		u32 hotplug_status = 0;
3720 		u32 iir;
3721 
3722 		iir = I915_READ(GEN2_IIR);
3723 		if (iir == 0)
3724 			break;
3725 
3726 		ret = IRQ_HANDLED;
3727 
3728 		if (I915_HAS_HOTPLUG(dev_priv) &&
3729 		    iir & I915_DISPLAY_PORT_INTERRUPT)
3730 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3731 
3732 		/* Call regardless, as some status bits might not be
3733 		 * signalled in iir */
3734 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3735 
3736 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3737 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3738 
3739 		I915_WRITE(GEN2_IIR, iir);
3740 
3741 		if (iir & I915_USER_INTERRUPT)
3742 			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
3743 
3744 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3745 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3746 
3747 		if (hotplug_status)
3748 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3749 
3750 		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3751 	} while (0);
3752 
3753 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3754 
3755 	return ret;
3756 }
3757 
3758 static void i965_irq_reset(struct drm_i915_private *dev_priv)
3759 {
3760 	struct intel_uncore *uncore = &dev_priv->uncore;
3761 
3762 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3763 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3764 
3765 	i9xx_pipestat_irq_reset(dev_priv);
3766 
3767 	GEN3_IRQ_RESET(uncore, GEN2_);
3768 }
3769 
3770 static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
3771 {
3772 	struct intel_uncore *uncore = &dev_priv->uncore;
3773 	u32 enable_mask;
3774 	u32 error_mask;
3775 
3776 	/*
3777 	 * Enable some error detection, note the instruction error mask
3778 	 * bit is reserved, so we leave it masked.
3779 	 */
3780 	if (IS_G4X(dev_priv)) {
3781 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
3782 			       GM45_ERROR_MEM_PRIV |
3783 			       GM45_ERROR_CP_PRIV |
3784 			       I915_ERROR_MEMORY_REFRESH);
3785 	} else {
3786 		error_mask = ~(I915_ERROR_PAGE_TABLE |
3787 			       I915_ERROR_MEMORY_REFRESH);
3788 	}
3789 	I915_WRITE(EMR, error_mask);
3790 
3791 	/* Unmask the interrupts that we always want on. */
3792 	dev_priv->irq_mask =
3793 		~(I915_ASLE_INTERRUPT |
3794 		  I915_DISPLAY_PORT_INTERRUPT |
3795 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3796 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3797 		  I915_MASTER_ERROR_INTERRUPT);
3798 
3799 	enable_mask =
3800 		I915_ASLE_INTERRUPT |
3801 		I915_DISPLAY_PORT_INTERRUPT |
3802 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3803 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3804 		I915_MASTER_ERROR_INTERRUPT |
3805 		I915_USER_INTERRUPT;
3806 
3807 	if (IS_G4X(dev_priv))
3808 		enable_mask |= I915_BSD_USER_INTERRUPT;
3809 
3810 	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
3811 
3812 	/* Interrupt setup is already guaranteed to be single-threaded; this
3813 	 * is just to make the assert_spin_locked check happy. */
3814 	spin_lock_irq(&dev_priv->irq_lock);
3815 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3816 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3817 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3818 	spin_unlock_irq(&dev_priv->irq_lock);
3819 
3820 	i915_enable_asle_pipestat(dev_priv);
3821 }
3822 
3823 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
3824 {
3825 	u32 hotplug_en;
3826 
3827 	lockdep_assert_held(&dev_priv->irq_lock);
3828 
3829 	/* Note HDMI and DP share hotplug bits */
3830 	/* enable bits are the same for all generations */
3831 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
3832 	/* Programming the CRT detection parameters tends
3833 	   to generate a spurious hotplug event about three
3834 	   seconds later.  So just do it once.
3835 	*/
3836 	if (IS_G4X(dev_priv))
3837 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
3838 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
3839 
3840 	/* Ignore TV since it's buggy */
3841 	i915_hotplug_interrupt_update_locked(dev_priv,
3842 					     HOTPLUG_INT_EN_MASK |
3843 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
3844 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
3845 					     hotplug_en);
3846 }
3847 
3848 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
3849 {
3850 	struct drm_i915_private *dev_priv = arg;
3851 	irqreturn_t ret = IRQ_NONE;
3852 
3853 	if (!intel_irqs_enabled(dev_priv))
3854 		return IRQ_NONE;
3855 
3856 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3857 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3858 
3859 	do {
3860 		u32 pipe_stats[I915_MAX_PIPES] = {};
3861 		u32 eir = 0, eir_stuck = 0;
3862 		u32 hotplug_status = 0;
3863 		u32 iir;
3864 
3865 		iir = I915_READ(GEN2_IIR);
3866 		if (iir == 0)
3867 			break;
3868 
3869 		ret = IRQ_HANDLED;
3870 
3871 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
3872 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
3873 
3874 		/* Call regardless, as some status bits might not be
3875 		 * signalled in iir */
3876 		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
3877 
3878 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3879 			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
3880 
3881 		I915_WRITE(GEN2_IIR, iir);
3882 
3883 		if (iir & I915_USER_INTERRUPT)
3884 			intel_engine_signal_breadcrumbs(dev_priv->engine[RCS0]);
3885 
3886 		if (iir & I915_BSD_USER_INTERRUPT)
3887 			intel_engine_signal_breadcrumbs(dev_priv->engine[VCS0]);
3888 
3889 		if (iir & I915_MASTER_ERROR_INTERRUPT)
3890 			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
3891 
3892 		if (hotplug_status)
3893 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
3894 
3895 		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
3896 	} while (0);
3897 
3898 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
3899 
3900 	return ret;
3901 }
3902 
3903 /**
3904  * intel_irq_init - initializes irq support
3905  * @dev_priv: i915 device instance
3906  *
3907  * This function initializes all the irq support including work items, timers
3908  * and all the vtables. It does not set up the interrupt itself, though.
3909  */
3910 void intel_irq_init(struct drm_i915_private *dev_priv)
3911 {
3912 	struct drm_device *dev = &dev_priv->drm;
3913 	int i;
3914 
3915 	intel_hpd_init_work(dev_priv);
3916 
3917 	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
3918 	for (i = 0; i < MAX_L3_SLICES; ++i)
3919 		dev_priv->l3_parity.remap_info[i] = NULL;
3920 
3921 	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
3922 	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
3923 		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
3924 
3925 	dev->vblank_disable_immediate = true;
3926 
3927 	/* Most platforms treat the display irq block as an always-on
3928 	 * power domain. vlv/chv can disable it at runtime and need
3929 	 * special care to avoid writing any of the display block registers
3930 	 * outside of the power domain. We defer setting up the display irqs
3931 	 * in this case to the runtime pm.
3932 	 */
3933 	dev_priv->display_irqs_enabled = true;
3934 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3935 		dev_priv->display_irqs_enabled = false;
3936 
3937 	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
3938 	/* If we have MST support, we want to avoid doing short HPD IRQ storm
3939 	 * detection, as short HPD storms will occur as a natural part of
3940 	 * sideband messaging with MST.
3941 	 * On older platforms however, IRQ storms can occur with both long and
3942 	 * short pulses, as seen on some G4x systems.
3943 	 */
3944 	dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
3945 
3946 	if (HAS_GMCH(dev_priv)) {
3947 		if (I915_HAS_HOTPLUG(dev_priv))
3948 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
3949 	} else {
3950 		if (HAS_PCH_JSP(dev_priv))
3951 			dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
3952 		else if (HAS_PCH_MCC(dev_priv))
3953 			dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
3954 		else if (INTEL_GEN(dev_priv) >= 11)
3955 			dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
3956 		else if (IS_GEN9_LP(dev_priv))
3957 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
3958 		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
3959 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
3960 		else
3961 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
3962 	}
3963 }
3964 
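/*
 * A hedged sketch of how the hpd_irq_setup hook chosen above is typically
 * consumed (hypothetical caller; the real call sites live in the hotplug
 * code and run under dev_priv->irq_lock):
 *
 *	if (dev_priv->display_irqs_enabled &&
 *	    dev_priv->display.hpd_irq_setup)
 *		dev_priv->display.hpd_irq_setup(dev_priv);
 */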
3965 /**
3966  * intel_irq_fini - deinitializes IRQ support
3967  * @i915: i915 device instance
3968  *
3969  * This function deinitializes all the IRQ support.
3970  */
3971 void intel_irq_fini(struct drm_i915_private *i915)
3972 {
3973 	int i;
3974 
3975 	for (i = 0; i < MAX_L3_SLICES; ++i)
3976 		kfree(i915->l3_parity.remap_info[i]);
3977 }
3978 
3979 static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
3980 {
3981 	if (HAS_GMCH(dev_priv)) {
3982 		if (IS_CHERRYVIEW(dev_priv))
3983 			return cherryview_irq_handler;
3984 		else if (IS_VALLEYVIEW(dev_priv))
3985 			return valleyview_irq_handler;
3986 		else if (IS_GEN(dev_priv, 4))
3987 			return i965_irq_handler;
3988 		else if (IS_GEN(dev_priv, 3))
3989 			return i915_irq_handler;
3990 		else
3991 			return i8xx_irq_handler;
3992 	} else {
3993 		if (INTEL_GEN(dev_priv) >= 11)
3994 			return gen11_irq_handler;
3995 		else if (INTEL_GEN(dev_priv) >= 8)
3996 			return gen8_irq_handler;
3997 		else
3998 			return ilk_irq_handler;
3999 	}
4000 }
4001 
4002 static void intel_irq_reset(struct drm_i915_private *dev_priv)
4003 {
4004 	if (HAS_GMCH(dev_priv)) {
4005 		if (IS_CHERRYVIEW(dev_priv))
4006 			cherryview_irq_reset(dev_priv);
4007 		else if (IS_VALLEYVIEW(dev_priv))
4008 			valleyview_irq_reset(dev_priv);
4009 		else if (IS_GEN(dev_priv, 4))
4010 			i965_irq_reset(dev_priv);
4011 		else if (IS_GEN(dev_priv, 3))
4012 			i915_irq_reset(dev_priv);
4013 		else
4014 			i8xx_irq_reset(dev_priv);
4015 	} else {
4016 		if (INTEL_GEN(dev_priv) >= 11)
4017 			gen11_irq_reset(dev_priv);
4018 		else if (INTEL_GEN(dev_priv) >= 8)
4019 			gen8_irq_reset(dev_priv);
4020 		else
4021 			ilk_irq_reset(dev_priv);
4022 	}
4023 }
4024 
4025 static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
4026 {
4027 	if (HAS_GMCH(dev_priv)) {
4028 		if (IS_CHERRYVIEW(dev_priv))
4029 			cherryview_irq_postinstall(dev_priv);
4030 		else if (IS_VALLEYVIEW(dev_priv))
4031 			valleyview_irq_postinstall(dev_priv);
4032 		else if (IS_GEN(dev_priv, 4))
4033 			i965_irq_postinstall(dev_priv);
4034 		else if (IS_GEN(dev_priv, 3))
4035 			i915_irq_postinstall(dev_priv);
4036 		else
4037 			i8xx_irq_postinstall(dev_priv);
4038 	} else {
4039 		if (INTEL_GEN(dev_priv) >= 11)
4040 			gen11_irq_postinstall(dev_priv);
4041 		else if (INTEL_GEN(dev_priv) >= 8)
4042 			gen8_irq_postinstall(dev_priv);
4043 		else
4044 			ilk_irq_postinstall(dev_priv);
4045 	}
4046 }
4047 
4048 /**
4049  * intel_irq_install - enables the hardware interrupt
4050  * @dev_priv: i915 device instance
4051  *
4052  * This function enables the hardware interrupt handling, but leaves the hotplug
4053  * handling still disabled. It is called after intel_irq_init().
4054  *
4055  * In the driver load and resume code we need working interrupts in a few places
4056  * but don't want to deal with the hassle of concurrent probe and hotplug
4057  * workers. Hence the split into this two-stage approach.
4058  */
4059 int intel_irq_install(struct drm_i915_private *dev_priv)
4060 {
4061 #ifndef __NetBSD__
4062 	int irq = dev_priv->drm.pdev->irq;
4063 #endif
4064 	int ret;
4065 
4066 	/*
4067 	 * We enable some interrupt sources in our postinstall hooks, so mark
4068 	 * interrupts as enabled _before_ actually enabling them to avoid
4069 	 * special cases in our ordering checks.
4070 	 */
4071 	dev_priv->runtime_pm.irqs_enabled = true;
4072 
4073 	dev_priv->drm.irq_enabled = true;
4074 
4075 	intel_irq_reset(dev_priv);
4076 
4077 #ifdef __NetBSD__
4078     {
4079 	struct pci_dev *const pdev = dev_priv->drm.pdev;
4080 	const char *const name = device_xname(pci_dev_dev(pdev));
4081 	const struct pci_attach_args *pa = &pdev->pd_pa;
4082 	const char *intrstr;
4083 	char intrbuf[PCI_INTRSTR_LEN];
4084 
4085 	if (pdev->msi_enabled) {
4086 		if (pdev->pd_intr_handles == NULL) {
4087 			/* XXX errno NetBSD->Linux */
4088 			if ((ret = -pci_msi_alloc_exact(pa, &dev_priv->pci_ihp,
4089 			    1))) {
4090 				aprint_error_dev(pci_dev_dev(pdev),
4091 				    "couldn't allocate MSI (%s)\n", name);
4092 				goto out;
4093 			}
4094 		} else {
4095 			dev_priv->pci_ihp = pdev->pd_intr_handles;
4096 			pdev->pd_intr_handles = NULL;
4097 		}
4098 	} else {
4099 		/* XXX errno NetBSD->Linux */
4100 		if ((ret = -pci_intx_alloc(pa, &dev_priv->pci_ihp))) {
4101 			aprint_error_dev(pci_dev_dev(pdev),
4102 			    "couldn't allocate INTx interrupt (%s)\n",
4103 			    name);
4104 			goto out;
4105 		}
4106 	}
4107 
4108 	intrstr = pci_intr_string(pa->pa_pc, dev_priv->pci_ihp[0],
4109 	    intrbuf, sizeof(intrbuf));
4110 	dev_priv->pci_intrcookie = pci_intr_establish_xname(pa->pa_pc,
4111 	    dev_priv->pci_ihp[0], IPL_DRM, intel_irq_handler(dev_priv),
4112 	    dev_priv, name);
4113 	if (dev_priv->pci_intrcookie == NULL) {
4114 		aprint_error_dev(pci_dev_dev(pdev),
4115 		    "couldn't establish interrupt at %s (%s)\n", intrstr, name);
4116 		pci_intr_release(pa->pa_pc, dev_priv->pci_ihp, 1);
4117 		dev_priv->pci_ihp = NULL;
4118 		ret = -EIO;	/* XXX er? */
4119 		goto out;
4120 	}
4121 
4122 	/* Success!  */
4123 	aprint_normal_dev(pci_dev_dev(pdev), "interrupting at %s (%s)\n",
4124 	    intrstr, name);
4125 	ret = 0;
4126 out:;
4127     }
4128 #else
4129 	ret = request_irq(irq, intel_irq_handler(dev_priv),
4130 			  IRQF_SHARED, DRIVER_NAME, dev_priv);
4131 #endif
4132 	if (ret < 0) {
4133 		dev_priv->drm.irq_enabled = false;
4134 		return ret;
4135 	}
4136 
4137 	intel_irq_postinstall(dev_priv);
4138 
4139 	return ret;
4140 }
4141 
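/*
 * An illustrative driver-load sequence (a sketch, assuming a hypothetical,
 * already-allocated i915 instance), per the two-stage approach described
 * above:
 *
 *	intel_irq_init(i915);			// vtables, work items
 *	ret = intel_irq_install(i915);		// hook up the hardware irq
 *	if (ret < 0)
 *		goto err;	// drm.irq_enabled was rolled back on failure
 */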
4142 /**
4143  * intel_irq_uninstall - finalizes all irq handling
4144  * @dev_priv: i915 device instance
4145  *
4146  * This stops interrupt and hotplug handling and unregisters and frees all
4147  * resources acquired in the init functions.
4148  */
4149 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4150 {
4151 #ifndef __NetBSD__
4152 	int irq = dev_priv->drm.pdev->irq;
4153 #endif
4154 
4155 	/*
4156 	 * FIXME we can get called twice during driver probe
4157 	 * error handling as well as during driver remove due to
4158 	 * intel_modeset_driver_remove() calling us out of sequence.
4159 	 * Would be nice if it didn't do that...
4160 	 */
4161 	if (!dev_priv->drm.irq_enabled)
4162 		return;
4163 
4164 	dev_priv->drm.irq_enabled = false;
4165 
4166 	intel_irq_reset(dev_priv);
4167 
4168 #ifdef __NetBSD__
4169 	const struct pci_attach_args *pa = &dev_priv->drm.pdev->pd_pa;
4170 	if (dev_priv->pci_intrcookie != NULL) {
4171 		pci_intr_disestablish(pa->pa_pc, dev_priv->pci_intrcookie);
4172 		dev_priv->pci_intrcookie = NULL;
4173 	}
4174 	if (dev_priv->pci_ihp != NULL) {
4175 		pci_intr_release(pa->pa_pc, dev_priv->pci_ihp, 1);
4176 		dev_priv->pci_ihp = NULL;
4177 	}
4178 #else
4179 	free_irq(irq, dev_priv);
4180 #endif
4181 
4182 	intel_hpd_cancel_work(dev_priv);
4183 	dev_priv->runtime_pm.irqs_enabled = false;
4184 }
4185 
4186 /**
4187  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4188  * @dev_priv: i915 device instance
4189  *
4190  * This function is used to disable interrupts at runtime, both in the runtime
4191  * pm and the system suspend/resume code.
4192  */
4193 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4194 {
4195 	intel_irq_reset(dev_priv);
4196 	dev_priv->runtime_pm.irqs_enabled = false;
4197 	intel_synchronize_irq(dev_priv);
4198 }
4199 
4200 /**
4201  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4202  * @dev_priv: i915 device instance
4203  *
4204  * This function is used to enable interrupts at runtime, both in the runtime
4205  * pm and the system suspend/resume code.
4206  */
4207 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4208 {
4209 	dev_priv->runtime_pm.irqs_enabled = true;
4210 	intel_irq_reset(dev_priv);
4211 	intel_irq_postinstall(dev_priv);
4212 }
4213 
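/*
 * Illustrative pairing of the two runtime-pm helpers above in a
 * suspend/resume path (a sketch, not the driver's literal code):
 *
 *	intel_runtime_pm_disable_interrupts(i915);	// going to sleep
 *	...device in a low-power state...
 *	intel_runtime_pm_enable_interrupts(i915);	// waking back up
 */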
4214 bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
4215 {
4216 	/*
4217 	 * We only use drm_irq_uninstall() at unload and VT switch, so
4218 	 * this is the only thing we need to check.
4219 	 */
4220 	return dev_priv->runtime_pm.irqs_enabled;
4221 }
4222 
4223 void intel_synchronize_irq(struct drm_i915_private *i915)
4224 {
4225 #ifdef __NetBSD__
4226 	xc_barrier(0);
4227 #else
4228 	synchronize_irq(i915->drm.pdev->irq);
4229 #endif
4230 }
4231