1*f005ef32Sjsg // SPDX-License-Identifier: MIT
2*f005ef32Sjsg /*
3*f005ef32Sjsg * Copyright © 2023 Intel Corporation
4*f005ef32Sjsg */
5*f005ef32Sjsg
6*f005ef32Sjsg #include "gt/intel_rps.h"
7*f005ef32Sjsg #include "i915_drv.h"
8*f005ef32Sjsg #include "i915_irq.h"
9*f005ef32Sjsg #include "i915_reg.h"
10*f005ef32Sjsg #include "icl_dsi_regs.h"
11*f005ef32Sjsg #include "intel_crtc.h"
12*f005ef32Sjsg #include "intel_de.h"
13*f005ef32Sjsg #include "intel_display_irq.h"
14*f005ef32Sjsg #include "intel_display_trace.h"
15*f005ef32Sjsg #include "intel_display_types.h"
16*f005ef32Sjsg #include "intel_dp_aux.h"
17*f005ef32Sjsg #include "intel_fdi_regs.h"
18*f005ef32Sjsg #include "intel_fifo_underrun.h"
19*f005ef32Sjsg #include "intel_gmbus.h"
20*f005ef32Sjsg #include "intel_hotplug_irq.h"
21*f005ef32Sjsg #include "intel_pmdemand.h"
22*f005ef32Sjsg #include "intel_psr.h"
23*f005ef32Sjsg #include "intel_psr_regs.h"
24*f005ef32Sjsg
25*f005ef32Sjsg static void
intel_handle_vblank(struct drm_i915_private * dev_priv,enum pipe pipe)26*f005ef32Sjsg intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
27*f005ef32Sjsg {
28*f005ef32Sjsg struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
29*f005ef32Sjsg
30*f005ef32Sjsg drm_crtc_handle_vblank(&crtc->base);
31*f005ef32Sjsg }
32*f005ef32Sjsg
33*f005ef32Sjsg /**
34*f005ef32Sjsg * ilk_update_display_irq - update DEIMR
35*f005ef32Sjsg * @dev_priv: driver private
36*f005ef32Sjsg * @interrupt_mask: mask of interrupt bits to update
37*f005ef32Sjsg * @enabled_irq_mask: mask of interrupt bits to enable
38*f005ef32Sjsg */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	/*
	 * Clear the bits being updated, then set those that must stay
	 * masked (IMR bit set == interrupt disabled).
	 */
	new_val = (dev_priv->irq_mask & ~interrupt_mask) |
		  (~enabled_irq_mask & interrupt_mask);

	if (new_val == dev_priv->irq_mask)
		return;

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->irq_mask = new_val;
	intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
	intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
}
58*f005ef32Sjsg
/* Unmask @bits in DEIMR. */
void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}
63*f005ef32Sjsg
/* Mask @bits in DEIMR. */
void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
68*f005ef32Sjsg
69*f005ef32Sjsg /**
70*f005ef32Sjsg * bdw_update_port_irq - update DE port interrupt
71*f005ef32Sjsg * @dev_priv: driver private
72*f005ef32Sjsg * @interrupt_mask: mask of interrupt bits to update
73*f005ef32Sjsg * @enabled_irq_mask: mask of interrupt bits to enable
74*f005ef32Sjsg */
void bdw_update_port_irq(struct drm_i915_private *dev_priv,
			 u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 old_val, new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	/* Clear the updated bits, then re-mask those not being enabled. */
	new_val = (old_val & ~interrupt_mask) |
		  (~enabled_irq_mask & interrupt_mask);

	if (new_val == old_val)
		return;

	intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
}
99*f005ef32Sjsg
100*f005ef32Sjsg /**
101*f005ef32Sjsg * bdw_update_pipe_irq - update DE pipe interrupt
102*f005ef32Sjsg * @dev_priv: driver private
103*f005ef32Sjsg * @pipe: pipe whose interrupt to update
104*f005ef32Sjsg * @interrupt_mask: mask of interrupt bits to update
105*f005ef32Sjsg * @enabled_irq_mask: mask of interrupt bits to enable
106*f005ef32Sjsg */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	/* Clear the updated bits, then re-mask those not being enabled. */
	new_val = (dev_priv->de_irq_mask[pipe] & ~interrupt_mask) |
		  (~enabled_irq_mask & interrupt_mask);

	if (new_val == dev_priv->de_irq_mask[pipe])
		return;

	dev_priv->de_irq_mask[pipe] = new_val;
	intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), new_val);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
}
130*f005ef32Sjsg
/* Unmask @bits in the DE pipe IMR for @pipe. */
void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}
136*f005ef32Sjsg
/* Mask @bits in the DE pipe IMR for @pipe. */
void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}
142*f005ef32Sjsg
143*f005ef32Sjsg /**
144*f005ef32Sjsg * ibx_display_interrupt_update - update SDEIMR
145*f005ef32Sjsg * @dev_priv: driver private
146*f005ef32Sjsg * @interrupt_mask: mask of interrupt bits to update
147*f005ef32Sjsg * @enabled_irq_mask: mask of interrupt bits to enable
148*f005ef32Sjsg */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask)
{
	u32 sdeimr;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	/* Read-modify-write SDEIMR: clear updated bits, re-mask disabled ones. */
	sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
	sdeimr = (sdeimr & ~interrupt_mask) |
		 (~enabled_irq_mask & interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
168*f005ef32Sjsg
/* Unmask @bits in SDEIMR. */
void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}
173*f005ef32Sjsg
/* Mask @bits in SDEIMR. */
void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}
178*f005ef32Sjsg
i915_pipestat_enable_mask(struct drm_i915_private * dev_priv,enum pipe pipe)179*f005ef32Sjsg u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
180*f005ef32Sjsg enum pipe pipe)
181*f005ef32Sjsg {
182*f005ef32Sjsg u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
183*f005ef32Sjsg u32 enable_mask = status_mask << 16;
184*f005ef32Sjsg
185*f005ef32Sjsg lockdep_assert_held(&dev_priv->irq_lock);
186*f005ef32Sjsg
187*f005ef32Sjsg if (DISPLAY_VER(dev_priv) < 5)
188*f005ef32Sjsg goto out;
189*f005ef32Sjsg
190*f005ef32Sjsg /*
191*f005ef32Sjsg * On pipe A we don't support the PSR interrupt yet,
192*f005ef32Sjsg * on pipe B and C the same bit MBZ.
193*f005ef32Sjsg */
194*f005ef32Sjsg if (drm_WARN_ON_ONCE(&dev_priv->drm,
195*f005ef32Sjsg status_mask & PIPE_A_PSR_STATUS_VLV))
196*f005ef32Sjsg return 0;
197*f005ef32Sjsg /*
198*f005ef32Sjsg * On pipe B and C we don't support the PSR interrupt yet, on pipe
199*f005ef32Sjsg * A the same bit is for perf counters which we don't use either.
200*f005ef32Sjsg */
201*f005ef32Sjsg if (drm_WARN_ON_ONCE(&dev_priv->drm,
202*f005ef32Sjsg status_mask & PIPE_B_PSR_STATUS_VLV))
203*f005ef32Sjsg return 0;
204*f005ef32Sjsg
205*f005ef32Sjsg enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
206*f005ef32Sjsg SPRITE0_FLIP_DONE_INT_EN_VLV |
207*f005ef32Sjsg SPRITE1_FLIP_DONE_INT_EN_VLV);
208*f005ef32Sjsg if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
209*f005ef32Sjsg enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
210*f005ef32Sjsg if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
211*f005ef32Sjsg enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
212*f005ef32Sjsg
213*f005ef32Sjsg out:
214*f005ef32Sjsg drm_WARN_ONCE(&dev_priv->drm,
215*f005ef32Sjsg enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
216*f005ef32Sjsg status_mask & ~PIPESTAT_INT_STATUS_MASK,
217*f005ef32Sjsg "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
218*f005ef32Sjsg pipe_name(pipe), enable_mask, status_mask);
219*f005ef32Sjsg
220*f005ef32Sjsg return enable_mask;
221*f005ef32Sjsg }
222*f005ef32Sjsg
/* Enable the given PIPESTAT status bits (and their enable bits) on @pipe. */
void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t pipestat = PIPESTAT(pipe);
	u32 enable;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* Already fully enabled? Nothing to do. */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, pipestat, enable | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, pipestat);
}
245*f005ef32Sjsg
/* Disable the given PIPESTAT status bits (and their enable bits) on @pipe. */
void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t pipestat = PIPESTAT(pipe);
	u32 enable;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	/* None of the requested bits are enabled? Nothing to do. */
	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, pipestat, enable | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, pipestat);
}
268*f005ef32Sjsg
i915_has_asle(struct drm_i915_private * dev_priv)269*f005ef32Sjsg static bool i915_has_asle(struct drm_i915_private *dev_priv)
270*f005ef32Sjsg {
271*f005ef32Sjsg if (!dev_priv->display.opregion.asle)
272*f005ef32Sjsg return false;
273*f005ef32Sjsg
274*f005ef32Sjsg return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
275*f005ef32Sjsg }
276*f005ef32Sjsg
277*f005ef32Sjsg /**
278*f005ef32Sjsg * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
279*f005ef32Sjsg * @dev_priv: i915 device private
280*f005ef32Sjsg */
i915_enable_asle_pipestat(struct drm_i915_private * dev_priv)281*f005ef32Sjsg void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
282*f005ef32Sjsg {
283*f005ef32Sjsg if (!i915_has_asle(dev_priv))
284*f005ef32Sjsg return;
285*f005ef32Sjsg
286*f005ef32Sjsg spin_lock_irq(&dev_priv->irq_lock);
287*f005ef32Sjsg
288*f005ef32Sjsg i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
289*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 4)
290*f005ef32Sjsg i915_enable_pipestat(dev_priv, PIPE_A,
291*f005ef32Sjsg PIPE_LEGACY_BLC_EVENT_STATUS);
292*f005ef32Sjsg
293*f005ef32Sjsg spin_unlock_irq(&dev_priv->irq_lock);
294*f005ef32Sjsg }
295*f005ef32Sjsg
296*f005ef32Sjsg #if defined(CONFIG_DEBUG_FS)
display_pipe_crc_irq_handler(struct drm_i915_private * dev_priv,enum pipe pipe,u32 crc0,u32 crc1,u32 crc2,u32 crc3,u32 crc4)297*f005ef32Sjsg static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
298*f005ef32Sjsg enum pipe pipe,
299*f005ef32Sjsg u32 crc0, u32 crc1,
300*f005ef32Sjsg u32 crc2, u32 crc3,
301*f005ef32Sjsg u32 crc4)
302*f005ef32Sjsg {
303*f005ef32Sjsg struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
304*f005ef32Sjsg struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
305*f005ef32Sjsg u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
306*f005ef32Sjsg
307*f005ef32Sjsg trace_intel_pipe_crc(crtc, crcs);
308*f005ef32Sjsg
309*f005ef32Sjsg spin_lock(&pipe_crc->lock);
310*f005ef32Sjsg /*
311*f005ef32Sjsg * For some not yet identified reason, the first CRC is
312*f005ef32Sjsg * bonkers. So let's just wait for the next vblank and read
313*f005ef32Sjsg * out the buggy result.
314*f005ef32Sjsg *
315*f005ef32Sjsg * On GEN8+ sometimes the second CRC is bonkers as well, so
316*f005ef32Sjsg * don't trust that one either.
317*f005ef32Sjsg */
318*f005ef32Sjsg if (pipe_crc->skipped <= 0 ||
319*f005ef32Sjsg (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
320*f005ef32Sjsg pipe_crc->skipped++;
321*f005ef32Sjsg spin_unlock(&pipe_crc->lock);
322*f005ef32Sjsg return;
323*f005ef32Sjsg }
324*f005ef32Sjsg spin_unlock(&pipe_crc->lock);
325*f005ef32Sjsg
326*f005ef32Sjsg drm_crtc_add_crc_entry(&crtc->base, true,
327*f005ef32Sjsg drm_crtc_accurate_vblank_count(&crtc->base),
328*f005ef32Sjsg crcs);
329*f005ef32Sjsg }
330*f005ef32Sjsg #else
331*f005ef32Sjsg static inline void
display_pipe_crc_irq_handler(struct drm_i915_private * dev_priv,enum pipe pipe,u32 crc0,u32 crc1,u32 crc2,u32 crc3,u32 crc4)332*f005ef32Sjsg display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
333*f005ef32Sjsg enum pipe pipe,
334*f005ef32Sjsg u32 crc0, u32 crc1,
335*f005ef32Sjsg u32 crc2, u32 crc3,
336*f005ef32Sjsg u32 crc4) {}
337*f005ef32Sjsg #endif
338*f005ef32Sjsg
/*
 * Deliver the pending page-flip completion event for @pipe, if any.
 *
 * Consumes crtc_state->event under dev->event_lock and hands it to the
 * DRM core for delivery to userspace.
 *
 * NOTE(review): crtc_state->event is passed to drm_crtc_send_vblank_event()
 * without a NULL check — presumably the flip-done interrupt only fires
 * while an event is pending; confirm against the irq-enable sites.
 */
static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	/* Claim the event under the lock so it is sent exactly once. */
	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
356*f005ef32Sjsg
hsw_pipe_crc_irq_handler(struct drm_i915_private * dev_priv,enum pipe pipe)357*f005ef32Sjsg static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
358*f005ef32Sjsg enum pipe pipe)
359*f005ef32Sjsg {
360*f005ef32Sjsg display_pipe_crc_irq_handler(dev_priv, pipe,
361*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
362*f005ef32Sjsg 0, 0, 0, 0);
363*f005ef32Sjsg }
364*f005ef32Sjsg
ivb_pipe_crc_irq_handler(struct drm_i915_private * dev_priv,enum pipe pipe)365*f005ef32Sjsg static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
366*f005ef32Sjsg enum pipe pipe)
367*f005ef32Sjsg {
368*f005ef32Sjsg display_pipe_crc_irq_handler(dev_priv, pipe,
369*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
370*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
371*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
372*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
373*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
374*f005ef32Sjsg }
375*f005ef32Sjsg
i9xx_pipe_crc_irq_handler(struct drm_i915_private * dev_priv,enum pipe pipe)376*f005ef32Sjsg static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
377*f005ef32Sjsg enum pipe pipe)
378*f005ef32Sjsg {
379*f005ef32Sjsg u32 res1, res2;
380*f005ef32Sjsg
381*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 3)
382*f005ef32Sjsg res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
383*f005ef32Sjsg else
384*f005ef32Sjsg res1 = 0;
385*f005ef32Sjsg
386*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
387*f005ef32Sjsg res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
388*f005ef32Sjsg else
389*f005ef32Sjsg res2 = 0;
390*f005ef32Sjsg
391*f005ef32Sjsg display_pipe_crc_irq_handler(dev_priv, pipe,
392*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
393*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
394*f005ef32Sjsg intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
395*f005ef32Sjsg res1, res2);
396*f005ef32Sjsg }
397*f005ef32Sjsg
i9xx_pipestat_irq_reset(struct drm_i915_private * dev_priv)398*f005ef32Sjsg void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
399*f005ef32Sjsg {
400*f005ef32Sjsg enum pipe pipe;
401*f005ef32Sjsg
402*f005ef32Sjsg for_each_pipe(dev_priv, pipe) {
403*f005ef32Sjsg intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
404*f005ef32Sjsg PIPESTAT_INT_STATUS_MASK |
405*f005ef32Sjsg PIPE_FIFO_UNDERRUN_STATUS);
406*f005ef32Sjsg
407*f005ef32Sjsg dev_priv->pipestat_irq_mask[pipe] = 0;
408*f005ef32Sjsg }
409*f005ef32Sjsg }
410*f005ef32Sjsg
/*
 * Read and acknowledge the PIPESTAT registers for all pipes, returning the
 * status bits of interest in @pipe_stats.
 *
 * Runs in hardirq context: takes irq_lock with plain spin_lock() since
 * interrupts are already disabled here.
 */
void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
			   u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		/* Only look at the rest of the bits if the IIR event fired. */
		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}
476*f005ef32Sjsg
/* Dispatch per-pipe PIPESTAT events for gen2 platforms. */
void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 stats = pipe_stats[pipe];

		if (stats & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (stats & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (stats & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}
493*f005ef32Sjsg
/* Dispatch per-pipe PIPESTAT events for gen3/gen4 platforms. */
void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 stats = pipe_stats[pipe];

		if (stats & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (stats & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (stats & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (stats & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* Backlight event on any pipe, or an explicit ASLE interrupt. */
	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}
517*f005ef32Sjsg
/* Dispatch per-pipe PIPESTAT events for i965/g4x platforms. */
void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
			       u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 stats = pipe_stats[pipe];

		if (stats & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (stats & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (stats & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (stats & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* Backlight event on any pipe, or an explicit ASLE interrupt. */
	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	/* GMBUS status is reported via the pipe A slot. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(dev_priv);
}
544*f005ef32Sjsg
/* Dispatch per-pipe PIPESTAT events for VLV/CHV platforms. */
void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				     u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		u32 stats = pipe_stats[pipe];

		if (stats & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (stats & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (stats & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (stats & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	/* GMBUS status is reported via the pipe A slot. */
	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		intel_gmbus_irq_handler(dev_priv);
}
567*f005ef32Sjsg
/*
 * Handle south display engine (IBX PCH) interrupts: hotplug, AUX, GMBUS,
 * audio, FDI and transcoder error/underrun events.
 */
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	ibx_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		/*
		 * port_name() yields a character, so use %c like
		 * cpt_irq_handler() does; %d printed the ASCII code.
		 */
		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		intel_dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS)
		intel_gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		drm_err(&dev_priv->drm, "PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		drm_dbg(&dev_priv->drm,
			"PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
}
617*f005ef32Sjsg
ivb_err_int_handler(struct drm_i915_private * dev_priv)618*f005ef32Sjsg static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
619*f005ef32Sjsg {
620*f005ef32Sjsg u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
621*f005ef32Sjsg enum pipe pipe;
622*f005ef32Sjsg
623*f005ef32Sjsg if (err_int & ERR_INT_POISON)
624*f005ef32Sjsg drm_err(&dev_priv->drm, "Poison interrupt\n");
625*f005ef32Sjsg
626*f005ef32Sjsg for_each_pipe(dev_priv, pipe) {
627*f005ef32Sjsg if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
628*f005ef32Sjsg intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
629*f005ef32Sjsg
630*f005ef32Sjsg if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
631*f005ef32Sjsg if (IS_IVYBRIDGE(dev_priv))
632*f005ef32Sjsg ivb_pipe_crc_irq_handler(dev_priv, pipe);
633*f005ef32Sjsg else
634*f005ef32Sjsg hsw_pipe_crc_irq_handler(dev_priv, pipe);
635*f005ef32Sjsg }
636*f005ef32Sjsg }
637*f005ef32Sjsg
638*f005ef32Sjsg intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
639*f005ef32Sjsg }
640*f005ef32Sjsg
cpt_serr_int_handler(struct drm_i915_private * dev_priv)641*f005ef32Sjsg static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
642*f005ef32Sjsg {
643*f005ef32Sjsg u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
644*f005ef32Sjsg enum pipe pipe;
645*f005ef32Sjsg
646*f005ef32Sjsg if (serr_int & SERR_INT_POISON)
647*f005ef32Sjsg drm_err(&dev_priv->drm, "PCH poison interrupt\n");
648*f005ef32Sjsg
649*f005ef32Sjsg for_each_pipe(dev_priv, pipe)
650*f005ef32Sjsg if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
651*f005ef32Sjsg intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
652*f005ef32Sjsg
653*f005ef32Sjsg intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
654*f005ef32Sjsg }
655*f005ef32Sjsg
/* Handle south display engine (CPT/PPT PCH) interrupts. */
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
	u32 hotplug = pch_iir & SDE_HOTPLUG_MASK_CPT;
	enum pipe pipe;

	ibx_hpd_irq_handler(dev_priv, hotplug);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);

		drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
			port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		intel_dp_aux_irq_handler(dev_priv);

	if (pch_iir & SDE_GMBUS_CPT)
		intel_gmbus_irq_handler(dev_priv);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT) {
		for_each_pipe(dev_priv, pipe)
			drm_dbg(&dev_priv->drm, "  pipe %c FDI IIR: 0x%08x\n",
				pipe_name(pipe),
				intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
	}

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev_priv);
}
692*f005ef32Sjsg
/*
 * Top-level display interrupt handler for ILK/SNB (DE IIR already read
 * and cleared by the caller): dispatches hotplug, AUX, opregion,
 * per-pipe vblank/flip/underrun/CRC, chained PCH events and, on
 * display version 5, the PCU/RPS event.
 */
void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_AUX_CHANNEL_A)
		intel_dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev_priv);

	if (de_iir & DE_POISON)
		drm_err(&dev_priv->drm, "Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
			flip_done_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		if (HAS_PCH_CPT(dev_priv))
			cpt_irq_handler(dev_priv, pch_iir);
		else
			ibx_irq_handler(dev_priv, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}

	if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
		gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}
740*f005ef32Sjsg
/*
 * Top-level display interrupt handler for IVB/HSW (DE IIR already read
 * and cleared by the caller): hotplug, error interrupts, eDP PSR, AUX,
 * opregion, per-pipe vblank/flip-done, and chained PCH events.
 */
void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
{
	enum pipe pipe;
	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;

	if (hotplug_trigger)
		ilk_hpd_irq_handler(dev_priv, hotplug_trigger);

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev_priv);

	if (de_iir & DE_EDP_PSR_INT_HSW) {
		struct intel_encoder *encoder;

		/*
		 * Only a single eDP PSR instance exists here; the loop just
		 * finds the (at most one) PSR-capable encoder, hence the
		 * unconditional break after handling it.
		 */
		for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
			u32 psr_iir;

			/* rmw with 0/0 reads and clears EDP_PSR_IIR. */
			psr_iir = intel_uncore_rmw(&dev_priv->uncore,
						   EDP_PSR_IIR, 0, 0);
			intel_psr_irq_handler(intel_dp, psr_iir);
			break;
		}
	}

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		intel_dp_aux_irq_handler(dev_priv);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev_priv);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
			intel_handle_vblank(dev_priv, pipe);

		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
			flip_done_handler(dev_priv, pipe);
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);

		cpt_irq_handler(dev_priv, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
	}
}
790*f005ef32Sjsg
gen8_de_port_aux_mask(struct drm_i915_private * dev_priv)791*f005ef32Sjsg static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
792*f005ef32Sjsg {
793*f005ef32Sjsg u32 mask;
794*f005ef32Sjsg
795*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 14)
796*f005ef32Sjsg return TGL_DE_PORT_AUX_DDIA |
797*f005ef32Sjsg TGL_DE_PORT_AUX_DDIB;
798*f005ef32Sjsg else if (DISPLAY_VER(dev_priv) >= 13)
799*f005ef32Sjsg return TGL_DE_PORT_AUX_DDIA |
800*f005ef32Sjsg TGL_DE_PORT_AUX_DDIB |
801*f005ef32Sjsg TGL_DE_PORT_AUX_DDIC |
802*f005ef32Sjsg XELPD_DE_PORT_AUX_DDID |
803*f005ef32Sjsg XELPD_DE_PORT_AUX_DDIE |
804*f005ef32Sjsg TGL_DE_PORT_AUX_USBC1 |
805*f005ef32Sjsg TGL_DE_PORT_AUX_USBC2 |
806*f005ef32Sjsg TGL_DE_PORT_AUX_USBC3 |
807*f005ef32Sjsg TGL_DE_PORT_AUX_USBC4;
808*f005ef32Sjsg else if (DISPLAY_VER(dev_priv) >= 12)
809*f005ef32Sjsg return TGL_DE_PORT_AUX_DDIA |
810*f005ef32Sjsg TGL_DE_PORT_AUX_DDIB |
811*f005ef32Sjsg TGL_DE_PORT_AUX_DDIC |
812*f005ef32Sjsg TGL_DE_PORT_AUX_USBC1 |
813*f005ef32Sjsg TGL_DE_PORT_AUX_USBC2 |
814*f005ef32Sjsg TGL_DE_PORT_AUX_USBC3 |
815*f005ef32Sjsg TGL_DE_PORT_AUX_USBC4 |
816*f005ef32Sjsg TGL_DE_PORT_AUX_USBC5 |
817*f005ef32Sjsg TGL_DE_PORT_AUX_USBC6;
818*f005ef32Sjsg
819*f005ef32Sjsg mask = GEN8_AUX_CHANNEL_A;
820*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 9)
821*f005ef32Sjsg mask |= GEN9_AUX_CHANNEL_B |
822*f005ef32Sjsg GEN9_AUX_CHANNEL_C |
823*f005ef32Sjsg GEN9_AUX_CHANNEL_D;
824*f005ef32Sjsg
825*f005ef32Sjsg if (DISPLAY_VER(dev_priv) == 11) {
826*f005ef32Sjsg mask |= ICL_AUX_CHANNEL_F;
827*f005ef32Sjsg mask |= ICL_AUX_CHANNEL_E;
828*f005ef32Sjsg }
829*f005ef32Sjsg
830*f005ef32Sjsg return mask;
831*f005ef32Sjsg }
832*f005ef32Sjsg
gen8_de_pipe_fault_mask(struct drm_i915_private * dev_priv)833*f005ef32Sjsg static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
834*f005ef32Sjsg {
835*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
836*f005ef32Sjsg return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
837*f005ef32Sjsg else if (DISPLAY_VER(dev_priv) >= 11)
838*f005ef32Sjsg return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
839*f005ef32Sjsg else if (DISPLAY_VER(dev_priv) >= 9)
840*f005ef32Sjsg return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
841*f005ef32Sjsg else
842*f005ef32Sjsg return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
843*f005ef32Sjsg }
844*f005ef32Sjsg
/* Wake any waiters blocked on a Punit PM demand response. */
static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.pmdemand.waitqueue);
}
849*f005ef32Sjsg
850*f005ef32Sjsg static void
gen8_de_misc_irq_handler(struct drm_i915_private * dev_priv,u32 iir)851*f005ef32Sjsg gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
852*f005ef32Sjsg {
853*f005ef32Sjsg bool found = false;
854*f005ef32Sjsg
855*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 14) {
856*f005ef32Sjsg if (iir & (XELPDP_PMDEMAND_RSP |
857*f005ef32Sjsg XELPDP_PMDEMAND_RSPTOUT_ERR)) {
858*f005ef32Sjsg if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR)
859*f005ef32Sjsg drm_dbg(&dev_priv->drm,
860*f005ef32Sjsg "Error waiting for Punit PM Demand Response\n");
861*f005ef32Sjsg
862*f005ef32Sjsg intel_pmdemand_irq_handler(dev_priv);
863*f005ef32Sjsg found = true;
864*f005ef32Sjsg }
865*f005ef32Sjsg } else if (iir & GEN8_DE_MISC_GSE) {
866*f005ef32Sjsg intel_opregion_asle_intr(dev_priv);
867*f005ef32Sjsg found = true;
868*f005ef32Sjsg }
869*f005ef32Sjsg
870*f005ef32Sjsg if (iir & GEN8_DE_EDP_PSR) {
871*f005ef32Sjsg struct intel_encoder *encoder;
872*f005ef32Sjsg u32 psr_iir;
873*f005ef32Sjsg i915_reg_t iir_reg;
874*f005ef32Sjsg
875*f005ef32Sjsg for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
876*f005ef32Sjsg struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
877*f005ef32Sjsg
878*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 12)
879*f005ef32Sjsg iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
880*f005ef32Sjsg else
881*f005ef32Sjsg iir_reg = EDP_PSR_IIR;
882*f005ef32Sjsg
883*f005ef32Sjsg psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0);
884*f005ef32Sjsg
885*f005ef32Sjsg if (psr_iir)
886*f005ef32Sjsg found = true;
887*f005ef32Sjsg
888*f005ef32Sjsg intel_psr_irq_handler(intel_dp, psr_iir);
889*f005ef32Sjsg
890*f005ef32Sjsg /* prior GEN12 only have one EDP PSR */
891*f005ef32Sjsg if (DISPLAY_VER(dev_priv) < 12)
892*f005ef32Sjsg break;
893*f005ef32Sjsg }
894*f005ef32Sjsg }
895*f005ef32Sjsg
896*f005ef32Sjsg if (!found)
897*f005ef32Sjsg drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
898*f005ef32Sjsg }
899*f005ef32Sjsg
/*
 * Handle a DSI TE (tearing effect) interrupt on display 11+: figure
 * out which DSI transcoder/pipe the TE belongs to, deliver a vblank
 * event for that pipe, and clear the TE bit in the DSI IIR.
 */
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
					   u32 te_trigger)
{
	enum pipe pipe = INVALID_PIPE;
	enum transcoder dsi_trans;
	enum port port;
	u32 val;

	/*
	 * Incase of dual link, TE comes from DSI_1
	 * this is to check if dual link is enabled
	 */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
	val &= PORT_SYNC_MODE_ENABLE;

	/*
	 * if dual link is enabled, then read DSI_0
	 * transcoder registers
	 */
	port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
						  PORT_A : PORT_B;
	dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;

	/* Check if DSI configured in command mode */
	val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
	val = val & OP_MODE_MASK;

	/* TE is only meaningful in command mode; bail out otherwise. */
	if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
		drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
		return;
	}

	/* Get PIPE for handling VBLANK event */
	val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
	switch (val & TRANS_DDI_EDP_INPUT_MASK) {
	case TRANS_DDI_EDP_INPUT_A_ON:
		pipe = PIPE_A;
		break;
	case TRANS_DDI_EDP_INPUT_B_ONOFF:
		pipe = PIPE_B;
		break;
	case TRANS_DDI_EDP_INPUT_C_ONOFF:
		pipe = PIPE_C;
		break;
	default:
		drm_err(&dev_priv->drm, "Invalid PIPE\n");
		return;
	}

	intel_handle_vblank(dev_priv, pipe);

	/* clear TE in dsi IIR */
	port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
}
955*f005ef32Sjsg
gen8_de_pipe_flip_done_mask(struct drm_i915_private * i915)956*f005ef32Sjsg static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
957*f005ef32Sjsg {
958*f005ef32Sjsg if (DISPLAY_VER(i915) >= 9)
959*f005ef32Sjsg return GEN9_PIPE_PLANE1_FLIP_DONE;
960*f005ef32Sjsg else
961*f005ef32Sjsg return GEN8_PIPE_PRIMARY_FLIP_DONE;
962*f005ef32Sjsg }
963*f005ef32Sjsg
gen8_de_pipe_underrun_mask(struct drm_i915_private * dev_priv)964*f005ef32Sjsg u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
965*f005ef32Sjsg {
966*f005ef32Sjsg u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
967*f005ef32Sjsg
968*f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 13)
969*f005ef32Sjsg mask |= XELPD_PIPE_SOFT_UNDERRUN |
970*f005ef32Sjsg XELPD_PIPE_HARD_UNDERRUN;
971*f005ef32Sjsg
972*f005ef32Sjsg return mask;
973*f005ef32Sjsg }
974*f005ef32Sjsg
/*
 * Read and acknowledge the south display (SDEIIR) and, on MTP+, the
 * PICA interrupt identity registers. Returns both through out
 * parameters; *pica_iir is zero when no PICA interrupt is pending.
 */
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(i915, SDEIIR);
	if (!*pch_iir)
		return;

	/**
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		/* PICA interrupts should only exist on MTP and later PCHs. */
		drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);

		/* Mask PICA (rmw returns the previous IER for restore). */
		pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
		intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(i915, SDEIIR, *pch_iir);

	/* Restore PICA IER only if we masked it above. */
	if (pica_ier)
		intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
}
1002*f005ef32Sjsg
/*
 * Main display engine interrupt dispatcher for GEN8+. The caller has
 * read master_ctl; this routine reads, acks and handles each pending
 * sub-domain IIR (MISC, HPD, PORT, per-pipe, PCH). Each IIR is written
 * back before its events are handled so new edges are not lost.
 */
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			/* Ack first, then handle. */
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			/* Track whether any recognized port event fired. */
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				intel_dp_aux_irq_handler(dev_priv);
				found = true;
			}

			/* Hotplug lives in the port IIR on BXT/GLK and BDW. */
			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			/* BXT/GLK route GMBUS through the port IIR too. */
			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(dev_priv);
				found = true;
			}

			/* DSI TE events (display 11+). */
			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			if (!found)
				drm_err_ratelimited(&dev_priv->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
			flip_done_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err_ratelimited(&dev_priv->drm,
					    "Fault errors on pipe %c: 0x%08x\n",
					    pipe_name(pipe),
					    fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(dev_priv, pica_iir);

			/* Dispatch to the PCH-generation-specific handler. */
			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}
1149*f005ef32Sjsg
/*
 * If a GU MISC interrupt is pending per master_ctl, read and ack its
 * IIR and return the raw bits; returns 0 when nothing is pending.
 */
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}
1164*f005ef32Sjsg
/* Handle a previously-acked GU MISC interrupt (GSE -> opregion ASLE). */
void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(i915);
}
1170*f005ef32Sjsg
/*
 * Display interrupt entry point for display version 11+: disable the
 * display interrupt master, dispatch to the gen8 DE handler, then
 * re-enable. RPM wakeref asserts are suppressed for the duration.
 */
void gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
1188*f005ef32Sjsg
1189*f005ef32Sjsg /* Called from drm generic code, passed 'crtc' which
1190*f005ef32Sjsg * we use as a pipe index
1191*f005ef32Sjsg */
/*
 * drm_crtc_funcs::enable_vblank for gen2: enable the vblank pipestat
 * bit for this crtc's pipe under the irq lock. Always returns 0.
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
1204*f005ef32Sjsg
/*
 * i915GM variant of enable_vblank: additionally disables render clock
 * gating while any vblank irq is enabled (see comment below), then
 * defers to the common i8xx path.
 */
int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}
1220*f005ef32Sjsg
/*
 * drm_crtc_funcs::enable_vblank for gen4: uses the "start of vblank"
 * pipestat bit rather than gen2's plain vblank status.
 */
int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}
1234*f005ef32Sjsg
/*
 * drm_crtc_funcs::enable_vblank for ILK through HSW: unmasks the
 * per-pipe vblank bit in DEIMR (IVB+ uses a different bit layout) and
 * resyncs the vblank counter when PSR may have stalled it.
 */
int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}
1255*f005ef32Sjsg
/*
 * Enable or disable the DSI TE event for a command-mode DSI crtc.
 * Returns true if the crtc uses TE (and was configured here), false if
 * the caller should fall back to regular vblank interrupts.
 */
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum port port;

	/* Not a TE-driven (command mode) crtc -> nothing to do here. */
	if (!(intel_crtc->mode_flags &
	    (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	/* Clearing DSI_TE_EVENT in the mask register enables the event. */
	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
			 enable ? 0 : DSI_TE_EVENT);

	/* rmw with 0/0 reads and clears any stale identity bits. */
	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);

	return true;
}
1279*f005ef32Sjsg
/*
 * drm_crtc_funcs::enable_vblank for BDW+: command-mode DSI crtcs use
 * the TE event instead of the pipe vblank irq; everyone else unmasks
 * GEN8_PIPE_VBLANK. Always returns 0.
 */
int bdw_enable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	/* TE-driven DSI handled entirely via the DSI interrupt path. */
	if (gen11_dsi_configure_te(crtc, true))
		return 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(&crtc->base);

	return 0;
}
1302*f005ef32Sjsg
1303*f005ef32Sjsg /* Called from drm generic code, passed 'crtc' which
1304*f005ef32Sjsg * we use as a pipe index
1305*f005ef32Sjsg */
/*
 * drm_crtc_funcs::disable_vblank for gen2: counterpart of
 * i8xx_enable_vblank(), clears the vblank pipestat bit.
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
1316*f005ef32Sjsg
/*
 * i915GM counterpart of i915gm_enable_vblank(): disables the vblank
 * irq, then re-enables render clock gating once no vblank irqs remain.
 */
void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	/* Last user gone -> allow render clock gating in C-states again. */
	if (--dev_priv->vblank_enabled == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
1326*f005ef32Sjsg
/*
 * drm_crtc_funcs::disable_vblank for gen4: counterpart of
 * i965_enable_vblank(), clears the start-of-vblank pipestat bit.
 */
void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
1338*f005ef32Sjsg
/*
 * drm_crtc_funcs::disable_vblank for ILK through HSW: counterpart of
 * ilk_enable_vblank(), masks the per-pipe vblank bit in DEIMR.
 */
void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
1351*f005ef32Sjsg
/*
 * drm_crtc_funcs::disable_vblank for BDW+: counterpart of
 * bdw_enable_vblank(); TE-driven DSI crtcs disable the TE event,
 * everyone else masks GEN8_PIPE_VBLANK.
 */
void bdw_disable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	if (gen11_dsi_configure_te(crtc, false))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
1366*f005ef32Sjsg
/*
 * Reset VLV/CHV display interrupt state: clear GTT invalid-PTE status,
 * hotplug enables and latched hotplug status, pipestat state, and the
 * VLV IMR/IER/IIR triplet. Leaves irq_mask fully masked (~0u).
 */
void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	/* rmw with 0/0 reads and clears the latched hotplug status. */
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	/* ~0u marks "everything masked"; postinstall asserts on this. */
	dev_priv->irq_mask = ~0u;
}
1384*f005ef32Sjsg
/*
 * (Re-)enable VLV/CHV display interrupts after a reset: set up the
 * pipestat enables (CRC done on every pipe, GMBUS on pipe A) and
 * program the VLV IMR/IER pair. Expects irq_mask == ~0u, i.e. that
 * vlv_display_irq_reset() ran first.
 */
void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	/* GMBUS status is routed through pipe A's pipestat. */
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	/* CHV has a third pipe. */
	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* Catch double-postinstall / missing reset. */
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
1415*f005ef32Sjsg
gen8_display_irq_reset(struct drm_i915_private * dev_priv)1416*f005ef32Sjsg void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
1417*f005ef32Sjsg {
1418*f005ef32Sjsg struct intel_uncore *uncore = &dev_priv->uncore;
1419*f005ef32Sjsg enum pipe pipe;
1420*f005ef32Sjsg
1421*f005ef32Sjsg if (!HAS_DISPLAY(dev_priv))
1422*f005ef32Sjsg return;
1423*f005ef32Sjsg
1424*f005ef32Sjsg intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
1425*f005ef32Sjsg intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
1426*f005ef32Sjsg
1427*f005ef32Sjsg for_each_pipe(dev_priv, pipe)
1428*f005ef32Sjsg if (intel_display_power_is_enabled(dev_priv,
1429*f005ef32Sjsg POWER_DOMAIN_PIPE(pipe)))
1430*f005ef32Sjsg GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
1431*f005ef32Sjsg
1432*f005ef32Sjsg GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
1433*f005ef32Sjsg GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
1434*f005ef32Sjsg }
1435*f005ef32Sjsg
/*
 * Mask and ack all gen11+ display interrupts, skipping any register
 * block whose power well/domain is currently down.
 */
void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	/* Transcoders whose per-transcoder PSR registers get cleared below. */
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Disable the top-level display interrupt before touching the rest. */
	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(dev_priv) >= 12) {
		/* Display 12+ has per-transcoder PSR interrupt registers. */
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			/* Mask and ack all PSR interrupts for this transcoder. */
			intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
		}
	} else {
		/* Single set of eDP PSR interrupt registers on display 11. */
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	/* Only touch pipe interrupt registers whose power well is up. */
	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);

	/* Hotplug moved into the PICA block on display 14+. */
	if (DISPLAY_VER(dev_priv) >= 14)
		GEN3_IRQ_RESET(uncore, PICAINTERRUPT_);
	else
		GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	/* South (PCH) display engine, present from ICP onwards. */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}
1482*f005ef32Sjsg
/*
 * Re-program the per-pipe interrupt registers for pipes in @pipe_mask
 * after their power well has been turned on (the registers lose their
 * state while the well is off).
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 extra_ier = GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	/* Nothing to restore if interrupts are globally disabled. */
	if (intel_irqs_enabled(dev_priv)) {
		for_each_pipe_masked(dev_priv, pipe, pipe_mask)
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  ~dev_priv->de_irq_mask[pipe] | extra_ier);
	}

	spin_unlock_irq(&dev_priv->irq_lock);
}
1506*f005ef32Sjsg
/*
 * Mask and ack the per-pipe interrupts for pipes in @pipe_mask before
 * their power well is turned off.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	bool irqs_on;

	spin_lock_irq(&dev_priv->irq_lock);

	irqs_on = intel_irqs_enabled(dev_priv);
	if (irqs_on) {
		for_each_pipe_masked(dev_priv, pipe, pipe_mask)
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	if (!irqs_on)
		return;

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}
1528*f005ef32Sjsg
1529*f005ef32Sjsg /*
1530*f005ef32Sjsg * SDEIER is also touched by the interrupt handler to work around missed PCH
1531*f005ef32Sjsg * interrupts. Hence we can't update it after the interrupt handler is enabled -
1532*f005ef32Sjsg * instead we unconditionally enable all PCH interrupt sources here, but then
1533*f005ef32Sjsg * only unmask them as needed with SDEIMR.
1534*f005ef32Sjsg *
1535*f005ef32Sjsg * Note that we currently do this after installing the interrupt handler,
1536*f005ef32Sjsg * but before we enable the master interrupt. That should be sufficient
1537*f005ef32Sjsg * to avoid races with the irq handler, assuming we have MSI. Shared legacy
1538*f005ef32Sjsg * interrupts could still race.
1539*f005ef32Sjsg */
ibx_irq_postinstall(struct drm_i915_private * dev_priv)1540*f005ef32Sjsg static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
1541*f005ef32Sjsg {
1542*f005ef32Sjsg struct intel_uncore *uncore = &dev_priv->uncore;
1543*f005ef32Sjsg u32 mask;
1544*f005ef32Sjsg
1545*f005ef32Sjsg if (HAS_PCH_NOP(dev_priv))
1546*f005ef32Sjsg return;
1547*f005ef32Sjsg
1548*f005ef32Sjsg if (HAS_PCH_IBX(dev_priv))
1549*f005ef32Sjsg mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
1550*f005ef32Sjsg else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
1551*f005ef32Sjsg mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
1552*f005ef32Sjsg else
1553*f005ef32Sjsg mask = SDE_GMBUS_CPT;
1554*f005ef32Sjsg
1555*f005ef32Sjsg GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
1556*f005ef32Sjsg }
1557*f005ef32Sjsg
valleyview_enable_display_irqs(struct drm_i915_private * dev_priv)1558*f005ef32Sjsg void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
1559*f005ef32Sjsg {
1560*f005ef32Sjsg lockdep_assert_held(&dev_priv->irq_lock);
1561*f005ef32Sjsg
1562*f005ef32Sjsg if (dev_priv->display_irqs_enabled)
1563*f005ef32Sjsg return;
1564*f005ef32Sjsg
1565*f005ef32Sjsg dev_priv->display_irqs_enabled = true;
1566*f005ef32Sjsg
1567*f005ef32Sjsg if (intel_irqs_enabled(dev_priv)) {
1568*f005ef32Sjsg vlv_display_irq_reset(dev_priv);
1569*f005ef32Sjsg vlv_display_irq_postinstall(dev_priv);
1570*f005ef32Sjsg }
1571*f005ef32Sjsg }
1572*f005ef32Sjsg
valleyview_disable_display_irqs(struct drm_i915_private * dev_priv)1573*f005ef32Sjsg void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
1574*f005ef32Sjsg {
1575*f005ef32Sjsg lockdep_assert_held(&dev_priv->irq_lock);
1576*f005ef32Sjsg
1577*f005ef32Sjsg if (!dev_priv->display_irqs_enabled)
1578*f005ef32Sjsg return;
1579*f005ef32Sjsg
1580*f005ef32Sjsg dev_priv->display_irqs_enabled = false;
1581*f005ef32Sjsg
1582*f005ef32Sjsg if (intel_irqs_enabled(dev_priv))
1583*f005ef32Sjsg vlv_display_irq_reset(dev_priv);
1584*f005ef32Sjsg }
1585*f005ef32Sjsg
/*
 * Install the ILK/SNB/IVB/HSW display engine interrupt masks.
 * @display_mask holds the events unmasked in DEIMR; @extra_mask holds
 * additional sources enabled only in DEIER, to be unmasked on demand
 * (vblank, flip done, underruns, DP-A hotplug).
 */
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 display_mask, extra_mask;

	if (GRAPHICS_VER(i915) >= 7) {
		/* IVB/HSW: three pipes, _IVB variants of the bits. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		/* ILK/SNB: two pipes, per-pipe FIFO underrun bits. */
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(i915)) {
		/* PSR interrupts should have been acked by the reset path. */
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(i915))
		extra_mask |= DE_PCU_EVENT;

	i915->irq_mask = ~display_mask;

	/* Set up the south (PCH) interrupts before enabling DEIER. */
	ibx_irq_postinstall(i915);

	GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
		      display_mask | extra_mask);
}
1626*f005ef32Sjsg
1627*f005ef32Sjsg static void mtp_irq_postinstall(struct drm_i915_private *i915);
1628*f005ef32Sjsg static void icp_irq_postinstall(struct drm_i915_private *i915);
1629*f005ef32Sjsg
/*
 * Install the gen8+ display engine interrupt masks: south (PCH/PICA)
 * interrupts first, then per-pipe, DE port, DE misc and (on display
 * 11-13) the dedicated DE hotplug registers. Pipes whose power well is
 * down are skipped; their registers are restored later by
 * gen8_irq_power_well_post_enable().
 */
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	/* Always-unmasked per-pipe events (faults + CDCLK CRC done). */
	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	/* Transcoders whose PSR IIR is checked on display 12+. */
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* South display interrupts; the register layout depends on the PCH. */
	if (DISPLAY_VER(dev_priv) >= 14)
		mtp_irq_postinstall(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	/* GMBUS is routed through the DE port register on BXT/GLK. */
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
				  XELPDP_PMDEMAND_RSP;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		/* DSI TE events arrive via the DE port register. */
		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	/* Enabled in IER but left masked until explicitly requested. */
	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		/* Per-transcoder PSR IIRs must be clear before enabling. */
		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		/* Powered-down pipes are programmed on power well enable. */
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
		/* Hotplug enabled in IER but fully masked in IMR for now. */
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}
1719*f005ef32Sjsg
mtp_irq_postinstall(struct drm_i915_private * i915)1720*f005ef32Sjsg static void mtp_irq_postinstall(struct drm_i915_private *i915)
1721*f005ef32Sjsg {
1722*f005ef32Sjsg struct intel_uncore *uncore = &i915->uncore;
1723*f005ef32Sjsg u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
1724*f005ef32Sjsg u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
1725*f005ef32Sjsg u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
1726*f005ef32Sjsg XELPDP_TBT_HOTPLUG_MASK;
1727*f005ef32Sjsg
1728*f005ef32Sjsg GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
1729*f005ef32Sjsg de_hpd_enables);
1730*f005ef32Sjsg
1731*f005ef32Sjsg GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
1732*f005ef32Sjsg }
1733*f005ef32Sjsg
icp_irq_postinstall(struct drm_i915_private * dev_priv)1734*f005ef32Sjsg static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
1735*f005ef32Sjsg {
1736*f005ef32Sjsg struct intel_uncore *uncore = &dev_priv->uncore;
1737*f005ef32Sjsg u32 mask = SDE_GMBUS_ICP;
1738*f005ef32Sjsg
1739*f005ef32Sjsg GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
1740*f005ef32Sjsg }
1741*f005ef32Sjsg
gen11_de_irq_postinstall(struct drm_i915_private * dev_priv)1742*f005ef32Sjsg void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
1743*f005ef32Sjsg {
1744*f005ef32Sjsg if (!HAS_DISPLAY(dev_priv))
1745*f005ef32Sjsg return;
1746*f005ef32Sjsg
1747*f005ef32Sjsg gen8_de_irq_postinstall(dev_priv);
1748*f005ef32Sjsg
1749*f005ef32Sjsg intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
1750*f005ef32Sjsg GEN11_DISPLAY_IRQ_ENABLE);
1751*f005ef32Sjsg }
1752*f005ef32Sjsg
dg1_de_irq_postinstall(struct drm_i915_private * i915)1753*f005ef32Sjsg void dg1_de_irq_postinstall(struct drm_i915_private *i915)
1754*f005ef32Sjsg {
1755*f005ef32Sjsg if (!HAS_DISPLAY(i915))
1756*f005ef32Sjsg return;
1757*f005ef32Sjsg
1758*f005ef32Sjsg gen8_de_irq_postinstall(i915);
1759*f005ef32Sjsg intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
1760*f005ef32Sjsg GEN11_DISPLAY_IRQ_ENABLE);
1761*f005ef32Sjsg }
1762*f005ef32Sjsg
intel_display_irq_init(struct drm_i915_private * i915)1763*f005ef32Sjsg void intel_display_irq_init(struct drm_i915_private *i915)
1764*f005ef32Sjsg {
1765*f005ef32Sjsg i915->drm.vblank_disable_immediate = true;
1766*f005ef32Sjsg
1767*f005ef32Sjsg /*
1768*f005ef32Sjsg * Most platforms treat the display irq block as an always-on power
1769*f005ef32Sjsg * domain. vlv/chv can disable it at runtime and need special care to
1770*f005ef32Sjsg * avoid writing any of the display block registers outside of the power
1771*f005ef32Sjsg * domain. We defer setting up the display irqs in this case to the
1772*f005ef32Sjsg * runtime pm.
1773*f005ef32Sjsg */
1774*f005ef32Sjsg i915->display_irqs_enabled = true;
1775*f005ef32Sjsg if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
1776*f005ef32Sjsg i915->display_irqs_enabled = false;
1777*f005ef32Sjsg
1778*f005ef32Sjsg intel_hotplug_irq_init(i915);
1779*f005ef32Sjsg }
1780