/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
#include "intel_snps_phy.h"
#include "skl_universal_plane.h"

/**
 * DOC: Panel Self Refresh (PSR/SRD)
 *
 * Since Haswell the display controller supports Panel Self-Refresh on display
 * panels which have a remote frame buffer (RFB) implemented according to the
 * PSR spec in eDP 1.3. The PSR feature allows the display to go to lower
 * standby states when the system is idle but the display is on, as it
 * eliminates display refresh requests to DDR memory completely as long as the
 * frame buffer for that display is unchanged.
 *
 * Panel Self Refresh must be supported by both Hardware (source) and
 * Panel (sink).
 *
 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
 * to power down the link and memory controller. For DSI panels the same idea
 * is called "manual mode".
 *
 * The implementation uses the hardware-based PSR support which automatically
 * enters/exits self-refresh mode. The hardware takes care of sending the
 * required DP aux message and could even retrain the link (that part isn't
 * enabled yet though). The hardware also keeps track of any frontbuffer
 * changes to know when to exit self-refresh mode again. Unfortunately that
 * part doesn't work too well, hence why the i915 PSR support uses the
 * software frontbuffer tracking to make sure it doesn't miss a screen
 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
 * get called by the frontbuffer tracking code. Note that because of locking
 * issues the self-refresh re-enable code is done from a work queue, which
 * must be correctly synchronized/cancelled when shutting down the pipe.
 *
 * DC3CO (DC3 clock off)
 *
 * On top of PSR2, GEN12 adds an intermediate power savings state that turns
 * the clock off automatically during PSR2 idle state.
 * The smaller overhead of DC3co entry/exit vs. the overhead of PSR2 deep sleep
 * entry/exit allows the HW to enter a low-power state even when page flipping
 * periodically (for instance a 30fps video playback scenario).
 *
 * Every time a flip occurs PSR2 will get out of deep sleep state (if it was),
 * so DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run after 6
 * frames, if no other flip occurs and the function above is executed, DC3CO is
 * disabled and PSR2 is configured to enter deep sleep, resetting again in case
 * of another flip.
 * Front buffer modifications do not trigger DC3CO activation on purpose as it
 * would bring a lot of complexity and most modern systems will only
 * use page flips.
 */
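
/*
 * Rough sketch of the software frontbuffer tracking integration described
 * above (illustrative only; the actual call sites live in the frontbuffer
 * tracking code, not in this file):
 *
 *   CPU/GTT write starts   -> intel_psr_invalidate() -> PSR exits
 *   write finishes/flushes -> intel_psr_flush()      -> re-enable scheduled
 *                                                      via psr.work
 */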

/*
 * Description of PSR mask bits:
 *
 * EDP_PSR_DEBUG[16]/EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw-skl):
 *
 *  When unmasked (nearly) all display register writes (eg. even
 *  SWF) trigger a PSR exit. Some registers are excluded from this
 *  and they have a more specific mask (described below). On icl+
 *  this bit no longer exists and is effectively always set.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+):
 *
 *  When unmasked (nearly) all pipe/plane register writes
 *  trigger a PSR exit. Some plane registers are excluded from this
 *  and they have a more specific mask (described below).
 *
 * CHICKEN_PIPESL_1[11]/SKL_PSR_MASK_PLANE_FLIP (skl+):
 * PIPE_MISC[23]/PIPE_MISC_PSR_MASK_PRIMARY_FLIP (bdw):
 * EDP_PSR_DEBUG[23]/EDP_PSR_DEBUG_MASK_PRIMARY_FLIP (hsw):
 *
 *  When unmasked PRI_SURF/PLANE_SURF writes trigger a PSR exit.
 *  SPR_SURF/CURBASE are not included in this and instead are
 *  controlled by PIPE_MISC_PSR_MASK_PIPE_REG_WRITE (skl+) or
 *  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (hsw/bdw).
 *
 * PIPE_MISC[22]/PIPE_MISC_PSR_MASK_SPRITE_ENABLE (bdw):
 * EDP_PSR_DEBUG[21]/EDP_PSR_DEBUG_MASK_SPRITE_ENABLE (hsw):
 *
 *  When unmasked PSR is blocked as long as the sprite
 *  plane is enabled. skl+ with their universal planes no
 *  longer have a mask bit like this, and no plane being
 *  enabled blocks PSR.
 *
 * PIPE_MISC[21]/PIPE_MISC_PSR_MASK_CURSOR_MOVE (bdw):
 * EDP_PSR_DEBUG[20]/EDP_PSR_DEBUG_MASK_CURSOR_MOVE (hsw):
 *
 *  When unmasked CURPOS writes trigger a PSR exit. On skl+
 *  this doesn't exist but CURPOS is included in the
 *  PIPE_MISC_PSR_MASK_PIPE_REG_WRITE mask.
 *
 * PIPE_MISC[20]/PIPE_MISC_PSR_MASK_VBLANK_VSYNC_INT (bdw+):
 * EDP_PSR_DEBUG[19]/EDP_PSR_DEBUG_MASK_VBLANK_VSYNC_INT (hsw):
 *
 *  When unmasked PSR is blocked as long as the vblank and/or vsync
 *  interrupt is unmasked in IMR *and* enabled in IER.
 *
 * CHICKEN_TRANS[30]/SKL_UNMASK_VBL_TO_PIPE_IN_SRD (skl+):
 * CHICKEN_PAR1_1[15]/HSW_MASK_VBL_TO_PIPE_IN_SRD (hsw/bdw):
 *
 *  Selects whether PSR exit generates an extra vblank before
 *  the first frame is transmitted. Also note the opposite polarity
 *  of the bit on hsw/bdw vs. skl+ (masked==generate the extra vblank,
 *  unmasked==do not generate the extra vblank).
 *
 *  With DC states enabled the extra vblank happens after link training,
 *  with DC states disabled it happens immediately upon PSR exit trigger.
 *  No idea as of now why there is a difference. HSW/BDW (which don't
 *  even have DMC) always generate it after link training. Go figure.
 *
 *  Unfortunately CHICKEN_TRANS itself seems to be double buffered
 *  and thus won't latch until the first vblank. So with DC states
 *  enabled the register effectively uses the reset value during DC5
 *  exit+PSR exit sequence, and thus the bit does nothing until
 *  latched by the vblank that it was trying to prevent from being
 *  generated in the first place. So we should probably call this
 *  one a chicken/egg bit instead on skl+.
 *
 *  In standby mode (as opposed to link-off) this makes no difference
 *  as the timing generator keeps running the whole time generating
 *  normal periodic vblanks.
 *
 *  WaPsrDPAMaskVBlankInSRD asks us to set the bit on hsw/bdw,
 *  and doing so makes the behaviour match the skl+ reset value.
 *
 * CHICKEN_PIPESL_1[0]/BDW_UNMASK_VBL_TO_REGS_IN_SRD (bdw):
 * CHICKEN_PIPESL_1[15]/HSW_UNMASK_VBL_TO_REGS_IN_SRD (hsw):
 *
 *  On BDW without this bit set no vblanks whatsoever are
 *  generated after PSR exit. On HSW this has no apparent effect.
 *  WaPsrDPRSUnmaskVBlankInSRD says to set this.
 *
 * The rest of the bits are more self-explanatory and/or
 * irrelevant for normal operation.
 */

static bool psr_global_enabled(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DEFAULT:
		if (i915->params.enable_psr == -1)
			return connector->panel.vbt.psr.enable;
		return i915->params.enable_psr;
	case I915_PSR_DEBUG_DISABLE:
		return false;
	default:
		return true;
	}
}

static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
	case I915_PSR_DEBUG_DISABLE:
	case I915_PSR_DEBUG_FORCE_PSR1:
		return false;
	default:
		if (i915->params.enable_psr == 1)
			return false;
		return true;
	}
}

static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
		EDP_PSR_ERROR(intel_dp->psr.transcoder);
}

static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
}

static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
}

static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
		EDP_PSR_MASK(intel_dp->psr.transcoder);
}

static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_CTL(cpu_transcoder);
	else
		return HSW_SRD_CTL;
}

static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_DEBUG(cpu_transcoder);
	else
		return HSW_SRD_DEBUG;
}

static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
				   enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_PERF_CNT(cpu_transcoder);
	else
		return HSW_SRD_PERF_CNT;
}

static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
				 enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_STATUS(cpu_transcoder);
	else
		return HSW_SRD_STATUS;
}

static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 12)
		return TRANS_PSR_IMR(cpu_transcoder);
	else
		return EDP_PSR_IMR;
}

static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
			      enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 12)
		return TRANS_PSR_IIR(cpu_transcoder);
	else
		return EDP_PSR_IIR;
}

static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum transcoder cpu_transcoder)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_AUX_CTL(cpu_transcoder);
	else
		return HSW_SRD_AUX_CTL;
}

static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum transcoder cpu_transcoder, int i)
{
	if (DISPLAY_VER(dev_priv) >= 8)
		return EDP_PSR_AUX_DATA(cpu_transcoder, i);
	else
		return HSW_SRD_AUX_DATA(i);
}

static void psr_irq_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 mask;

	mask = psr_irq_psr_error_bit_get(intel_dp);
	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
		mask |= psr_irq_post_exit_bit_get(intel_dp) |
			psr_irq_pre_entry_bit_get(intel_dp);

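	/*
	 * A set bit in the IMR masks (disables) that interrupt, so
	 * writing ~mask leaves only the bits collected in mask above
	 * unmasked: the PSR error bit always, plus the pre-entry and
	 * post-exit bits when I915_PSR_DEBUG_IRQ is requested.
	 */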
	intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
		     psr_irq_mask_get(intel_dp), ~mask);
}

static void psr_event_print(struct drm_i915_private *i915,
			    u32 val, bool psr2_enabled)
{
	drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
	if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
	if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
	if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
	if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
		drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
	if (val & PSR_EVENT_GRAPHICS_RESET)
		drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
	if (val & PSR_EVENT_PCH_INTERRUPT)
		drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
	if (val & PSR_EVENT_MEMORY_UP)
		drm_dbg_kms(&i915->drm, "\tMemory up\n");
	if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
		drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
	if (val & PSR_EVENT_WD_TIMER_EXPIRE)
		drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
	if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
		drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
	if (val & PSR_EVENT_REGISTER_UPDATE)
		drm_dbg_kms(&i915->drm, "\tRegister updated\n");
	if (val & PSR_EVENT_HDCP_ENABLE)
		drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
	if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
		drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
	if (val & PSR_EVENT_VBI_ENABLE)
		drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
	if (val & PSR_EVENT_LPSP_MODE_EXIT)
		drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
	if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
}

void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	ktime_t time_ns = ktime_get();

	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
		intel_dp->psr.last_entry_attempt = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
			    transcoder_name(cpu_transcoder));
	}

	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
		intel_dp->psr.last_exit = time_ns;
		drm_dbg_kms(&dev_priv->drm,
			    "[transcoder %s] PSR exit completed\n",
			    transcoder_name(cpu_transcoder));

		if (DISPLAY_VER(dev_priv) >= 9) {
			u32 val;

			val = intel_de_rmw(dev_priv, PSR_EVENT(cpu_transcoder), 0, 0);

			psr_event_print(dev_priv, val, intel_dp->psr.psr2_enabled);
		}
	}

	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
			 transcoder_name(cpu_transcoder));

		intel_dp->psr.irq_aux_error = true;

		/*
		 * If this interrupt is not masked it will keep
		 * firing so fast that it prevents the scheduled
		 * work from running.
		 * Also, after a PSR error we don't want to arm PSR
		 * again, so we don't care about unmasking the
		 * interrupt or clearing irq_aux_error.
		 */
		intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
			     0, psr_irq_psr_error_bit_get(intel_dp));

		queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
	}
}

static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
	u8 alpm_caps = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
			      &alpm_caps) != 1)
		return false;
	return alpm_caps & DP_ALPM_CAP;
}

static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val = 8; /* assume the worst if we can't read the value */

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
		val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
	else
		drm_dbg_kms(&i915->drm,
			    "Unable to get sink synchronization latency, assuming 8 frames\n");
	return val;
}

static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ssize_t r;
	u16 w;
	u8 y;

	/* If the sink doesn't have specific granularity requirements, set legacy ones */
	if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED)) {
		/* As PSR2 HW sends full lines, we do not care about x granularity */
		w = 4;
		y = 4;
		goto exit;
	}

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &w, 2);
	if (r != 2)
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_X_GRANULARITY\n");
	/*
	 * Spec says that if the value read is 0 the default granularity should
	 * be used instead.
	 */
	if (r != 2 || w == 0)
		w = 4;

	r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_Y_GRANULARITY, &y, 1);
	if (r != 1) {
		drm_dbg_kms(&i915->drm,
			    "Unable to read DP_PSR2_SU_Y_GRANULARITY\n");
		y = 4;
	}
	if (y == 0)
		y = 1;

exit:
	intel_dp->psr.su_w_granularity = w;
	intel_dp->psr.su_y_granularity = y;
}

void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
			 sizeof(intel_dp->psr_dpcd));

	if (!intel_dp->psr_dpcd[0])
		return;
	drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
		    intel_dp->psr_dpcd[0]);

	if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR support not currently available for this panel\n");
		return;
	}

	if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
		drm_dbg_kms(&dev_priv->drm,
			    "Panel lacks power state control, PSR cannot be enabled\n");
		return;
	}

	intel_dp->psr.sink_support = true;
	intel_dp->psr.sink_sync_latency =
		intel_dp_get_sink_sync_latency(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 9 &&
	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
		bool y_req = intel_dp->psr_dpcd[1] &
			     DP_PSR2_SU_Y_COORDINATE_REQUIRED;
		bool alpm = intel_dp_get_alpm_status(intel_dp);

		/*
		 * All panels that support PSR version 03h (PSR2 +
		 * Y-coordinate) can handle Y-coordinates in VSC but we are
		 * only sure that it is going to be used when required by the
		 * panel. This way the panel is capable of doing selective
		 * updates without an aux frame sync.
		 *
		 * To support panels with PSR version 02h, or with PSR
		 * version 03h but without the Y-coordinate requirement, we
		 * would need to enable GTC first.
		 */
		intel_dp->psr.sink_psr2_support = y_req && alpm;
		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
			    intel_dp->psr.sink_psr2_support ? "" : "not ");

		if (intel_dp->psr.sink_psr2_support) {
			intel_dp->psr.colorimetry_support =
				intel_dp_get_colorimetry_status(intel_dp);
			intel_dp_get_su_granularity(intel_dp);
		}
	}
}

static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 aux_clock_divider, aux_ctl;
	/* write DP_SET_POWER=D0 */
	static const u8 aux_msg[] = {
		[0] = (DP_AUX_NATIVE_WRITE << 4) | ((DP_SET_POWER >> 16) & 0xf),
		[1] = (DP_SET_POWER >> 8) & 0xff,
		[2] = DP_SET_POWER & 0xff,
		[3] = 1 - 1,
		[4] = DP_SET_POWER_D0,
	};
	int i;

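	/*
	 * intel_dp_aux_pack() packs up to four message bytes, MSB first,
	 * into each 32-bit AUX data register, so the five byte message
	 * above takes two register writes.
	 */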
	BUILD_BUG_ON(sizeof(aux_msg) > 20);
	for (i = 0; i < sizeof(aux_msg); i += 4)
		intel_de_write(dev_priv,
			       psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
			       intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Start with bits set for DDI_AUX_CTL register */
	aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
					     aux_clock_divider);

	/* Select only valid bits for SRD_AUX_CTL */
	aux_ctl &= EDP_PSR_AUX_CTL_TIME_OUT_MASK |
		EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
		EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
		EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;

	intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
		       aux_ctl);
}

static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 dpcd_val = DP_PSR_ENABLE;

	/* Enable ALPM at sink for psr2 */
	if (intel_dp->psr.psr2_enabled) {
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
				   DP_ALPM_ENABLE |
				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);

		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
	} else {
		if (intel_dp->psr.link_standby)
			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;

		if (DISPLAY_VER(dev_priv) >= 8)
			dpcd_val |= DP_PSR_CRC_VERIFICATION;
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);

	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (DISPLAY_VER(dev_priv) >= 11)
		val |= EDP_PSR_TP4_TIME_0us;

	if (dev_priv->params.psr_safest_params) {
		val |= EDP_PSR_TP1_TIME_2500us;
		val |= EDP_PSR_TP2_TP3_TIME_2500us;
		goto check_tp3_sel;
	}

	if (connector->panel.vbt.psr.tp1_wakeup_time_us == 0)
		val |= EDP_PSR_TP1_TIME_0us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 100)
		val |= EDP_PSR_TP1_TIME_100us;
	else if (connector->panel.vbt.psr.tp1_wakeup_time_us <= 500)
		val |= EDP_PSR_TP1_TIME_500us;
	else
		val |= EDP_PSR_TP1_TIME_2500us;

	if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_0us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR_TP2_TP3_TIME_100us;
	else if (connector->panel.vbt.psr.tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR_TP2_TP3_TIME_500us;
	else
		val |= EDP_PSR_TP2_TP3_TIME_2500us;

	/*
	 * WA 0479: hsw,bdw
	 * "Do not skip both TP1 and TP2/TP3"
	 */
	if (DISPLAY_VER(dev_priv) < 9 &&
	    connector->panel.vbt.psr.tp1_wakeup_time_us == 0 &&
	    connector->panel.vbt.psr.tp2_tp3_wakeup_time_us == 0)
		val |= EDP_PSR_TP2_TP3_TIME_100us;

check_tp3_sel:
	if (intel_dp_source_supports_tps3(dev_priv) &&
	    drm_dp_tps3_supported(intel_dp->dpcd))
		val |= EDP_PSR_TP_TP1_TP3;
	else
		val |= EDP_PSR_TP_TP1_TP2;

	return val;
}
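
/*
 * Example of the bucket selection above (illustrative): a VBT
 * tp1_wakeup_time_us of 200 is not <= 100 but is <= 500, so
 * EDP_PSR_TP1_TIME_500us gets programmed; anything above 500 us falls
 * through to the 2500 us encoding.
 */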

static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int idle_frames;

	/* Let's use 6 as the minimum to cover all known cases including the
	 * off-by-one issue that HW has in some cases.
	 */
	idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);

	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
		idle_frames = 0xf;

	return idle_frames;
}
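
/*
 * Worked example (illustrative): with VBT idle_frames == 2 and a sink
 * sync latency of 8 frames, the first max() yields 6 and the second
 * max(6, 8 + 1) == 9 idle frames, still below the 0xf limit.
 */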

static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 max_sleep_time = 0x1f;
	u32 val = EDP_PSR_ENABLE;

	val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	if (DISPLAY_VER(dev_priv) < 20)
		val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);

	if (IS_HASWELL(dev_priv))
		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr.link_standby)
		val |= EDP_PSR_LINK_STANDBY;

	val |= intel_psr1_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 8)
		val |= EDP_PSR_CRC_ENABLE;

	intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
		     ~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}

static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 val = 0;

	if (dev_priv->params.psr_safest_params)
		return EDP_PSR2_TP2_TIME_2500us;

	if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
	    connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
		val |= EDP_PSR2_TP2_TIME_50us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
		val |= EDP_PSR2_TP2_TIME_100us;
	else if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
		val |= EDP_PSR2_TP2_TIME_500us;
	else
		val |= EDP_PSR2_TP2_TIME_2500us;

	return val;
}

static int psr2_block_count_lines(struct intel_dp *intel_dp)
{
	return intel_dp->psr.io_wake_lines < 9 &&
		intel_dp->psr.fast_wake_lines < 9 ? 8 : 12;
}

static int psr2_block_count(struct intel_dp *intel_dp)
{
	return psr2_block_count_lines(intel_dp) / 4;
}
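
/*
 * Example (illustrative): with io_wake_lines == 7 and fast_wake_lines == 7
 * the block count is 8 lines, so psr2_block_count() == 2; once either
 * wake time needs 9+ lines it becomes 12 lines, i.e. 3, which is what
 * selects TGL_EDP_PSR2_BLOCK_COUNT_NUM_3 in hsw_activate_psr2() below.
 */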

static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	u32 val = EDP_PSR2_ENABLE;

	val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));

	if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
		val |= EDP_SU_TRACK_ENABLE;

	if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
		val |= EDP_Y_COORDINATE_ENABLE;

	val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
	val |= intel_psr2_get_tp_time(intel_dp);

	if (DISPLAY_VER(dev_priv) >= 12) {
		if (psr2_block_count(intel_dp) > 2)
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
		else
			val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2;
	}

	/* Wa_22012278275:adl-p */
	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
		static const u8 map[] = {
			2, /* 5 lines */
			1, /* 6 lines */
			0, /* 7 lines */
			3, /* 8 lines */
			6, /* 9 lines */
			5, /* 10 lines */
			4, /* 11 lines */
			7, /* 12 lines */
		};
		/*
		 * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see
		 * comments below for more information
		 */
		int tmp;

		tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);

		tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
		val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
	} else if (DISPLAY_VER(dev_priv) >= 12) {
		val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
		val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
	}

	if (intel_dp->psr.req_psr2_sdp_prior_scanline)
		val |= EDP_PSR2_SU_SDP_SCANLINE;

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 tmp;

		tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
		drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), 0);
	}

	/*
	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
	 * recommends keeping this bit unset while PSR2 is enabled.
	 */
	intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);

	intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}

static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
{
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
	else if (DISPLAY_VER(dev_priv) >= 12)
		return cpu_transcoder == TRANSCODER_A;
	else if (DISPLAY_VER(dev_priv) >= 9)
		return cpu_transcoder == TRANSCODER_EDP;
	else
		return false;
}

static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
{
	if (!cstate || !cstate->hw.active)
		return 0;

	return DIV_ROUND_UP(1000 * 1000,
			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
}
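
/*
 * E.g. (illustrative): a 60 Hz mode gives DIV_ROUND_UP(1000000, 60) ==
 * 16667 us per frame.
 */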

static void psr2_program_idle_frames(struct intel_dp *intel_dp,
				     u32 idle_frames)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
		     EDP_PSR2_IDLE_FRAMES_MASK,
		     EDP_PSR2_IDLE_FRAMES(idle_frames));
}

static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	psr2_program_idle_frames(intel_dp, 0);
	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
}

static void tgl_dc3co_disable_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);

	mutex_lock(&intel_dp->psr.lock);
	/* If delayed work is pending, it is not idle */
	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
		goto unlock;

	tgl_psr2_disable_dc3co(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
{
	if (!intel_dp->psr.dc3co_exitline)
		return;

	cancel_delayed_work(&intel_dp->psr.dc3co_work);
	/* Before PSR2 exit disallow dc3co */
	tgl_psr2_disable_dc3co(intel_dp);
}

static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
			      struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum port port = dig_port->base.port;

	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		return pipe <= PIPE_B && port <= PORT_B;
	else
		return pipe == PIPE_A && port == PORT_A;
}

static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *crtc_state)
{
	const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
	u32 exit_scanlines;

	/*
	 * FIXME: Due to the changed sequence of activating/deactivating DC3CO,
	 * disable DC3CO until the changed dc3co activating/deactivating sequence
	 * is applied. B.Specs:49196
	 */
	return;

	/*
	 * DMC's DC3CO exit mechanism has an issue with Selective Fetch
	 * TODO: when the issue is addressed, this restriction should be removed.
	 */
	if (crtc_state->enable_psr2_sel_fetch)
		return;

	if (!(power_domains->allowed_dc_mask & DC_STATE_EN_DC3CO))
		return;

	if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
		return;

	/* Wa_16011303918:adl-p */
	if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
		return;

	/*
	 * DC3CO Exit time 200us B.Spec 49196
	 * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
	 */
	exit_scanlines =
		intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;

	if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
		return;

	crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
}

static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
					      struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!dev_priv->params.enable_psr2_sel_fetch &&
	    intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, disabled by parameter\n");
		return false;
	}

	if (crtc_state->uapi.async_flip) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR2 sel fetch not enabled, async flip enabled\n");
		return false;
	}

	return crtc_state->enable_psr2_sel_fetch = true;
}

static bool psr2_granularity_check(struct intel_dp *intel_dp,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
	const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
	u16 y_granularity = 0;

	/* PSR2 HW only sends full lines so we only need to validate the width */
	if (crtc_hdisplay % intel_dp->psr.su_w_granularity)
		return false;

	if (crtc_vdisplay % intel_dp->psr.su_y_granularity)
		return false;

	/* HW tracking is only aligned to 4 lines */
	if (!crtc_state->enable_psr2_sel_fetch)
		return intel_dp->psr.su_y_granularity == 4;

	/*
	 * adl_p and mtl platforms have 1 line granularity.
	 * For other platforms with SW tracking we can adjust the y coordinates
	 * to match the sink requirement if it is a multiple of 4.
	 */
	if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
		y_granularity = intel_dp->psr.su_y_granularity;
	else if (intel_dp->psr.su_y_granularity <= 2)
		y_granularity = 4;
	else if ((intel_dp->psr.su_y_granularity % 4) == 0)
		y_granularity = intel_dp->psr.su_y_granularity;

	if (y_granularity == 0 || crtc_vdisplay % y_granularity)
		return false;

	if (crtc_state->dsc.compression_enable &&
	    vdsc_cfg->slice_height % y_granularity)
		return false;

	crtc_state->su_y_granularity = y_granularity;
	return true;
}
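
/*
 * Example (illustrative): a sink reporting a 2 line Y granularity is
 * bumped to 4 on the non adl_p/mtl platforms above, so a 3840x2160
 * panel passes the check (2160 % 4 == 0) as long as 3840 is also a
 * multiple of the sink's su_w_granularity.
 */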

static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
							struct intel_crtc_state *crtc_state)
{
	const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 hblank_total, hblank_ns, req_ns;

	hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
	hblank_ns = div_u64(1000000ULL * hblank_total, adjusted_mode->crtc_clock);

	/* From spec: ((60 / number of lanes) + 11) * 1000 / symbol clock frequency MHz */
	req_ns = ((60 / crtc_state->lane_count) + 11) * 1000 / (crtc_state->port_clock / 1000);

	if ((hblank_ns - req_ns) > 100)
		return true;

	/* Not supported <13 / Wa_22012279113:adl-p */
	if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
		return false;

	crtc_state->req_psr2_sdp_prior_scanline = true;
	return true;
}
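
/*
 * Worked example of the check above (illustrative): 4 lanes on an HBR2
 * link (port_clock == 540000, i.e. a 540 MHz symbol clock) gives
 * req_ns = ((60 / 4) + 11) * 1000 / 540 ~= 48 ns, so a 280 pixel hblank
 * at a 148.5 MHz dotclock (~1885 ns) leaves far more than the required
 * 100 ns of margin.
 */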

static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
				     struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
	u8 max_wake_lines;

	if (DISPLAY_VER(i915) >= 12) {
		io_wake_time = 42;
		/*
		 * According to Bspec it's 42us, but based on testing
		 * it is not enough -> use 45 us.
		 */
		fast_wake_time = 45;
		max_wake_lines = 12;
	} else {
		io_wake_time = 50;
		fast_wake_time = 32;
		max_wake_lines = 8;
	}

	io_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, io_wake_time);
	fast_wake_lines = intel_usecs_to_scanlines(
		&crtc_state->hw.adjusted_mode, fast_wake_time);

	if (io_wake_lines > max_wake_lines ||
	    fast_wake_lines > max_wake_lines)
		return false;

	if (i915->params.psr_safest_params)
		io_wake_lines = fast_wake_lines = max_wake_lines;

	/* According to Bspec the lower limit should be set as 7 lines. */
	intel_dp->psr.io_wake_lines = max(io_wake_lines, 7);
	intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7);

	return true;
}
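
/*
 * Example (illustrative): for a 1080p60 mode (148.5 MHz pixel clock,
 * htotal 2200, ~14.8 us per line) the 42/45 us TGL wake times round up
 * to 3 and 4 scanlines, which the lower-limit clamp above then raises
 * to 7 each.
 */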
106657e1258aSjsg
intel_psr2_config_valid(struct intel_dp * intel_dp,struct intel_crtc_state * crtc_state)1067c349dbc7Sjsg static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
1068c349dbc7Sjsg struct intel_crtc_state *crtc_state)
1069c349dbc7Sjsg {
1070c349dbc7Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1071c349dbc7Sjsg int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
1072c349dbc7Sjsg int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
1073c349dbc7Sjsg int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
1074c349dbc7Sjsg
10755ca02815Sjsg if (!intel_dp->psr.sink_psr2_support)
1076c349dbc7Sjsg return false;
1077c349dbc7Sjsg
10785ca02815Sjsg /* JSL and EHL only supports eDP 1.3 */
1079f005ef32Sjsg if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
10805ca02815Sjsg drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
10815ca02815Sjsg return false;
10825ca02815Sjsg }
10835ca02815Sjsg
10845ca02815Sjsg /* Wa_16011181250 */
10855ca02815Sjsg if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
10865ca02815Sjsg IS_DG2(dev_priv)) {
10875ca02815Sjsg drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
10885ca02815Sjsg return false;
10895ca02815Sjsg }
10905ca02815Sjsg
1091f005ef32Sjsg if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
10921bb76ff1Sjsg drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
10935ca02815Sjsg return false;
10945ca02815Sjsg }
10955ca02815Sjsg
1096c349dbc7Sjsg if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
1097c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1098c349dbc7Sjsg "PSR2 not supported in transcoder %s\n",
1099c349dbc7Sjsg transcoder_name(crtc_state->cpu_transcoder));
1100c349dbc7Sjsg return false;
1101c349dbc7Sjsg }
1102c349dbc7Sjsg
11035ca02815Sjsg if (!psr2_global_enabled(intel_dp)) {
11045ca02815Sjsg drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
11055ca02815Sjsg return false;
11065ca02815Sjsg }
11075ca02815Sjsg
1108c349dbc7Sjsg /*
1109c349dbc7Sjsg * DSC and PSR2 cannot be enabled simultaneously. If a requested
1110c349dbc7Sjsg * resolution requires DSC to be enabled, priority is given to DSC
1111c349dbc7Sjsg * over PSR2.
1112c349dbc7Sjsg */
1113f005ef32Sjsg if (crtc_state->dsc.compression_enable &&
1114f005ef32Sjsg (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
1115c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1116c349dbc7Sjsg "PSR2 cannot be enabled since DSC is enabled\n");
1117c349dbc7Sjsg return false;
1118c349dbc7Sjsg }
1119c349dbc7Sjsg
1120ad8b1aafSjsg if (crtc_state->crc_enabled) {
1121ad8b1aafSjsg drm_dbg_kms(&dev_priv->drm,
1122ad8b1aafSjsg "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
1123ad8b1aafSjsg return false;
1124ad8b1aafSjsg }
1125ad8b1aafSjsg
11265ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 12) {
1127c349dbc7Sjsg psr_max_h = 5120;
1128c349dbc7Sjsg psr_max_v = 3200;
1129c349dbc7Sjsg max_bpp = 30;
11305ca02815Sjsg } else if (DISPLAY_VER(dev_priv) >= 10) {
1131c349dbc7Sjsg psr_max_h = 4096;
1132c349dbc7Sjsg psr_max_v = 2304;
1133c349dbc7Sjsg max_bpp = 24;
11345ca02815Sjsg } else if (DISPLAY_VER(dev_priv) == 9) {
1135c349dbc7Sjsg psr_max_h = 3640;
1136c349dbc7Sjsg psr_max_v = 2304;
1137c349dbc7Sjsg max_bpp = 24;
1138c349dbc7Sjsg }
1139c349dbc7Sjsg
1140c349dbc7Sjsg if (crtc_state->pipe_bpp > max_bpp) {
1141c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1142c349dbc7Sjsg "PSR2 not enabled, pipe bpp %d > max supported %d\n",
1143c349dbc7Sjsg crtc_state->pipe_bpp, max_bpp);
1144c349dbc7Sjsg return false;
1145c349dbc7Sjsg }
1146c349dbc7Sjsg
1147e1b3a0e7Sjsg /* Wa_16011303918:adl-p */
1148e1b3a0e7Sjsg if (crtc_state->vrr.enable &&
1149f005ef32Sjsg IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
1150e1b3a0e7Sjsg drm_dbg_kms(&dev_priv->drm,
1151e1b3a0e7Sjsg "PSR2 not enabled, not compatible with HW stepping + VRR\n");
1152e1b3a0e7Sjsg return false;
1153e1b3a0e7Sjsg }
1154e1b3a0e7Sjsg
1155e1b3a0e7Sjsg if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
1156e1b3a0e7Sjsg drm_dbg_kms(&dev_priv->drm,
1157e1b3a0e7Sjsg			    "PSR2 not enabled, PSR2 SDP indication does not fit in hblank\n");
1158e1b3a0e7Sjsg return false;
1159e1b3a0e7Sjsg }
1160e1b3a0e7Sjsg
116157e1258aSjsg if (!_compute_psr2_wake_times(intel_dp, crtc_state)) {
116257e1258aSjsg drm_dbg_kms(&dev_priv->drm,
116357e1258aSjsg			    "PSR2 not enabled, unable to use long enough wake times\n");
116457e1258aSjsg return false;
116557e1258aSjsg }
116657e1258aSjsg
1167f005ef32Sjsg /* Vblank >= PSR2_CTL Block Count Number maximum line count */
1168f005ef32Sjsg if (crtc_state->hw.adjusted_mode.crtc_vblank_end -
1169f005ef32Sjsg crtc_state->hw.adjusted_mode.crtc_vblank_start <
1170f005ef32Sjsg psr2_block_count_lines(intel_dp)) {
1171f005ef32Sjsg drm_dbg_kms(&dev_priv->drm,
1172f005ef32Sjsg "PSR2 not enabled, too short vblank time\n");
1173f005ef32Sjsg return false;
1174f005ef32Sjsg }
1175f005ef32Sjsg
1176ad8b1aafSjsg if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1177ad8b1aafSjsg if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
1178ad8b1aafSjsg !HAS_PSR_HW_TRACKING(dev_priv)) {
1179c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1180ad8b1aafSjsg "PSR2 not enabled, selective fetch not valid and no HW tracking available\n");
1181ad8b1aafSjsg return false;
1182ad8b1aafSjsg }
1183ad8b1aafSjsg }
1184ad8b1aafSjsg
11855ca02815Sjsg if (!psr2_granularity_check(intel_dp, crtc_state)) {
11865ca02815Sjsg drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
1187e1b3a0e7Sjsg goto unsupported;
11885ca02815Sjsg }
11895ca02815Sjsg
1190ad8b1aafSjsg if (!crtc_state->enable_psr2_sel_fetch &&
1191ad8b1aafSjsg (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
1192ad8b1aafSjsg drm_dbg_kms(&dev_priv->drm,
1193ad8b1aafSjsg "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
1194ad8b1aafSjsg crtc_hdisplay, crtc_vdisplay,
1195ad8b1aafSjsg psr_max_h, psr_max_v);
1196e1b3a0e7Sjsg goto unsupported;
11975ca02815Sjsg }
11985ca02815Sjsg
1199c349dbc7Sjsg tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
1200c349dbc7Sjsg return true;
1201e1b3a0e7Sjsg
1202e1b3a0e7Sjsg unsupported:
1203e1b3a0e7Sjsg crtc_state->enable_psr2_sel_fetch = false;
1204e1b3a0e7Sjsg return false;
1205c349dbc7Sjsg }
1206c349dbc7Sjsg
1207c349dbc7Sjsg void intel_psr_compute_config(struct intel_dp *intel_dp,
12081bb76ff1Sjsg struct intel_crtc_state *crtc_state,
12091bb76ff1Sjsg struct drm_connector_state *conn_state)
1210c349dbc7Sjsg {
1211c349dbc7Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1212c349dbc7Sjsg const struct drm_display_mode *adjusted_mode =
1213c349dbc7Sjsg &crtc_state->hw.adjusted_mode;
1214c349dbc7Sjsg int psr_setup_time;
1215c349dbc7Sjsg
1216c349dbc7Sjsg /*
12171bb76ff1Sjsg	 * Current PSR panels don't work reliably with VRR enabled, so if
12185ca02815Sjsg	 * VRR is enabled, do not enable PSR.
1219c349dbc7Sjsg */
12205ca02815Sjsg if (crtc_state->vrr.enable)
12215ca02815Sjsg return;
12225ca02815Sjsg
12235ca02815Sjsg if (!CAN_PSR(intel_dp))
12245ca02815Sjsg return;
12255ca02815Sjsg
12265ca02815Sjsg if (!psr_global_enabled(intel_dp)) {
12275ca02815Sjsg drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
1228c349dbc7Sjsg return;
1229c349dbc7Sjsg }
1230c349dbc7Sjsg
12315ca02815Sjsg if (intel_dp->psr.sink_not_reliable) {
1232c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1233c349dbc7Sjsg "PSR sink implementation is not reliable\n");
1234c349dbc7Sjsg return;
1235c349dbc7Sjsg }
1236c349dbc7Sjsg
1237c349dbc7Sjsg if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
1238c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1239c349dbc7Sjsg "PSR condition failed: Interlaced mode enabled\n");
1240c349dbc7Sjsg return;
1241c349dbc7Sjsg }
1242c349dbc7Sjsg
1243c349dbc7Sjsg psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
1244c349dbc7Sjsg if (psr_setup_time < 0) {
1245c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1246c349dbc7Sjsg "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
1247c349dbc7Sjsg intel_dp->psr_dpcd[1]);
1248c349dbc7Sjsg return;
1249c349dbc7Sjsg }
1250c349dbc7Sjsg
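	/*
	 * The sink's PSR setup time, converted to scanlines, must fit within
	 * the vertical blank (crtc_vtotal - crtc_vdisplay - 1 lines).
	 */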
1251c349dbc7Sjsg if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
1252c349dbc7Sjsg adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
1253c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1254c349dbc7Sjsg "PSR condition failed: PSR setup time (%d us) too long\n",
1255c349dbc7Sjsg psr_setup_time);
1256c349dbc7Sjsg return;
1257c349dbc7Sjsg }
1258c349dbc7Sjsg
1259c349dbc7Sjsg crtc_state->has_psr = true;
1260c349dbc7Sjsg crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
12611bb76ff1Sjsg
1262ad8b1aafSjsg crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
12631bb76ff1Sjsg intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
12641bb76ff1Sjsg &crtc_state->psr_vsc);
1265c349dbc7Sjsg }
1266c349dbc7Sjsg
12675ca02815Sjsg void intel_psr_get_config(struct intel_encoder *encoder,
12685ca02815Sjsg struct intel_crtc_state *pipe_config)
12695ca02815Sjsg {
12705ca02815Sjsg struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12715ca02815Sjsg struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
1272f005ef32Sjsg enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
12735ca02815Sjsg struct intel_dp *intel_dp;
12745ca02815Sjsg u32 val;
12755ca02815Sjsg
12765ca02815Sjsg if (!dig_port)
12775ca02815Sjsg return;
12785ca02815Sjsg
12795ca02815Sjsg intel_dp = &dig_port->dp;
12805ca02815Sjsg if (!CAN_PSR(intel_dp))
12815ca02815Sjsg return;
12825ca02815Sjsg
12835ca02815Sjsg mutex_lock(&intel_dp->psr.lock);
12845ca02815Sjsg if (!intel_dp->psr.enabled)
12855ca02815Sjsg goto unlock;
12865ca02815Sjsg
12875ca02815Sjsg /*
12885ca02815Sjsg	 * Not possible to read EDP_PSR/PSR2_CTL registers as they get
12895ca02815Sjsg	 * constantly enabled/disabled by frontbuffer tracking and others.
12905ca02815Sjsg */
12915ca02815Sjsg pipe_config->has_psr = true;
12925ca02815Sjsg pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
12935ca02815Sjsg pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
12945ca02815Sjsg
12955ca02815Sjsg if (!intel_dp->psr.psr2_enabled)
12965ca02815Sjsg goto unlock;
12975ca02815Sjsg
12985ca02815Sjsg if (HAS_PSR2_SEL_FETCH(dev_priv)) {
1299f005ef32Sjsg val = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder));
13005ca02815Sjsg if (val & PSR2_MAN_TRK_CTL_ENABLE)
13015ca02815Sjsg pipe_config->enable_psr2_sel_fetch = true;
13025ca02815Sjsg }
13035ca02815Sjsg
13045ca02815Sjsg if (DISPLAY_VER(dev_priv) >= 12) {
1305f005ef32Sjsg val = intel_de_read(dev_priv, TRANS_EXITLINE(cpu_transcoder));
1306f005ef32Sjsg pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
13075ca02815Sjsg }
13085ca02815Sjsg unlock:
13095ca02815Sjsg mutex_unlock(&intel_dp->psr.lock);
13105ca02815Sjsg }
13115ca02815Sjsg
1312c349dbc7Sjsg static void intel_psr_activate(struct intel_dp *intel_dp)
1313c349dbc7Sjsg {
1314c349dbc7Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1315f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1316c349dbc7Sjsg
1317c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm,
1318f005ef32Sjsg transcoder_has_psr2(dev_priv, cpu_transcoder) &&
1319f005ef32Sjsg intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder)) & EDP_PSR2_ENABLE);
1320f005ef32Sjsg
1321f005ef32Sjsg drm_WARN_ON(&dev_priv->drm,
1322f005ef32Sjsg intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
1323f005ef32Sjsg
13245ca02815Sjsg drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
1325f005ef32Sjsg
13265ca02815Sjsg lockdep_assert_held(&intel_dp->psr.lock);
1327c349dbc7Sjsg
1328c349dbc7Sjsg	/* psr1 and psr2 are mutually exclusive. */
13295ca02815Sjsg if (intel_dp->psr.psr2_enabled)
1330c349dbc7Sjsg hsw_activate_psr2(intel_dp);
1331c349dbc7Sjsg else
1332c349dbc7Sjsg hsw_activate_psr1(intel_dp);
1333c349dbc7Sjsg
13345ca02815Sjsg intel_dp->psr.active = true;
1335c349dbc7Sjsg }
1336c349dbc7Sjsg
13371bb76ff1Sjsg static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
13381bb76ff1Sjsg {
13391bb76ff1Sjsg switch (intel_dp->psr.pipe) {
13401bb76ff1Sjsg case PIPE_A:
13411bb76ff1Sjsg return LATENCY_REPORTING_REMOVED_PIPE_A;
13421bb76ff1Sjsg case PIPE_B:
13431bb76ff1Sjsg return LATENCY_REPORTING_REMOVED_PIPE_B;
13441bb76ff1Sjsg case PIPE_C:
13451bb76ff1Sjsg return LATENCY_REPORTING_REMOVED_PIPE_C;
1346f005ef32Sjsg case PIPE_D:
1347f005ef32Sjsg return LATENCY_REPORTING_REMOVED_PIPE_D;
13481bb76ff1Sjsg default:
13491bb76ff1Sjsg MISSING_CASE(intel_dp->psr.pipe);
13501bb76ff1Sjsg return 0;
13511bb76ff1Sjsg }
13521bb76ff1Sjsg }
13531bb76ff1Sjsg
1354f005ef32Sjsg /*
1355f005ef32Sjsg * Wa_16013835468
1356f005ef32Sjsg * Wa_14015648006
1357f005ef32Sjsg */
1358f005ef32Sjsg static void wm_optimization_wa(struct intel_dp *intel_dp,
1359f005ef32Sjsg const struct intel_crtc_state *crtc_state)
1360f005ef32Sjsg {
1361f005ef32Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1362f005ef32Sjsg bool set_wa_bit = false;
1363f005ef32Sjsg
1364f005ef32Sjsg /* Wa_14015648006 */
1365f005ef32Sjsg if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
1366f005ef32Sjsg IS_DISPLAY_VER(dev_priv, 11, 13))
1367f005ef32Sjsg set_wa_bit |= crtc_state->wm_level_disabled;
1368f005ef32Sjsg
1369f005ef32Sjsg /* Wa_16013835468 */
1370f005ef32Sjsg if (DISPLAY_VER(dev_priv) == 12)
1371f005ef32Sjsg set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
1372f005ef32Sjsg crtc_state->hw.adjusted_mode.crtc_vdisplay;
1373f005ef32Sjsg
1374f005ef32Sjsg if (set_wa_bit)
1375f005ef32Sjsg intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1376f005ef32Sjsg 0, wa_16013835468_bit_get(intel_dp));
1377f005ef32Sjsg else
1378f005ef32Sjsg intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
1379f005ef32Sjsg wa_16013835468_bit_get(intel_dp), 0);
1380f005ef32Sjsg }
1381f005ef32Sjsg
13821bb76ff1Sjsg static void intel_psr_enable_source(struct intel_dp *intel_dp,
13831bb76ff1Sjsg const struct intel_crtc_state *crtc_state)
1384c349dbc7Sjsg {
1385c349dbc7Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
13865ca02815Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1387c349dbc7Sjsg u32 mask;
1388c349dbc7Sjsg
1389c349dbc7Sjsg /*
1390f005ef32Sjsg	 * Only HSW and BDW have PSR AUX registers that need to be set up.
1391f005ef32Sjsg	 * SKL+ use hardcoded values for PSR AUX transactions.
1392f005ef32Sjsg */
1393f005ef32Sjsg if (DISPLAY_VER(dev_priv) < 9)
1394f005ef32Sjsg hsw_psr_setup_aux(intel_dp);
1395f005ef32Sjsg
1396f005ef32Sjsg /*
1397c349dbc7Sjsg	 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
1398c349dbc7Sjsg	 * mask LPSP to avoid a dependency on other drivers that might block
1399c349dbc7Sjsg	 * runtime_pm, besides preventing other HW tracking issues, now that
1400c349dbc7Sjsg	 * we can rely on frontbuffer tracking.
1401c349dbc7Sjsg */
1402c349dbc7Sjsg mask = EDP_PSR_DEBUG_MASK_MEMUP |
1403*30a1c604Sjsg EDP_PSR_DEBUG_MASK_HPD;
1404*30a1c604Sjsg
1405*30a1c604Sjsg /*
1406*30a1c604Sjsg * For some unknown reason on HSW non-ULT (or at least on
1407*30a1c604Sjsg * Dell Latitude E6540) external displays start to flicker
1408*30a1c604Sjsg * when PSR is enabled on the eDP. SR/PC6 residency is much
1409*30a1c604Sjsg * higher than should be possible with an external display.
1410*30a1c604Sjsg * As a workaround leave LPSP unmasked to prevent PSR entry
1411*30a1c604Sjsg * when external displays are active.
1412*30a1c604Sjsg */
1413*30a1c604Sjsg if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
1414*30a1c604Sjsg mask |= EDP_PSR_DEBUG_MASK_LPSP;
14151747a499Sjsg
14161747a499Sjsg if (DISPLAY_VER(dev_priv) < 20)
14171747a499Sjsg mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
1418c349dbc7Sjsg
1419f005ef32Sjsg /*
1420f005ef32Sjsg * No separate pipe reg write mask on hsw/bdw, so have to unmask all
1421f005ef32Sjsg * registers in order to keep the CURSURFLIVE tricks working :(
1422f005ef32Sjsg */
1423f005ef32Sjsg if (IS_DISPLAY_VER(dev_priv, 9, 10))
1424c349dbc7Sjsg mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
1425c349dbc7Sjsg
1426f005ef32Sjsg /* allow PSR with sprite enabled */
1427f005ef32Sjsg if (IS_HASWELL(dev_priv))
1428f005ef32Sjsg mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
1429f005ef32Sjsg
1430f005ef32Sjsg intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
1431c349dbc7Sjsg
14325ca02815Sjsg psr_irq_control(intel_dp);
1433c349dbc7Sjsg
1434c349dbc7Sjsg /*
1435c349dbc7Sjsg	 * TODO: if future platforms support DC3CO in more than one
1436c349dbc7Sjsg * transcoder, EXITLINE will need to be unset when disabling PSR
1437c349dbc7Sjsg */
1438f005ef32Sjsg if (intel_dp->psr.dc3co_exitline)
1439f005ef32Sjsg intel_de_rmw(dev_priv, TRANS_EXITLINE(cpu_transcoder), EXITLINE_MASK,
1440f005ef32Sjsg intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
1441ad8b1aafSjsg
14425ca02815Sjsg if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
1443ad8b1aafSjsg intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
14445ca02815Sjsg intel_dp->psr.psr2_sel_fetch_enabled ?
1445ad8b1aafSjsg IGNORE_PSR2_HW_TRACKING : 0);
14465ca02815Sjsg
1447f005ef32Sjsg /*
1448f005ef32Sjsg * Wa_16013835468
1449f005ef32Sjsg * Wa_14015648006
1450f005ef32Sjsg */
1451f005ef32Sjsg wm_optimization_wa(intel_dp, crtc_state);
1452f005ef32Sjsg
14531bb76ff1Sjsg if (intel_dp->psr.psr2_enabled) {
14541bb76ff1Sjsg if (DISPLAY_VER(dev_priv) == 9)
14551bb76ff1Sjsg intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
14561bb76ff1Sjsg PSR2_VSC_ENABLE_PROG_HEADER |
14571bb76ff1Sjsg PSR2_ADD_VERTICAL_LINE_COUNT);
14581bb76ff1Sjsg
14591bb76ff1Sjsg /*
1460f005ef32Sjsg * Wa_16014451276:adlp,mtl[a0,b0]
14611bb76ff1Sjsg		 * All supported adlp panels have 1-based X granularity; this may
14621bb76ff1Sjsg		 * cause issues if unsupported panels are used.
14631bb76ff1Sjsg */
1464f005ef32Sjsg if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1465f005ef32Sjsg intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
1466f005ef32Sjsg ADLP_1_BASED_X_GRANULARITY);
1467f005ef32Sjsg else if (IS_ALDERLAKE_P(dev_priv))
14681bb76ff1Sjsg intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
14691bb76ff1Sjsg ADLP_1_BASED_X_GRANULARITY);
14701bb76ff1Sjsg
1471f005ef32Sjsg /* Wa_16012604467:adlp,mtl[a0,b0] */
1472f005ef32Sjsg if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
14735ca02815Sjsg intel_de_rmw(dev_priv,
1474f005ef32Sjsg MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
1475f005ef32Sjsg MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
1476f005ef32Sjsg else if (IS_ALDERLAKE_P(dev_priv))
14771bb76ff1Sjsg intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
14781bb76ff1Sjsg CLKGATE_DIS_MISC_DMASC_GATING_DIS);
14791bb76ff1Sjsg }
1480c349dbc7Sjsg }
1481c349dbc7Sjsg
14825ca02815Sjsg static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
1483c349dbc7Sjsg {
14845ca02815Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1485f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1486c349dbc7Sjsg u32 val;
1487c349dbc7Sjsg
1488c349dbc7Sjsg /*
1489c349dbc7Sjsg * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
1490c349dbc7Sjsg * will still keep the error set even after the reset done in the
1491c349dbc7Sjsg * irq_preinstall and irq_uninstall hooks.
1492c349dbc7Sjsg	 * Enabling PSR in this situation causes the screen to freeze the
1493c349dbc7Sjsg	 * first time the PSR HW tries to activate, so let's keep PSR
1494c349dbc7Sjsg	 * disabled to avoid any rendering problems.
1495c349dbc7Sjsg */
1496f005ef32Sjsg val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
14971bb76ff1Sjsg val &= psr_irq_psr_error_bit_get(intel_dp);
1498c349dbc7Sjsg if (val) {
14995ca02815Sjsg intel_dp->psr.sink_not_reliable = true;
1500c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm,
1501c349dbc7Sjsg "PSR interruption error set, not enabling PSR\n");
15025ca02815Sjsg return false;
1503c349dbc7Sjsg }
1504c349dbc7Sjsg
15055ca02815Sjsg return true;
15065ca02815Sjsg }
15075ca02815Sjsg
15085ca02815Sjsg static void intel_psr_enable_locked(struct intel_dp *intel_dp,
15091bb76ff1Sjsg const struct intel_crtc_state *crtc_state)
15105ca02815Sjsg {
15115ca02815Sjsg struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
15125ca02815Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
15135ca02815Sjsg enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
15145ca02815Sjsg struct intel_encoder *encoder = &dig_port->base;
15155ca02815Sjsg u32 val;
15165ca02815Sjsg
15175ca02815Sjsg drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
15185ca02815Sjsg
15195ca02815Sjsg intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
15205ca02815Sjsg intel_dp->psr.busy_frontbuffer_bits = 0;
15215ca02815Sjsg intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
15225ca02815Sjsg intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
15235ca02815Sjsg /* DC5/DC6 requires at least 6 idle frames */
15245ca02815Sjsg val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
15255ca02815Sjsg intel_dp->psr.dc3co_exit_delay = val;
15265ca02815Sjsg intel_dp->psr.dc3co_exitline = crtc_state->dc3co_exitline;
15275ca02815Sjsg intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
15281bb76ff1Sjsg intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
15295ca02815Sjsg intel_dp->psr.req_psr2_sdp_prior_scanline =
15305ca02815Sjsg crtc_state->req_psr2_sdp_prior_scanline;
15315ca02815Sjsg
15325ca02815Sjsg if (!psr_interrupt_error_check(intel_dp))
15335ca02815Sjsg return;
15345ca02815Sjsg
1535c349dbc7Sjsg drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
15365ca02815Sjsg intel_dp->psr.psr2_enabled ? "2" : "1");
15371bb76ff1Sjsg intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
15385ca02815Sjsg intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
1539c349dbc7Sjsg intel_psr_enable_sink(intel_dp);
15401bb76ff1Sjsg intel_psr_enable_source(intel_dp, crtc_state);
15415ca02815Sjsg intel_dp->psr.enabled = true;
15425ca02815Sjsg intel_dp->psr.paused = false;
1543c349dbc7Sjsg
1544c349dbc7Sjsg intel_psr_activate(intel_dp);
1545c349dbc7Sjsg }
1546c349dbc7Sjsg
15475ca02815Sjsg static void intel_psr_exit(struct intel_dp *intel_dp)
1548c349dbc7Sjsg {
15495ca02815Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1550f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1551c349dbc7Sjsg u32 val;
1552c349dbc7Sjsg
15535ca02815Sjsg if (!intel_dp->psr.active) {
1554f005ef32Sjsg if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
1555f005ef32Sjsg val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
1556c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
1557c349dbc7Sjsg }
1558c349dbc7Sjsg
1559f005ef32Sjsg val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
1560c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
1561c349dbc7Sjsg
1562c349dbc7Sjsg return;
1563c349dbc7Sjsg }
1564c349dbc7Sjsg
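	/*
	 * intel_de_rmw() returns the pre-modification register value, so the
	 * WARNs below verify that the enable bit was actually set before it
	 * was cleared.
	 */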
15655ca02815Sjsg if (intel_dp->psr.psr2_enabled) {
15665ca02815Sjsg tgl_disallow_dc3co_on_psr2_exit(intel_dp);
1567f005ef32Sjsg
1568f005ef32Sjsg val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
1569f005ef32Sjsg EDP_PSR2_ENABLE, 0);
1570f005ef32Sjsg
1571c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
1572c349dbc7Sjsg } else {
1573f005ef32Sjsg val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
1574f005ef32Sjsg EDP_PSR_ENABLE, 0);
1575f005ef32Sjsg
1576c349dbc7Sjsg drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
1577c349dbc7Sjsg }
15785ca02815Sjsg intel_dp->psr.active = false;
1579c349dbc7Sjsg }
1580c349dbc7Sjsg
15815ca02815Sjsg static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
1582c349dbc7Sjsg {
1583c349dbc7Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1584f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
1585c349dbc7Sjsg i915_reg_t psr_status;
1586c349dbc7Sjsg u32 psr_status_mask;
1587c349dbc7Sjsg
15885ca02815Sjsg if (intel_dp->psr.psr2_enabled) {
1589f005ef32Sjsg psr_status = EDP_PSR2_STATUS(cpu_transcoder);
1590c349dbc7Sjsg psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
1591c349dbc7Sjsg } else {
1592f005ef32Sjsg psr_status = psr_status_reg(dev_priv, cpu_transcoder);
1593c349dbc7Sjsg psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
1594c349dbc7Sjsg }
1595c349dbc7Sjsg
1596c349dbc7Sjsg /* Wait till PSR is idle */
1597c349dbc7Sjsg if (intel_de_wait_for_clear(dev_priv, psr_status,
1598c349dbc7Sjsg psr_status_mask, 2000))
1599c349dbc7Sjsg drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
16005ca02815Sjsg }
1601c349dbc7Sjsg
16025ca02815Sjsg static void intel_psr_disable_locked(struct intel_dp *intel_dp)
16035ca02815Sjsg {
16045ca02815Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1605f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
16065ca02815Sjsg enum phy phy = intel_port_to_phy(dev_priv,
16075ca02815Sjsg dp_to_dig_port(intel_dp)->base.port);
16085ca02815Sjsg
16095ca02815Sjsg lockdep_assert_held(&intel_dp->psr.lock);
16105ca02815Sjsg
16115ca02815Sjsg if (!intel_dp->psr.enabled)
16125ca02815Sjsg return;
16135ca02815Sjsg
16145ca02815Sjsg drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
16155ca02815Sjsg intel_dp->psr.psr2_enabled ? "2" : "1");
16165ca02815Sjsg
16175ca02815Sjsg intel_psr_exit(intel_dp);
16185ca02815Sjsg intel_psr_wait_exit_locked(intel_dp);
16195ca02815Sjsg
1620f005ef32Sjsg /*
1621f005ef32Sjsg * Wa_16013835468
1622f005ef32Sjsg * Wa_14015648006
1623f005ef32Sjsg */
1624f005ef32Sjsg if (DISPLAY_VER(dev_priv) >= 11)
16251bb76ff1Sjsg intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
16261bb76ff1Sjsg wa_16013835468_bit_get(intel_dp), 0);
1627f005ef32Sjsg
1628f005ef32Sjsg if (intel_dp->psr.psr2_enabled) {
1629f005ef32Sjsg /* Wa_16012604467:adlp,mtl[a0,b0] */
1630f005ef32Sjsg if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
1631f005ef32Sjsg intel_de_rmw(dev_priv,
1632f005ef32Sjsg MTL_CLKGATE_DIS_TRANS(cpu_transcoder),
1633f005ef32Sjsg MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
1634f005ef32Sjsg else if (IS_ALDERLAKE_P(dev_priv))
1635f005ef32Sjsg intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
1636f005ef32Sjsg CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
16371bb76ff1Sjsg }
16381bb76ff1Sjsg
16395ca02815Sjsg intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
16405ca02815Sjsg
1641c349dbc7Sjsg /* Disable PSR on Sink */
1642c349dbc7Sjsg drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
1643c349dbc7Sjsg
16445ca02815Sjsg if (intel_dp->psr.psr2_enabled)
1645c349dbc7Sjsg drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
1646c349dbc7Sjsg
16475ca02815Sjsg intel_dp->psr.enabled = false;
16481bb76ff1Sjsg intel_dp->psr.psr2_enabled = false;
16491bb76ff1Sjsg intel_dp->psr.psr2_sel_fetch_enabled = false;
16501bb76ff1Sjsg intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
1651c349dbc7Sjsg }
1652c349dbc7Sjsg
1653c349dbc7Sjsg /**
1654c349dbc7Sjsg * intel_psr_disable - Disable PSR
1655c349dbc7Sjsg * @intel_dp: Intel DP
1656c349dbc7Sjsg * @old_crtc_state: old CRTC state
1657c349dbc7Sjsg *
1658c349dbc7Sjsg  * This function needs to be called before the pipe is disabled.
1659c349dbc7Sjsg */
1660c349dbc7Sjsg void intel_psr_disable(struct intel_dp *intel_dp,
1661c349dbc7Sjsg const struct intel_crtc_state *old_crtc_state)
1662c349dbc7Sjsg {
1663c349dbc7Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1664c349dbc7Sjsg
1665c349dbc7Sjsg if (!old_crtc_state->has_psr)
1666c349dbc7Sjsg return;
1667c349dbc7Sjsg
16685ca02815Sjsg if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
1669c349dbc7Sjsg return;
1670c349dbc7Sjsg
16715ca02815Sjsg mutex_lock(&intel_dp->psr.lock);
1672c349dbc7Sjsg
1673c349dbc7Sjsg intel_psr_disable_locked(intel_dp);
1674c349dbc7Sjsg
16755ca02815Sjsg mutex_unlock(&intel_dp->psr.lock);
16765ca02815Sjsg cancel_work_sync(&intel_dp->psr.work);
16775ca02815Sjsg cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
1678c349dbc7Sjsg }
1679c349dbc7Sjsg
16805ca02815Sjsg /**
16815ca02815Sjsg * intel_psr_pause - Pause PSR
16825ca02815Sjsg * @intel_dp: Intel DP
16835ca02815Sjsg *
16845ca02815Sjsg  * This function needs to be called after enabling PSR.
16855ca02815Sjsg */
16865ca02815Sjsg void intel_psr_pause(struct intel_dp *intel_dp)
1687c349dbc7Sjsg {
16881bb76ff1Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
16895ca02815Sjsg struct intel_psr *psr = &intel_dp->psr;
16905ca02815Sjsg
16915ca02815Sjsg if (!CAN_PSR(intel_dp))
16925ca02815Sjsg return;
16935ca02815Sjsg
16945ca02815Sjsg mutex_lock(&psr->lock);
16955ca02815Sjsg
16965ca02815Sjsg if (!psr->enabled) {
16975ca02815Sjsg mutex_unlock(&psr->lock);
16985ca02815Sjsg return;
16995ca02815Sjsg }
17005ca02815Sjsg
17011bb76ff1Sjsg /* If we ever hit this, we will need to add refcount to pause/resume */
17021bb76ff1Sjsg drm_WARN_ON(&dev_priv->drm, psr->paused);
17031bb76ff1Sjsg
17045ca02815Sjsg intel_psr_exit(intel_dp);
17055ca02815Sjsg intel_psr_wait_exit_locked(intel_dp);
17065ca02815Sjsg psr->paused = true;
17075ca02815Sjsg
17085ca02815Sjsg mutex_unlock(&psr->lock);
17095ca02815Sjsg
17105ca02815Sjsg cancel_work_sync(&psr->work);
17115ca02815Sjsg cancel_delayed_work_sync(&psr->dc3co_work);
17125ca02815Sjsg }
17135ca02815Sjsg
17145ca02815Sjsg /**
17155ca02815Sjsg * intel_psr_resume - Resume PSR
17165ca02815Sjsg * @intel_dp: Intel DP
17175ca02815Sjsg *
17185ca02815Sjsg  * This function needs to be called after pausing PSR.
17195ca02815Sjsg */
17205ca02815Sjsg void intel_psr_resume(struct intel_dp *intel_dp)
17215ca02815Sjsg {
17225ca02815Sjsg struct intel_psr *psr = &intel_dp->psr;
17235ca02815Sjsg
17245ca02815Sjsg if (!CAN_PSR(intel_dp))
17255ca02815Sjsg return;
17265ca02815Sjsg
17275ca02815Sjsg mutex_lock(&psr->lock);
17285ca02815Sjsg
17295ca02815Sjsg if (!psr->paused)
17305ca02815Sjsg goto unlock;
17315ca02815Sjsg
17325ca02815Sjsg psr->paused = false;
17335ca02815Sjsg intel_psr_activate(intel_dp);
17345ca02815Sjsg
17355ca02815Sjsg unlock:
17365ca02815Sjsg mutex_unlock(&psr->lock);
17375ca02815Sjsg }
17385ca02815Sjsg
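/*
 * ADL-P and display version 14+ use a different PSR2_MAN_TRK_CTL bit layout
 * than earlier platforms, hence these per-platform helpers.
 */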
17391bb76ff1Sjsg static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
17401bb76ff1Sjsg {
1741f005ef32Sjsg return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
1742f005ef32Sjsg PSR2_MAN_TRK_CTL_ENABLE;
17431bb76ff1Sjsg }
17441bb76ff1Sjsg
17451bb76ff1Sjsg static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
17461bb76ff1Sjsg {
1747f005ef32Sjsg return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
17481bb76ff1Sjsg ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
17491bb76ff1Sjsg PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
17501bb76ff1Sjsg }
17511bb76ff1Sjsg
17521bb76ff1Sjsg static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
17531bb76ff1Sjsg {
1754f005ef32Sjsg return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
17551bb76ff1Sjsg ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
17561bb76ff1Sjsg PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
17571bb76ff1Sjsg }
17581bb76ff1Sjsg
17591bb76ff1Sjsg static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
17601bb76ff1Sjsg {
1761f005ef32Sjsg return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
17621bb76ff1Sjsg ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
17631bb76ff1Sjsg PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
17641bb76ff1Sjsg }
17651bb76ff1Sjsg
17665ca02815Sjsg static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
17675ca02815Sjsg {
17685ca02815Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1769f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
17705ca02815Sjsg
17711bb76ff1Sjsg if (intel_dp->psr.psr2_sel_fetch_enabled)
17721bb76ff1Sjsg intel_de_write(dev_priv,
1773f005ef32Sjsg PSR2_MAN_TRK_CTL(cpu_transcoder),
17741bb76ff1Sjsg man_trk_ctl_enable_bit_get(dev_priv) |
17751bb76ff1Sjsg man_trk_ctl_partial_frame_bit_get(dev_priv) |
1776f005ef32Sjsg man_trk_ctl_single_full_frame_bit_get(dev_priv) |
1777f005ef32Sjsg man_trk_ctl_continuos_full_frame(dev_priv));
17781bb76ff1Sjsg
1779c349dbc7Sjsg /*
1780c349dbc7Sjsg * Display WA #0884: skl+
1781c349dbc7Sjsg * This documented WA for bxt can be safely applied
1782c349dbc7Sjsg * broadly so we can force HW tracking to exit PSR
1783c349dbc7Sjsg * instead of disabling and re-enabling.
1784c349dbc7Sjsg * Workaround tells us to write 0 to CUR_SURFLIVE_A,
1785c349dbc7Sjsg	 * but it makes more sense to write to the currently active
1786c349dbc7Sjsg * pipe.
17871bb76ff1Sjsg *
17881bb76ff1Sjsg	 * This workaround does not exist for platforms with display version 10
17891bb76ff1Sjsg	 * or newer, but testing proved that it works up to display version 13;
17901bb76ff1Sjsg	 * for anything newer, testing will be needed.
1791c349dbc7Sjsg */
17925ca02815Sjsg intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
17931bb76ff1Sjsg }
17941bb76ff1Sjsg
1795f005ef32Sjsg void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
17961bb76ff1Sjsg const struct intel_crtc_state *crtc_state)
17971bb76ff1Sjsg {
17981bb76ff1Sjsg struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
17991bb76ff1Sjsg enum pipe pipe = plane->pipe;
18001bb76ff1Sjsg
18011bb76ff1Sjsg if (!crtc_state->enable_psr2_sel_fetch)
18021bb76ff1Sjsg return;
18031bb76ff1Sjsg
18041bb76ff1Sjsg intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
18055ca02815Sjsg }
18065ca02815Sjsg
1807f005ef32Sjsg void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
1808f005ef32Sjsg const struct intel_crtc_state *crtc_state,
1809f005ef32Sjsg const struct intel_plane_state *plane_state)
1810f005ef32Sjsg {
1811f005ef32Sjsg struct drm_i915_private *i915 = to_i915(plane->base.dev);
1812f005ef32Sjsg enum pipe pipe = plane->pipe;
1813f005ef32Sjsg
1814f005ef32Sjsg if (!crtc_state->enable_psr2_sel_fetch)
1815f005ef32Sjsg return;
1816f005ef32Sjsg
1817f005ef32Sjsg if (plane->id == PLANE_CURSOR)
1818f005ef32Sjsg intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1819f005ef32Sjsg plane_state->ctl);
1820f005ef32Sjsg else
1821f005ef32Sjsg intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
1822f005ef32Sjsg PLANE_SEL_FETCH_CTL_ENABLE);
1823f005ef32Sjsg }
1824f005ef32Sjsg
1825f005ef32Sjsg void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
18265ca02815Sjsg const struct intel_crtc_state *crtc_state,
18275ca02815Sjsg const struct intel_plane_state *plane_state,
18285ca02815Sjsg int color_plane)
18295ca02815Sjsg {
18305ca02815Sjsg struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
18315ca02815Sjsg enum pipe pipe = plane->pipe;
18325ca02815Sjsg const struct drm_rect *clip;
18331bb76ff1Sjsg u32 val;
18341bb76ff1Sjsg int x, y;
18355ca02815Sjsg
18365ca02815Sjsg if (!crtc_state->enable_psr2_sel_fetch)
18375ca02815Sjsg return;
18385ca02815Sjsg
1839f005ef32Sjsg if (plane->id == PLANE_CURSOR)
18405ca02815Sjsg return;
18415ca02815Sjsg
18425ca02815Sjsg clip = &plane_state->psr2_sel_fetch_area;
18435ca02815Sjsg
18445ca02815Sjsg val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
18455ca02815Sjsg val |= plane_state->uapi.dst.x1;
18465ca02815Sjsg intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
18475ca02815Sjsg
18481bb76ff1Sjsg x = plane_state->view.color_plane[color_plane].x;
18491bb76ff1Sjsg
18501bb76ff1Sjsg /*
18511bb76ff1Sjsg * From Bspec: UV surface Start Y Position = half of Y plane Y
18521bb76ff1Sjsg * start position.
18531bb76ff1Sjsg */
18541bb76ff1Sjsg if (!color_plane)
18551bb76ff1Sjsg y = plane_state->view.color_plane[color_plane].y + clip->y1;
18561bb76ff1Sjsg else
18571bb76ff1Sjsg y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
18581bb76ff1Sjsg
18595ca02815Sjsg val = y << 16 | x;
18601bb76ff1Sjsg
18615ca02815Sjsg intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
18625ca02815Sjsg val);
18635ca02815Sjsg
18645ca02815Sjsg /* Sizes are 0 based */
18655ca02815Sjsg val = (drm_rect_height(clip) - 1) << 16;
18665ca02815Sjsg val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
18675ca02815Sjsg intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
1868c349dbc7Sjsg }
1869c349dbc7Sjsg
1870ad8b1aafSjsg void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
1871ad8b1aafSjsg {
18725ca02815Sjsg struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1873f005ef32Sjsg enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
18741bb76ff1Sjsg struct intel_encoder *encoder;
1875ad8b1aafSjsg
18761bb76ff1Sjsg if (!crtc_state->enable_psr2_sel_fetch)
1877ad8b1aafSjsg return;
1878ad8b1aafSjsg
18791bb76ff1Sjsg for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
18801bb76ff1Sjsg crtc_state->uapi.encoder_mask) {
18811bb76ff1Sjsg struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
18821bb76ff1Sjsg
18831bb76ff1Sjsg lockdep_assert_held(&intel_dp->psr.lock);
18841bb76ff1Sjsg if (intel_dp->psr.psr2_sel_fetch_cff_enabled)
18851bb76ff1Sjsg return;
18861bb76ff1Sjsg break;
18871bb76ff1Sjsg }
18881bb76ff1Sjsg
1889f005ef32Sjsg intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
1890ad8b1aafSjsg crtc_state->psr2_man_track_ctl);
1891ad8b1aafSjsg }
1892ad8b1aafSjsg
18935ca02815Sjsg static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
18945ca02815Sjsg struct drm_rect *clip, bool full_update)
18955ca02815Sjsg {
18965ca02815Sjsg struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
18975ca02815Sjsg struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
18981bb76ff1Sjsg u32 val = man_trk_ctl_enable_bit_get(dev_priv);
18991bb76ff1Sjsg
19001bb76ff1Sjsg /* SF partial frame enable has to be set even on full update */
19011bb76ff1Sjsg val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
19025ca02815Sjsg
19035ca02815Sjsg if (full_update) {
19041bb76ff1Sjsg val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
1905f005ef32Sjsg val |= man_trk_ctl_continuos_full_frame(dev_priv);
19065ca02815Sjsg goto exit;
19075ca02815Sjsg }
19085ca02815Sjsg
19095ca02815Sjsg if (clip->y1 == -1)
19105ca02815Sjsg goto exit;
19115ca02815Sjsg
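	/*
	 * ADL-P+ programs the SU region with raw line numbers (inclusive end),
	 * while older platforms program it in 1-based blocks of 4 lines.
	 */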
1912f005ef32Sjsg if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
19135ca02815Sjsg val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
19141bb76ff1Sjsg val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
19155ca02815Sjsg } else {
19165ca02815Sjsg drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
19175ca02815Sjsg
19185ca02815Sjsg val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
19195ca02815Sjsg val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
19205ca02815Sjsg }
19215ca02815Sjsg exit:
19225ca02815Sjsg crtc_state->psr2_man_track_ctl = val;
19235ca02815Sjsg }
19245ca02815Sjsg
19255ca02815Sjsg static void clip_area_update(struct drm_rect *overlap_damage_area,
19261bb76ff1Sjsg struct drm_rect *damage_area,
19271bb76ff1Sjsg struct drm_rect *pipe_src)
19285ca02815Sjsg {
19291bb76ff1Sjsg if (!drm_rect_intersect(damage_area, pipe_src))
19301bb76ff1Sjsg return;
19311bb76ff1Sjsg
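	/* y1 == -1 means the overlap area is still unset; adopt the first damage rect. */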
19325ca02815Sjsg if (overlap_damage_area->y1 == -1) {
19335ca02815Sjsg overlap_damage_area->y1 = damage_area->y1;
19345ca02815Sjsg overlap_damage_area->y2 = damage_area->y2;
19355ca02815Sjsg return;
19365ca02815Sjsg }
19375ca02815Sjsg
19385ca02815Sjsg if (damage_area->y1 < overlap_damage_area->y1)
19395ca02815Sjsg overlap_damage_area->y1 = damage_area->y1;
19405ca02815Sjsg
19415ca02815Sjsg if (damage_area->y2 > overlap_damage_area->y2)
19425ca02815Sjsg overlap_damage_area->y2 = damage_area->y2;
19435ca02815Sjsg }
19445ca02815Sjsg
19455ca02815Sjsg static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
19465ca02815Sjsg struct drm_rect *pipe_clip)
19475ca02815Sjsg {
19485ca02815Sjsg struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1949f005ef32Sjsg const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
1950f005ef32Sjsg u16 y_alignment;
1951f005ef32Sjsg
1952f005ef32Sjsg /* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
1953f005ef32Sjsg if (crtc_state->dsc.compression_enable &&
1954f005ef32Sjsg (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
1955f005ef32Sjsg y_alignment = vdsc_cfg->slice_height;
1956f005ef32Sjsg else
1957f005ef32Sjsg y_alignment = crtc_state->su_y_granularity;
19585ca02815Sjsg
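	/* Round y1 down and y2 up to the required y granularity. */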
19595ca02815Sjsg pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
19605ca02815Sjsg if (pipe_clip->y2 % y_alignment)
19615ca02815Sjsg pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
19625ca02815Sjsg }
19635ca02815Sjsg
19641bb76ff1Sjsg /*
19651bb76ff1Sjsg * TODO: Not clear how to handle planes with negative position,
19661bb76ff1Sjsg * also planes are not updated if they have a negative X
19671bb76ff1Sjsg * position, so for now do a full update in these cases.
19681bb76ff1Sjsg *
19691bb76ff1Sjsg * Plane scaling and rotation is not supported by selective fetch and both
19701bb76ff1Sjsg * properties can change without a modeset, so they need to be checked at
19711bb76ff1Sjsg * every atomic commit.
19721bb76ff1Sjsg */
19731bb76ff1Sjsg static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
19741bb76ff1Sjsg {
19751bb76ff1Sjsg if (plane_state->uapi.dst.y1 < 0 ||
19761bb76ff1Sjsg plane_state->uapi.dst.x1 < 0 ||
19771bb76ff1Sjsg plane_state->scaler_id >= 0 ||
19781bb76ff1Sjsg plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
19791bb76ff1Sjsg return false;
19801bb76ff1Sjsg
19811bb76ff1Sjsg return true;
19821bb76ff1Sjsg }
19831bb76ff1Sjsg
19841bb76ff1Sjsg /*
19851bb76ff1Sjsg * Check for pipe properties that are not supported by selective fetch.
19861bb76ff1Sjsg *
19871bb76ff1Sjsg * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
19881bb76ff1Sjsg * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
19891bb76ff1Sjsg * enabled and going to the full update path.
19901bb76ff1Sjsg */
19911bb76ff1Sjsg static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
19921bb76ff1Sjsg {
19931bb76ff1Sjsg if (crtc_state->scaler_state.scaler_id >= 0)
19941bb76ff1Sjsg return false;
19951bb76ff1Sjsg
19961bb76ff1Sjsg return true;
19971bb76ff1Sjsg }
19981bb76ff1Sjsg
19995ca02815Sjsg int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
2000ad8b1aafSjsg struct intel_crtc *crtc)
2001ad8b1aafSjsg {
20021bb76ff1Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2003ad8b1aafSjsg struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
20045ca02815Sjsg struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
20055ca02815Sjsg struct intel_plane_state *new_plane_state, *old_plane_state;
20065ca02815Sjsg struct intel_plane *plane;
20075ca02815Sjsg bool full_update = false;
20085ca02815Sjsg int i, ret;
2009ad8b1aafSjsg
2010ad8b1aafSjsg if (!crtc_state->enable_psr2_sel_fetch)
20115ca02815Sjsg return 0;
2012ad8b1aafSjsg
20131bb76ff1Sjsg if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
20141bb76ff1Sjsg full_update = true;
20151bb76ff1Sjsg goto skip_sel_fetch_set_loop;
20161bb76ff1Sjsg }
20175ca02815Sjsg
20185ca02815Sjsg /*
20195ca02815Sjsg	 * Calculate the minimal selective fetch area of each plane and
20205ca02815Sjsg	 * accumulate it into the pipe damaged area.
20215ca02815Sjsg	 * In the next loop the plane selective fetch areas will actually be
20225ca02815Sjsg	 * set using the whole pipe damaged area.
20235ca02815Sjsg */
20245ca02815Sjsg for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
20255ca02815Sjsg new_plane_state, i) {
20261bb76ff1Sjsg struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
20271bb76ff1Sjsg .x2 = INT_MAX };
20285ca02815Sjsg
20295ca02815Sjsg if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
20305ca02815Sjsg continue;
20315ca02815Sjsg
20325ca02815Sjsg if (!new_plane_state->uapi.visible &&
20335ca02815Sjsg !old_plane_state->uapi.visible)
20345ca02815Sjsg continue;
20355ca02815Sjsg
20361bb76ff1Sjsg if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
20375ca02815Sjsg full_update = true;
20385ca02815Sjsg break;
20395ca02815Sjsg }
20405ca02815Sjsg
20415ca02815Sjsg /*
20425ca02815Sjsg		 * If the visibility changed or the plane moved, mark the whole
20435ca02815Sjsg		 * plane area as damaged, as it needs to be completely redrawn
20445ca02815Sjsg		 * in both the old and new positions.
20455ca02815Sjsg */
20465ca02815Sjsg if (new_plane_state->uapi.visible != old_plane_state->uapi.visible ||
20475ca02815Sjsg !drm_rect_equals(&new_plane_state->uapi.dst,
20485ca02815Sjsg &old_plane_state->uapi.dst)) {
20495ca02815Sjsg if (old_plane_state->uapi.visible) {
20505ca02815Sjsg damaged_area.y1 = old_plane_state->uapi.dst.y1;
20515ca02815Sjsg damaged_area.y2 = old_plane_state->uapi.dst.y2;
20521bb76ff1Sjsg clip_area_update(&pipe_clip, &damaged_area,
20531bb76ff1Sjsg &crtc_state->pipe_src);
20545ca02815Sjsg }
20555ca02815Sjsg
20565ca02815Sjsg if (new_plane_state->uapi.visible) {
20575ca02815Sjsg damaged_area.y1 = new_plane_state->uapi.dst.y1;
20585ca02815Sjsg damaged_area.y2 = new_plane_state->uapi.dst.y2;
20591bb76ff1Sjsg clip_area_update(&pipe_clip, &damaged_area,
20601bb76ff1Sjsg &crtc_state->pipe_src);
20615ca02815Sjsg }
20625ca02815Sjsg continue;
20631bb76ff1Sjsg } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
20641bb76ff1Sjsg			/* If alpha changed, mark the whole plane area as damaged */
20655ca02815Sjsg damaged_area.y1 = new_plane_state->uapi.dst.y1;
20665ca02815Sjsg damaged_area.y2 = new_plane_state->uapi.dst.y2;
20671bb76ff1Sjsg clip_area_update(&pipe_clip, &damaged_area,
20681bb76ff1Sjsg &crtc_state->pipe_src);
20695ca02815Sjsg continue;
20705ca02815Sjsg }
20715ca02815Sjsg
20721bb76ff1Sjsg src = drm_plane_state_src(&new_plane_state->uapi);
20731bb76ff1Sjsg drm_rect_fp_to_int(&src, &src);
20745ca02815Sjsg
20751bb76ff1Sjsg if (!drm_atomic_helper_damage_merged(&old_plane_state->uapi,
20761bb76ff1Sjsg &new_plane_state->uapi, &damaged_area))
20775ca02815Sjsg continue;
20785ca02815Sjsg
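		/*
		 * Shift the merged damage rect from plane source coordinates
		 * into pipe coordinates.
		 */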
20795ca02815Sjsg damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
20805ca02815Sjsg damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
20811bb76ff1Sjsg damaged_area.x1 += new_plane_state->uapi.dst.x1 - src.x1;
20821bb76ff1Sjsg damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;
20831bb76ff1Sjsg
20841bb76ff1Sjsg clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
20851bb76ff1Sjsg }
20861bb76ff1Sjsg
20871bb76ff1Sjsg /*
20881bb76ff1Sjsg * TODO: For now we are just using full update in case
20891bb76ff1Sjsg * selective fetch area calculation fails. To optimize this we
20901bb76ff1Sjsg * should identify cases where this happens and fix the area
20911bb76ff1Sjsg * calculation for those.
20921bb76ff1Sjsg */
20931bb76ff1Sjsg if (pipe_clip.y1 == -1) {
20941bb76ff1Sjsg drm_info_once(&dev_priv->drm,
20951bb76ff1Sjsg "Selective fetch area calculation failed in pipe %c\n",
20961bb76ff1Sjsg pipe_name(crtc->pipe));
20971bb76ff1Sjsg full_update = true;
20985ca02815Sjsg }
20995ca02815Sjsg
21005ca02815Sjsg if (full_update)
21015ca02815Sjsg goto skip_sel_fetch_set_loop;
21025ca02815Sjsg
2103f005ef32Sjsg /* Wa_14014971492 */
2104f005ef32Sjsg if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
2105f005ef32Sjsg IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
2106f005ef32Sjsg crtc_state->splitter.enable)
2107f005ef32Sjsg pipe_clip.y1 = 0;
2108f005ef32Sjsg
21091bb76ff1Sjsg ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
21101bb76ff1Sjsg if (ret)
21111bb76ff1Sjsg return ret;
21121bb76ff1Sjsg
21135ca02815Sjsg intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
21145ca02815Sjsg
21155ca02815Sjsg /*
21165ca02815Sjsg	 * Now that we have the pipe damaged area, check if it intersects
21175ca02815Sjsg	 * with each plane; if it does, set the plane's selective fetch area.
21185ca02815Sjsg */
21195ca02815Sjsg for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
21205ca02815Sjsg new_plane_state, i) {
21215ca02815Sjsg struct drm_rect *sel_fetch_area, inter;
21221bb76ff1Sjsg struct intel_plane *linked = new_plane_state->planar_linked_plane;
21235ca02815Sjsg
21245ca02815Sjsg if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc ||
21255ca02815Sjsg !new_plane_state->uapi.visible)
21265ca02815Sjsg continue;
21275ca02815Sjsg
21285ca02815Sjsg inter = pipe_clip;
21295ca02815Sjsg if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
21305ca02815Sjsg continue;
21315ca02815Sjsg
21321bb76ff1Sjsg if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
21331bb76ff1Sjsg full_update = true;
21341bb76ff1Sjsg break;
21351bb76ff1Sjsg }
21361bb76ff1Sjsg
21375ca02815Sjsg sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
21385ca02815Sjsg sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
21395ca02815Sjsg sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
21405ca02815Sjsg crtc_state->update_planes |= BIT(plane->id);
21411bb76ff1Sjsg
21421bb76ff1Sjsg /*
21431bb76ff1Sjsg		 * Sel_fetch_area is calculated for the UV plane. Use the
21441bb76ff1Sjsg		 * same area for the Y plane as well.
21451bb76ff1Sjsg */
21461bb76ff1Sjsg if (linked) {
21471bb76ff1Sjsg struct intel_plane_state *linked_new_plane_state;
21481bb76ff1Sjsg struct drm_rect *linked_sel_fetch_area;
21491bb76ff1Sjsg
21501bb76ff1Sjsg linked_new_plane_state = intel_atomic_get_plane_state(state, linked);
21511bb76ff1Sjsg if (IS_ERR(linked_new_plane_state))
21521bb76ff1Sjsg return PTR_ERR(linked_new_plane_state);
21531bb76ff1Sjsg
21541bb76ff1Sjsg linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area;
21551bb76ff1Sjsg linked_sel_fetch_area->y1 = sel_fetch_area->y1;
21561bb76ff1Sjsg linked_sel_fetch_area->y2 = sel_fetch_area->y2;
21571bb76ff1Sjsg crtc_state->update_planes |= BIT(linked->id);
21581bb76ff1Sjsg }
21595ca02815Sjsg }
21605ca02815Sjsg
21615ca02815Sjsg skip_sel_fetch_set_loop:
21625ca02815Sjsg psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update);
21635ca02815Sjsg return 0;
2164ad8b1aafSjsg }
2165ad8b1aafSjsg
21661bb76ff1Sjsg void intel_psr_pre_plane_update(struct intel_atomic_state *state,
21671bb76ff1Sjsg struct intel_crtc *crtc)
2168c349dbc7Sjsg {
21691bb76ff1Sjsg struct drm_i915_private *i915 = to_i915(state->base.dev);
21701bb76ff1Sjsg const struct intel_crtc_state *old_crtc_state =
21711bb76ff1Sjsg intel_atomic_get_old_crtc_state(state, crtc);
21721bb76ff1Sjsg const struct intel_crtc_state *new_crtc_state =
21731bb76ff1Sjsg intel_atomic_get_new_crtc_state(state, crtc);
21741bb76ff1Sjsg struct intel_encoder *encoder;
2175c349dbc7Sjsg
21761bb76ff1Sjsg if (!HAS_PSR(i915))
2177c349dbc7Sjsg return;
2178c349dbc7Sjsg
21791bb76ff1Sjsg for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
21801bb76ff1Sjsg old_crtc_state->uapi.encoder_mask) {
21811bb76ff1Sjsg struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
21821bb76ff1Sjsg struct intel_psr *psr = &intel_dp->psr;
21831bb76ff1Sjsg bool needs_to_disable = false;
2184c349dbc7Sjsg
21851bb76ff1Sjsg mutex_lock(&psr->lock);
2186c349dbc7Sjsg
21871bb76ff1Sjsg /*
21881bb76ff1Sjsg * Reasons to disable:
21891bb76ff1Sjsg * - PSR disabled in new state
21901bb76ff1Sjsg * - All planes will go inactive
21911bb76ff1Sjsg * - Changing between PSR versions
2192f005ef32Sjsg * - Display WA #1136: skl, bxt
21931bb76ff1Sjsg */
21941bb76ff1Sjsg needs_to_disable |= intel_crtc_needs_modeset(new_crtc_state);
21951bb76ff1Sjsg needs_to_disable |= !new_crtc_state->has_psr;
21961bb76ff1Sjsg needs_to_disable |= !new_crtc_state->active_planes;
21971bb76ff1Sjsg needs_to_disable |= new_crtc_state->has_psr2 != psr->psr2_enabled;
2198f005ef32Sjsg needs_to_disable |= DISPLAY_VER(i915) < 11 &&
2199f005ef32Sjsg new_crtc_state->wm_level_disabled;
22001bb76ff1Sjsg
22011bb76ff1Sjsg if (psr->enabled && needs_to_disable)
22021bb76ff1Sjsg intel_psr_disable_locked(intel_dp);
2203f005ef32Sjsg else if (psr->enabled && new_crtc_state->wm_level_disabled)
2204f005ef32Sjsg /* Wa_14015648006 */
2205f005ef32Sjsg wm_optimization_wa(intel_dp, new_crtc_state);
22061bb76ff1Sjsg
22071bb76ff1Sjsg mutex_unlock(&psr->lock);
22081bb76ff1Sjsg }
22091bb76ff1Sjsg }
22101bb76ff1Sjsg
22111bb76ff1Sjsg static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
22121bb76ff1Sjsg const struct intel_crtc_state *crtc_state)
22131bb76ff1Sjsg {
22141bb76ff1Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev);
22151bb76ff1Sjsg struct intel_encoder *encoder;
22161bb76ff1Sjsg
22171bb76ff1Sjsg if (!crtc_state->has_psr)
22181bb76ff1Sjsg return;
22191bb76ff1Sjsg
22201bb76ff1Sjsg for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
22211bb76ff1Sjsg crtc_state->uapi.encoder_mask) {
22221bb76ff1Sjsg struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
22231bb76ff1Sjsg struct intel_psr *psr = &intel_dp->psr;
2224f005ef32Sjsg bool keep_disabled = false;
22251bb76ff1Sjsg
22261bb76ff1Sjsg mutex_lock(&psr->lock);
22271bb76ff1Sjsg
22281bb76ff1Sjsg drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
22291bb76ff1Sjsg
2230f005ef32Sjsg keep_disabled |= psr->sink_not_reliable;
2231f005ef32Sjsg keep_disabled |= !crtc_state->active_planes;
2232f005ef32Sjsg
2233f005ef32Sjsg /* Display WA #1136: skl, bxt */
2234f005ef32Sjsg keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
2235f005ef32Sjsg crtc_state->wm_level_disabled;
2236f005ef32Sjsg
2237f005ef32Sjsg if (!psr->enabled && !keep_disabled)
22381bb76ff1Sjsg intel_psr_enable_locked(intel_dp, crtc_state);
2239f005ef32Sjsg else if (psr->enabled && !crtc_state->wm_level_disabled)
2240f005ef32Sjsg /* Wa_14015648006 */
2241f005ef32Sjsg wm_optimization_wa(intel_dp, crtc_state);
22421bb76ff1Sjsg
2243c349dbc7Sjsg /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
2244c349dbc7Sjsg if (crtc_state->crc_enabled && psr->enabled)
22455ca02815Sjsg psr_force_hw_tracking_exit(intel_dp);
22461bb76ff1Sjsg
22471bb76ff1Sjsg mutex_unlock(&psr->lock);
22481bb76ff1Sjsg }
22491bb76ff1Sjsg }
22501bb76ff1Sjsg
22511bb76ff1Sjsg void intel_psr_post_plane_update(const struct intel_atomic_state *state)
22521bb76ff1Sjsg {
22531bb76ff1Sjsg struct drm_i915_private *dev_priv = to_i915(state->base.dev);
22541bb76ff1Sjsg struct intel_crtc_state *crtc_state;
22551bb76ff1Sjsg struct intel_crtc *crtc;
22561bb76ff1Sjsg int i;
22571bb76ff1Sjsg
22581bb76ff1Sjsg if (!HAS_PSR(dev_priv))
22591bb76ff1Sjsg return;
22601bb76ff1Sjsg
22611bb76ff1Sjsg for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
22621bb76ff1Sjsg _intel_psr_post_plane_update(state, crtc_state);
22631bb76ff1Sjsg }
22641bb76ff1Sjsg
22651bb76ff1Sjsg static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
22661bb76ff1Sjsg {
22671bb76ff1Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2268f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
22691bb76ff1Sjsg
2270c349dbc7Sjsg /*
22711bb76ff1Sjsg * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
22721bb76ff1Sjsg	 * As all higher states have bit 4 of the PSR2 state set we can just wait for
22731bb76ff1Sjsg * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
2274c349dbc7Sjsg */
22751bb76ff1Sjsg return intel_de_wait_for_clear(dev_priv,
2276f005ef32Sjsg EDP_PSR2_STATUS(cpu_transcoder),
22771bb76ff1Sjsg EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
2278c349dbc7Sjsg }
2279c349dbc7Sjsg
22801bb76ff1Sjsg static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
2281c349dbc7Sjsg {
22825ca02815Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2283f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2284c349dbc7Sjsg
2285c349dbc7Sjsg /*
2286c349dbc7Sjsg * From bspec: Panel Self Refresh (BDW+)
2287c349dbc7Sjsg * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
2288c349dbc7Sjsg * exit training time + 1.5 ms of aux channel handshake. 50 ms is
2289c349dbc7Sjsg * defensive enough to cover everything.
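	 * E.g. at 60 Hz that is ~16.7 ms + 6 ms + 1.5 ms, roughly 24 ms, well
	 * within the 50 ms timeout.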
2290c349dbc7Sjsg */
22911bb76ff1Sjsg return intel_de_wait_for_clear(dev_priv,
2292f005ef32Sjsg psr_status_reg(dev_priv, cpu_transcoder),
22931bb76ff1Sjsg EDP_PSR_STATUS_STATE_MASK, 50);
2294c349dbc7Sjsg }
2295c349dbc7Sjsg
22965ca02815Sjsg /**
22971bb76ff1Sjsg  * intel_psr_wait_for_idle_locked - wait for PSR to be ready for a pipe update
22985ca02815Sjsg * @new_crtc_state: new CRTC state
22995ca02815Sjsg *
23005ca02815Sjsg * This function is expected to be called from pipe_update_start() where it is
23015ca02815Sjsg * not expected to race with PSR enable or disable.
23025ca02815Sjsg */
23031bb76ff1Sjsg void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
2304c349dbc7Sjsg {
23055ca02815Sjsg struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
23065ca02815Sjsg struct intel_encoder *encoder;
23075ca02815Sjsg
23085ca02815Sjsg if (!new_crtc_state->has_psr)
23095ca02815Sjsg return;
23105ca02815Sjsg
23115ca02815Sjsg for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
23125ca02815Sjsg new_crtc_state->uapi.encoder_mask) {
23135ca02815Sjsg struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
23141bb76ff1Sjsg int ret;
23155ca02815Sjsg
23161bb76ff1Sjsg lockdep_assert_held(&intel_dp->psr.lock);
23171bb76ff1Sjsg
23181bb76ff1Sjsg if (!intel_dp->psr.enabled)
23195ca02815Sjsg continue;
23205ca02815Sjsg
23211bb76ff1Sjsg if (intel_dp->psr.psr2_enabled)
23221bb76ff1Sjsg ret = _psr2_ready_for_pipe_update_locked(intel_dp);
23231bb76ff1Sjsg else
23241bb76ff1Sjsg ret = _psr1_ready_for_pipe_update_locked(intel_dp);
23251bb76ff1Sjsg
23261bb76ff1Sjsg if (ret)
23271bb76ff1Sjsg drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
23285ca02815Sjsg }
23295ca02815Sjsg }
23305ca02815Sjsg
23315ca02815Sjsg static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
23325ca02815Sjsg {
23335ca02815Sjsg struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2334f005ef32Sjsg enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
2335c349dbc7Sjsg i915_reg_t reg;
2336c349dbc7Sjsg u32 mask;
2337c349dbc7Sjsg int err;
2338c349dbc7Sjsg
23395ca02815Sjsg if (!intel_dp->psr.enabled)
2340c349dbc7Sjsg return false;
2341c349dbc7Sjsg
23425ca02815Sjsg if (intel_dp->psr.psr2_enabled) {
2343f005ef32Sjsg reg = EDP_PSR2_STATUS(cpu_transcoder);
2344c349dbc7Sjsg mask = EDP_PSR2_STATUS_STATE_MASK;
2345c349dbc7Sjsg } else {
2346f005ef32Sjsg reg = psr_status_reg(dev_priv, cpu_transcoder);
2347c349dbc7Sjsg mask = EDP_PSR_STATUS_STATE_MASK;
2348c349dbc7Sjsg }
2349c349dbc7Sjsg
23505ca02815Sjsg mutex_unlock(&intel_dp->psr.lock);
2351c349dbc7Sjsg
2352c349dbc7Sjsg err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
2353c349dbc7Sjsg if (err)
2354c349dbc7Sjsg drm_err(&dev_priv->drm,
2355c349dbc7Sjsg "Timed out waiting for PSR Idle for re-enable\n");
2356c349dbc7Sjsg
2357c349dbc7Sjsg /* After the unlocked wait, verify that PSR is still wanted! */
23585ca02815Sjsg mutex_lock(&intel_dp->psr.lock);
23595ca02815Sjsg return err == 0 && intel_dp->psr.enabled;
2360c349dbc7Sjsg }
2361c349dbc7Sjsg
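/*
 * Apply a changed PSR debug mode by forcing a modeset: mark the mode as
 * changed on every CRTC driving an eDP connector and commit the state,
 * resolving -EDEADLK lock contention with the standard drm_modeset_backoff()
 * retry loop.
 */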
static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state;
	struct drm_connector *conn;
	int err = 0;

	state = drm_atomic_state_alloc(&dev_priv->drm);
	if (!state)
		return -ENOMEM;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	state->acquire_ctx = &ctx;
	to_intel_atomic_state(state)->internal = true;

retry:
	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct drm_crtc_state *crtc_state;

		if (conn->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			err = PTR_ERR(conn_state);
			break;
		}

		if (!conn_state->crtc)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, conn_state->crtc);
		if (IS_ERR(crtc_state)) {
			err = PTR_ERR(crtc_state);
			break;
		}

		/* Mark mode as changed to trigger a pipe->update() */
		crtc_state->mode_changed = true;
	}
	drm_connector_list_iter_end(&conn_iter);

	if (err == 0)
		err = drm_atomic_commit(state);

	if (err == -EDEADLK) {
		drm_atomic_state_clear(state);
		err = drm_modeset_backoff(&ctx);
		if (!err)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_atomic_state_put(state);

	return err;
}

int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
	u32 old_mode;
	int ret;

	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
	    mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
		drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
	if (ret)
		return ret;

	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
	intel_dp->psr.debug = val;

	/*
	 * Do it right away if it's already enabled, otherwise it will be done
	 * when enabling the source.
	 */
	if (intel_dp->psr.enabled)
		psr_irq_control(intel_dp);

	mutex_unlock(&intel_dp->psr.lock);

	if (old_mode != mode)
		ret = intel_psr_fastset_force(dev_priv);

	return ret;
}

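/*
 * Handle a previously flagged AUX error interrupt: disable PSR, mark the
 * sink as not reliable and explicitly wake it via DP_SET_POWER in case it
 * is stuck in a low power state.
 */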
static void intel_psr_handle_irq(struct intel_dp *intel_dp)
{
	struct intel_psr *psr = &intel_dp->psr;

	intel_psr_disable_locked(intel_dp);
	psr->sink_not_reliable = true;
	/* let's make sure that sink is awake */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}

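/*
 * Deferred re-activation of PSR after a frontbuffer flush: handle any
 * pending AUX error first, then wait for the hardware to report idle
 * before calling intel_psr_activate().
 */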
static void intel_psr_work(struct work_struct *work)
{
	struct intel_dp *intel_dp =
		container_of(work, typeof(*intel_dp), psr.work);

	mutex_lock(&intel_dp->psr.lock);

	if (!intel_dp->psr.enabled)
		goto unlock;

	if (READ_ONCE(intel_dp->psr.irq_aux_error))
		intel_psr_handle_irq(intel_dp);

	/*
	 * We have to make sure PSR is ready for re-enable
	 * otherwise it stays disabled until the next full enable/disable cycle.
	 * PSR might take some time to get fully disabled
	 * and be ready for re-enable.
	 */
	if (!__psr_wait_for_idle_locked(intel_dp))
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck. Since psr_flush first clears this and then reschedules we
	 * won't ever miss a flush when bailing out here.
	 */
	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
		goto unlock;

	intel_psr_activate(intel_dp);
unlock:
	mutex_unlock(&intel_dp->psr.lock);
}

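/*
 * Invalidate handling for PSR2 selective fetch: switch manual tracking to
 * continuous full frame (CFF) mode so full frames are fetched while the
 * frontbuffer is busy, writing CURSURFLIVE to push one update through.
 * Without selective fetch, simply force a PSR exit.
 */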
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		u32 val;

		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			/* Send one update, otherwise lag is observed on screen */
			intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
			return;
		}

		val = man_trk_ctl_enable_bit_get(dev_priv) |
		      man_trk_ctl_partial_frame_bit_get(dev_priv) |
		      man_trk_ctl_continuos_full_frame(dev_priv);
		intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder), val);
		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
		intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
	} else {
		intel_psr_exit(intel_dp);
	}
}

/**
 * intel_psr_invalidate - Invalidate PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the invalidate
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
			  unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	if (origin == ORIGIN_FLIP)
		return;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;

		if (pipe_frontbuffer_bits)
			_psr_invalidate_handle(intel_dp);

		mutex_unlock(&intel_dp->psr.lock);
	}
}

/*
 * Once we completely rely on PSR2 S/W tracking in the future,
 * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
 * events as well, so tgl_dc3co_flush_locked() will need to be changed
 * accordingly.
 */
static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
		       enum fb_op_origin origin)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
	    !intel_dp->psr.active)
		return;

	/*
	 * Every frontbuffer flush/flip event pushes back the delayed work;
	 * when the delayed work finally runs it means the display has been
	 * idle for the whole delay.
	 */
	if (!(frontbuffer_bits &
	      INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
		return;

	tgl_psr2_enable_dc3co(intel_dp);
	mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work,
			 intel_dp->psr.dc3co_exit_delay);
}

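/*
 * Flush handling: with selective fetch, drop back from continuous full
 * frame (CFF) mode to selective updates once no frontbuffer bits are busy;
 * otherwise force a single full frame through HW tracking and, if PSR is
 * fully idle, queue the work that re-activates it.
 */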
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;

	if (intel_dp->psr.psr2_sel_fetch_enabled) {
		if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
			/* can we turn CFF off? */
			if (intel_dp->psr.busy_frontbuffer_bits == 0) {
				u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
					man_trk_ctl_partial_frame_bit_get(dev_priv) |
					man_trk_ctl_single_full_frame_bit_get(dev_priv) |
					man_trk_ctl_continuos_full_frame(dev_priv);

				/*
				 * Set psr2_sel_fetch_cff_enabled to false to allow selective
				 * updates. Still keep the CFF bit enabled as we don't have a
				 * proper SU configuration in case an update is sent for any
				 * reason after the SFF bit gets cleared by the HW on the
				 * next vblank.
				 */
				intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
					       val);
				intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
				intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
			}
		} else {
			/*
			 * continuous full frame is disabled, only a single full
			 * frame is required
			 */
			psr_force_hw_tracking_exit(intel_dp);
		}
	} else {
		psr_force_hw_tracking_exit(intel_dp);

		if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
			queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
	}
}

/**
 * intel_psr_flush - Flush PSR
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
 * Since the hardware frontbuffer tracking has gaps we need to integrate
 * with the software frontbuffer tracking. This function gets called every
 * time frontbuffer rendering has completed and flushed out to memory. PSR
 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
 *
 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
 */
void intel_psr_flush(struct drm_i915_private *dev_priv,
		     unsigned frontbuffer_bits, enum fb_op_origin origin)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		if (!intel_dp->psr.enabled) {
			mutex_unlock(&intel_dp->psr.lock);
			continue;
		}

		pipe_frontbuffer_bits &=
			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
		intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;

		/*
		 * If PSR is paused by an explicit intel_psr_pause() call, we
		 * have to ensure that PSR is not activated until
		 * intel_psr_resume() is called.
		 */
		if (intel_dp->psr.paused)
			goto unlock;

		if (origin == ORIGIN_FLIP ||
		    (origin == ORIGIN_CURSOR_UPDATE &&
		     !intel_dp->psr.psr2_sel_fetch_enabled)) {
			tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
			goto unlock;
		}

		if (pipe_frontbuffer_bits == 0)
			goto unlock;

		/* By definition flush = invalidate + flush */
		_psr_flush_handle(intel_dp);
unlock:
		mutex_unlock(&intel_dp->psr.lock);
	}
}

/**
 * intel_psr_init - Init basic PSR work and mutex.
 * @intel_dp: Intel DP
 *
 * This function is called after initializing the connector (the connector
 * initialization handles the connector capabilities) and it initializes
 * the basic PSR state for each DP encoder.
 */
void intel_psr_init(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!HAS_PSR(dev_priv))
		return;

	/*
	 * HSW spec explicitly says PSR is tied to port A.
	 * BDW+ platforms have an instance of the PSR registers per
	 * transcoder, but on BDW, GEN9 and GEN11 the HW team only validated
	 * the eDP transcoder. For now only one instance of PSR is supported
	 * there, so let's keep it hardcoded to PORT_A for BDW, GEN9 and
	 * GEN11. GEN12 onwards supports an instance of the PSR registers
	 * per transcoder.
	 */
	if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
		drm_dbg_kms(&dev_priv->drm,
			    "PSR condition failed: Port not supported\n");
		return;
	}

	intel_dp->psr.source_support = true;

	/* Set link_standby x link_off defaults */
	if (DISPLAY_VER(dev_priv) < 12)
		/* For platforms up to TGL let's respect the VBT back again */
		intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;

	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
	rw_init(&intel_dp->psr.lock, "psrlk");
}

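/*
 * Read the sink's PSR status and PSR error status over DPCD. Returns 0 on
 * success with *status masked down to DP_PSR_SINK_STATE_MASK, otherwise
 * the result of the failing AUX read.
 */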
static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
					   u8 *status, u8 *error_status)
{
	struct drm_dp_aux *aux = &intel_dp->aux;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
	if (ret != 1)
		return ret;

	ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
	if (ret != 1)
		return ret;

	*status = *status & DP_PSR_SINK_STATE_MASK;

	return 0;
}

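/*
 * Check the sink's ALPM status, which PSR2 depends on: on an ALPM lock
 * timeout error, disable PSR, mark the sink as not reliable and clear the
 * error bit in the sink.
 */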
static void psr_alpm_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_dp_aux *aux = &intel_dp->aux;
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	if (!psr->psr2_enabled)
		return;

	r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading ALPM status\n");
		return;
	}

	if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "ALPM lock timeout error, disabling PSR\n");

		/* Clearing error */
		drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
	}
}

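/*
 * The sink can signal via the PSR ESI register that its PSR capabilities
 * have changed; when it does, disable PSR, mark the sink as not reliable
 * and acknowledge the event by writing the bit back.
 */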
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 val;
	int r;

	r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
	if (r != 1) {
		drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
		return;
	}

	if (val & DP_PSR_CAPS_CHANGE) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
		drm_dbg_kms(&dev_priv->drm,
			    "Sink PSR capability changed, disabling PSR\n");

		/* Clearing it */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
	}
}

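/**
 * intel_psr_short_pulse - handle a short HPD pulse for a PSR sink
 * @intel_dp: Intel DP
 *
 * Reads the sink's PSR status and error status and disables PSR (marking
 * the sink as not reliable) on a sink internal error or on RFB storage,
 * VSC SDP uncorrectable or link CRC errors, then clears the error bits.
 */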
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_psr *psr = &intel_dp->psr;
	u8 status, error_status;
	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
			  DP_PSR_LINK_CRC_ERROR;

	if (!CAN_PSR(intel_dp))
		return;

	mutex_lock(&psr->lock);

	if (!psr->enabled)
		goto exit;

	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
		drm_err(&dev_priv->drm,
			"Error reading PSR status or error status\n");
		goto exit;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
		intel_psr_disable_locked(intel_dp);
		psr->sink_not_reliable = true;
	}

	if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR sink internal error, disabling PSR\n");
	if (error_status & DP_PSR_RFB_STORAGE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR RFB storage error, disabling PSR\n");
	if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR VSC SDP uncorrectable error, disabling PSR\n");
	if (error_status & DP_PSR_LINK_CRC_ERROR)
		drm_dbg_kms(&dev_priv->drm,
			    "PSR Link CRC error, disabling PSR\n");

	if (error_status & ~errors)
		drm_err(&dev_priv->drm,
			"PSR_ERROR_STATUS unhandled errors %x\n",
			error_status & ~errors);
	/* clear status register */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);

	psr_alpm_check(intel_dp);
	psr_capability_changed_check(intel_dp);

exit:
	mutex_unlock(&psr->lock);
}

bool intel_psr_enabled(struct intel_dp *intel_dp)
{
	bool ret;

	if (!CAN_PSR(intel_dp))
		return false;

	mutex_lock(&intel_dp->psr.lock);
	ret = intel_dp->psr.enabled;
	mutex_unlock(&intel_dp->psr.lock);

	return ret;
}

/**
 * intel_psr_lock - grab PSR lock
 * @crtc_state: the crtc state
 *
 * This is initially meant to be used around the CRTC update, when
 * vblank sensitive registers are updated and we need to grab the lock
 * before that to avoid vblank evasion.
 */
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_lock(&intel_dp->psr.lock);
		break;
	}
}

/**
 * intel_psr_unlock - release PSR lock
 * @crtc_state: the crtc state
 *
 * Release the PSR lock that was held during pipe update.
 */
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
	struct intel_encoder *encoder;

	if (!crtc_state->has_psr)
		return;

	for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
					     crtc_state->uapi.encoder_mask) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		mutex_unlock(&intel_dp->psr.lock);
		break;
	}
}

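/*
 * Decode the source (hardware) PSR state machine's live status into a
 * human readable string for debugfs. PSR1 and PSR2 use different status
 * registers and different state encodings.
 */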
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	const char *status = "unknown";
	u32 val, status_val;

	if (intel_dp->psr.psr2_enabled) {
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = intel_de_read(dev_priv, EDP_PSR2_STATUS(cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
		status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}

static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
	struct intel_psr *psr = &intel_dp->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
	if (psr->sink_support)
		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled) {
		seq_printf(m, "PSR sink not reliable: %s\n",
			   str_yes_no(psr->sink_not_reliable));

		goto unlock;
	}

	if (psr->psr2_enabled) {
		val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   str_enabled_disabled(enabled), val);
	psr_source_status(intel_dp, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 every time DC state is entered
	 */
	val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
	seq_printf(m, "Performance counter: %u\n",
		   REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));

	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers beforehand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
			val = intel_de_read(dev_priv, PSR2_SU_STATUS(cpu_transcoder, frame));
			su_frames_val[frame / 3] = val;
		}

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}

		seq_printf(m, "PSR2 selective fetch: %s\n",
			   str_enabled_disabled(psr->psr2_sel_fetch_enabled));
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}

static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct intel_dp *intel_dp = NULL;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	/* Find the first EDP which supports PSR */
	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		intel_dp = enc_to_intel_dp(encoder);
		break;
	}

	if (!intel_dp)
		return -ENODEV;

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);

static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;
	intel_wakeref_t wakeref;
	int ret = -ENODEV;

	if (!HAS_PSR(dev_priv))
		return ret;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);

		wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

		// TODO: split to each transcoder's PSR debug state
		ret = intel_psr_debug_set(intel_dp, val);

		intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	}

	return ret;
}

static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	struct intel_encoder *encoder;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		// TODO: split to each transcoder's PSR debug state
		*val = READ_ONCE(intel_dp->psr.debug);
		return 0;
	}

	return -ENODEV;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");

void intel_psr_debugfs_register(struct drm_i915_private *i915)
{
	struct drm_minor *minor = i915->drm.primary;

	debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
			    i915, &i915_edp_psr_debug_fops);

	debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
			    i915, &i915_edp_psr_status_fops);
}

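/*
 * Example use of the interface above from userspace, as a sketch only:
 * the exact debugfs path depends on the DRM minor number and mount point,
 * and the flag values assume the I915_PSR_DEBUG_* defines (FORCE_PSR1 as
 * 0x3, IRQ as 0x10) from intel_psr.h.
 *
 *   # read current PSR state, errors and performance counter
 *   cat /sys/kernel/debug/dri/0/i915_edp_psr_status
 *   # force PSR1 and enable IRQ debugging (0x3 | 0x10)
 *   echo 0x13 > /sys/kernel/debug/dri/0/i915_edp_psr_debug
 */
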
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	static const char * const sink_status[] = {
		"inactive",
		"transition to active, capture and display",
		"active, display from RFB",
		"active, capture and display on sink device timings",
		"transition to inactive, capture and display, timing re-sync",
		"reserved",
		"reserved",
		"sink internal error",
	};
	const char *str;
	int ret;
	u8 val;

	if (!CAN_PSR(intel_dp)) {
		seq_puts(m, "PSR Unsupported\n");
		return -ENODEV;
	}

	if (connector->base.status != connector_status_connected)
		return -ENODEV;

	ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
	if (ret != 1)
		return ret < 0 ? ret : -EIO;

	val &= DP_PSR_SINK_STATE_MASK;
	if (val < ARRAY_SIZE(sink_status))
		str = sink_status[val];
	else
		str = "unknown";

	seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static int i915_psr_status_show(struct seq_file *m, void *data)
{
	struct intel_connector *connector = m->private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	return intel_psr_status(m, intel_dp);
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);

void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct dentry *root = connector->base.debugfs_entry;

	if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
		return;

	debugfs_create_file("i915_psr_sink_status", 0444, root,
			    connector, &i915_psr_sink_status_fops);

	if (HAS_PSR(i915))
		debugfs_create_file("i915_psr_status", 0444, root,
				    connector, &i915_psr_status_fops);
}