1 /* $NetBSD: intel_psr.c,v 1.3 2021/12/19 11:49:11 riastradh Exp $ */
2
3 /*
4 * Copyright © 2014 Intel Corporation
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: intel_psr.c,v 1.3 2021/12/19 11:49:11 riastradh Exp $");
28
29 #include <drm/drm_atomic_helper.h>
30
31 #include "display/intel_dp.h"
32
33 #include "i915_drv.h"
34 #include "intel_atomic.h"
35 #include "intel_display_types.h"
36 #include "intel_psr.h"
37 #include "intel_sprite.h"
38
39 #include <linux/nbsd-namespace.h>
40
41 /**
42 * DOC: Panel Self Refresh (PSR/SRD)
43 *
44 * Since Haswell the display controller supports Panel Self-Refresh on
45 * display panels which have a remote frame buffer (RFB) implemented
46 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
47 * lower standby states when the system is idle but the display is on, as
48 * it eliminates display refresh requests to DDR memory completely as long
49 * as the frame buffer for that display is unchanged.
50 *
51 * Panel Self Refresh must be supported by both Hardware (source) and
52 * Panel (sink).
53 *
54 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
55 * to power down the link and memory controller. For DSI panels the same idea
56 * is called "manual mode".
57 *
58 * The implementation uses the hardware-based PSR support which automatically
59 * enters/exits self-refresh mode. The hardware takes care of sending the
60 * required DP aux message and could even retrain the link (that part isn't
61 * enabled yet though). The hardware also keeps track of any frontbuffer
62 * changes to know when to exit self-refresh mode again. Unfortunately that
63 * part doesn't work too well, which is why the i915 PSR support uses the
64 * software frontbuffer tracking to make sure it doesn't miss a screen
65 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
66 * get called by the frontbuffer tracking code. Note that because of locking
67 * issues the self-refresh re-enable code is done from a work queue, which
68 * must be correctly synchronized/cancelled when shutting down the pipe.
69 */
70
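/*
 * Whether PSR is allowed at all: a non-default I915_PSR_DEBUG_* mode in
 * the debug value overrides the enable_psr module parameter.
 */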
71 static bool psr_global_enabled(u32 debug)
72 {
73 switch (debug & I915_PSR_DEBUG_MODE_MASK) {
74 case I915_PSR_DEBUG_DEFAULT:
75 return i915_modparams.enable_psr;
76 case I915_PSR_DEBUG_DISABLE:
77 return false;
78 default:
79 return true;
80 }
81 }
82
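/*
 * Whether PSR2 should be used for this CRTC state: the debug mode can
 * force PSR1 or disable PSR entirely, otherwise crtc_state->has_psr2
 * decides.
 */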
83 static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
84 const struct intel_crtc_state *crtc_state)
85 {
86 /* Cannot enable DSC and PSR2 simultaneously */
87 WARN_ON(crtc_state->dsc.compression_enable &&
88 crtc_state->has_psr2);
89
90 switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
91 case I915_PSR_DEBUG_DISABLE:
92 case I915_PSR_DEBUG_FORCE_PSR1:
93 return false;
94 default:
95 return crtc_state->has_psr2;
96 }
97 }
98
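/*
 * Program the PSR interrupt mask register: error interrupts are always
 * left unmasked, PSR entry/exit interrupts only when I915_PSR_DEBUG_IRQ
 * is set.
 */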
99 static void psr_irq_control(struct drm_i915_private *dev_priv)
100 {
101 enum transcoder trans_shift;
102 u32 mask, val;
103 i915_reg_t imr_reg;
104
105 /*
106 * gen12+ has registers relative to the transcoder, one per transcoder,
107 * using the same bit definitions: handle it as TRANSCODER_EDP to force
108 * a 0 shift in the bit definitions
109 */
110 if (INTEL_GEN(dev_priv) >= 12) {
111 trans_shift = 0;
112 imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
113 } else {
114 trans_shift = dev_priv->psr.transcoder;
115 imr_reg = EDP_PSR_IMR;
116 }
117
118 mask = EDP_PSR_ERROR(trans_shift);
119 if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
120 mask |= EDP_PSR_POST_EXIT(trans_shift) |
121 EDP_PSR_PRE_ENTRY(trans_shift);
122
123 /* Warning: it is masking/setting reserved bits too */
124 val = I915_READ(imr_reg);
125 val &= ~EDP_PSR_TRANS_MASK(trans_shift);
126 val |= ~mask;
127 I915_WRITE(imr_reg, val);
128 }
129
130 static void psr_event_print(u32 val, bool psr2_enabled)
131 {
132 DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
133 if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
134 DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
135 if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
136 DRM_DEBUG_KMS("\tPSR2 disabled\n");
137 if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
138 DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
139 if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
140 DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
141 if (val & PSR_EVENT_GRAPHICS_RESET)
142 DRM_DEBUG_KMS("\tGraphics reset\n");
143 if (val & PSR_EVENT_PCH_INTERRUPT)
144 DRM_DEBUG_KMS("\tPCH interrupt\n");
145 if (val & PSR_EVENT_MEMORY_UP)
146 DRM_DEBUG_KMS("\tMemory up\n");
147 if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
148 DRM_DEBUG_KMS("\tFront buffer modification\n");
149 if (val & PSR_EVENT_WD_TIMER_EXPIRE)
150 DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
151 if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
152 DRM_DEBUG_KMS("\tPIPE registers updated\n");
153 if (val & PSR_EVENT_REGISTER_UPDATE)
154 DRM_DEBUG_KMS("\tRegister updated\n");
155 if (val & PSR_EVENT_HDCP_ENABLE)
156 DRM_DEBUG_KMS("\tHDCP enabled\n");
157 if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
158 DRM_DEBUG_KMS("\tKVMR session enabled\n");
159 if (val & PSR_EVENT_VBI_ENABLE)
160 DRM_DEBUG_KMS("\tVBI enabled\n");
161 if (val & PSR_EVENT_LPSP_MODE_EXIT)
162 DRM_DEBUG_KMS("\tLPSP mode exited\n");
163 if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
164 DRM_DEBUG_KMS("\tPSR disabled\n");
165 }
166
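/*
 * PSR interrupt handler: records entry attempts and exits, prints the
 * PSR exit events on gen9+ and, on an AUX error, masks the error
 * interrupt, flags irq_aux_error and schedules psr.work.
 */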
167 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
168 {
169 enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
170 enum transcoder trans_shift;
171 i915_reg_t imr_reg;
172 ktime_t time_ns = ktime_get();
173
174 if (INTEL_GEN(dev_priv) >= 12) {
175 trans_shift = 0;
176 imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
177 } else {
178 trans_shift = dev_priv->psr.transcoder;
179 imr_reg = EDP_PSR_IMR;
180 }
181
182 if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
183 dev_priv->psr.last_entry_attempt = time_ns;
184 DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
185 transcoder_name(cpu_transcoder));
186 }
187
188 if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
189 dev_priv->psr.last_exit = time_ns;
190 DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
191 transcoder_name(cpu_transcoder));
192
193 if (INTEL_GEN(dev_priv) >= 9) {
194 u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
195 bool psr2_enabled = dev_priv->psr.psr2_enabled;
196
197 I915_WRITE(PSR_EVENT(cpu_transcoder), val);
198 psr_event_print(val, psr2_enabled);
199 }
200 }
201
202 if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
203 u32 val;
204
205 DRM_WARN("[transcoder %s] PSR aux error\n",
206 transcoder_name(cpu_transcoder));
207
208 dev_priv->psr.irq_aux_error = true;
209
210 /*
211 * If this interrupt is not masked it will keep firing
212 * so fast that it prevents the scheduled work from
213 * running.
214 * Also, after a PSR error we don't want to arm PSR
215 * again, so we don't care about unmasking the interrupt
216 * or clearing irq_aux_error.
217 */
218 val = I915_READ(imr_reg);
219 val |= EDP_PSR_ERROR(trans_shift);
220 I915_WRITE(imr_reg, val);
221
222 schedule_work(&dev_priv->psr.work);
223 }
224 }
225
226 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
227 {
228 u8 alpm_caps = 0;
229
230 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
231 &alpm_caps) != 1)
232 return false;
233 return alpm_caps & DP_ALPM_CAP;
234 }
235
236 static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
237 {
238 u8 val = 8; /* assume the worst if we can't read the value */
239
240 if (drm_dp_dpcd_readb(&intel_dp->aux,
241 DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
242 val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
243 else
244 DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
245 return val;
246 }
247
248 static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
249 {
250 u16 val;
251 ssize_t r;
252
253 /*
254 * Return the default X granularity if granularity is not required or
255 * if the DPCD read fails
256 */
257 if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
258 return 4;
259
260 r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
261 if (r != 2)
262 DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
263
264 /*
265 * Spec says that if the value read is 0 the default granularity should
266 * be used instead.
267 */
268 if (r != 2 || val == 0)
269 val = 4;
270
271 return val;
272 }
273
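/**
 * intel_psr_init_dpcd - read and cache the sink PSR capabilities
 * @intel_dp: Intel DP
 *
 * Reads the PSR DPCD registers and caches the sink capabilities (sync
 * latency, ALPM, colorimetry, SU X granularity) in dev_priv->psr,
 * remembering which eDP panel PSR will be driven on.
 */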
274 void intel_psr_init_dpcd(struct intel_dp *intel_dp)
275 {
276 struct drm_i915_private *dev_priv =
277 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
278
279 if (dev_priv->psr.dp) {
280 DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
281 return;
282 }
283
284 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
285 sizeof(intel_dp->psr_dpcd));
286
287 if (!intel_dp->psr_dpcd[0])
288 return;
289 DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
290 intel_dp->psr_dpcd[0]);
291
292 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
293 DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
294 return;
295 }
296
297 if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
298 DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
299 return;
300 }
301
302 dev_priv->psr.sink_support = true;
303 dev_priv->psr.sink_sync_latency =
304 intel_dp_get_sink_sync_latency(intel_dp);
305
306 dev_priv->psr.dp = intel_dp;
307
308 if (INTEL_GEN(dev_priv) >= 9 &&
309 (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
310 bool y_req = intel_dp->psr_dpcd[1] &
311 DP_PSR2_SU_Y_COORDINATE_REQUIRED;
312 bool alpm = intel_dp_get_alpm_status(intel_dp);
313
314 /*
315 * All panels that support PSR version 03h (PSR2 +
316 * Y-coordinate) can handle Y-coordinates in the VSC, but we
317 * are only sure that it is going to be used when required by
318 * the panel. This way the panel can do selective updates
319 * without an aux frame sync.
320 *
321 * To support PSR version 02h and PSR version 03h panels
322 * without the Y-coordinate requirement we would need to
323 * enable GTC first.
324 */
325 dev_priv->psr.sink_psr2_support = y_req && alpm;
326 DRM_DEBUG_KMS("PSR2 %ssupported\n",
327 dev_priv->psr.sink_psr2_support ? "" : "not ");
328
329 if (dev_priv->psr.sink_psr2_support) {
330 dev_priv->psr.colorimetry_support =
331 intel_dp_get_colorimetry_status(intel_dp);
332 dev_priv->psr.su_x_granularity =
333 intel_dp_get_su_x_granulartiy(intel_dp);
334 }
335 }
336 }
337
338 static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
339 const struct intel_crtc_state *crtc_state)
340 {
341 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
342 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
343 struct dp_sdp psr_vsc;
344
345 if (dev_priv->psr.psr2_enabled) {
346 /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
347 memset(&psr_vsc, 0, sizeof(psr_vsc));
348 psr_vsc.sdp_header.HB0 = 0;
349 psr_vsc.sdp_header.HB1 = 0x7;
350 if (dev_priv->psr.colorimetry_support) {
351 psr_vsc.sdp_header.HB2 = 0x5;
352 psr_vsc.sdp_header.HB3 = 0x13;
353 } else {
354 psr_vsc.sdp_header.HB2 = 0x4;
355 psr_vsc.sdp_header.HB3 = 0xe;
356 }
357 } else {
358 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
359 memset(&psr_vsc, 0, sizeof(psr_vsc));
360 psr_vsc.sdp_header.HB0 = 0;
361 psr_vsc.sdp_header.HB1 = 0x7;
362 psr_vsc.sdp_header.HB2 = 0x2;
363 psr_vsc.sdp_header.HB3 = 0x8;
364 }
365
366 intel_dig_port->write_infoframe(&intel_dig_port->base,
367 crtc_state,
368 DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
369 }
370
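/*
 * Pre-program the AUX message (a native write of DP_SET_POWER_D0) and
 * the AUX control bits that the PSR hardware on HSW/BDW uses for its
 * own AUX transactions.
 */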
371 static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
372 {
373 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
374 u32 aux_clock_divider, aux_ctl;
375 int i;
376 static const u8 aux_msg[] = {
377 [0] = DP_AUX_NATIVE_WRITE << 4,
378 [1] = DP_SET_POWER >> 8,
379 [2] = DP_SET_POWER & 0xff,
380 [3] = 1 - 1,
381 [4] = DP_SET_POWER_D0,
382 };
383 u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
384 EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
385 EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
386 EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
387
388 BUILD_BUG_ON(sizeof(aux_msg) > 20);
389 for (i = 0; i < sizeof(aux_msg); i += 4)
390 I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
391 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
392
393 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
394
395 /* Start with bits set for DDI_AUX_CTL register */
396 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
397 aux_clock_divider);
398
399 /* Select only valid bits for SRD_AUX_CTL */
400 aux_ctl &= psr_aux_mask;
401 I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
402 }
403
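/*
 * Enable PSR in the sink via DPCD: for PSR2 also enable ALPM, for PSR1
 * request link standby and CRC verification where applicable, then put
 * the sink into the D0 power state.
 */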
404 static void intel_psr_enable_sink(struct intel_dp *intel_dp)
405 {
406 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
407 u8 dpcd_val = DP_PSR_ENABLE;
408
409 /* Enable ALPM at sink for psr2 */
410 if (dev_priv->psr.psr2_enabled) {
411 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
412 DP_ALPM_ENABLE |
413 DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
414
415 dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
416 } else {
417 if (dev_priv->psr.link_standby)
418 dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
419
420 if (INTEL_GEN(dev_priv) >= 8)
421 dpcd_val |= DP_PSR_CRC_VERIFICATION;
422 }
423
424 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
425
426 drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
427 }
428
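/*
 * Build the TP1/TP2/TP3 (and TP4 on gen11+) wakeup time fields of
 * EDP_PSR_CTL from the VBT wakeup times.
 */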
429 static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
430 {
431 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
432 u32 val = 0;
433
434 if (INTEL_GEN(dev_priv) >= 11)
435 val |= EDP_PSR_TP4_TIME_0US;
436
437 if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
438 val |= EDP_PSR_TP1_TIME_0us;
439 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
440 val |= EDP_PSR_TP1_TIME_100us;
441 else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
442 val |= EDP_PSR_TP1_TIME_500us;
443 else
444 val |= EDP_PSR_TP1_TIME_2500us;
445
446 if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
447 val |= EDP_PSR_TP2_TP3_TIME_0us;
448 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
449 val |= EDP_PSR_TP2_TP3_TIME_100us;
450 else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
451 val |= EDP_PSR_TP2_TP3_TIME_500us;
452 else
453 val |= EDP_PSR_TP2_TP3_TIME_2500us;
454
455 if (intel_dp_source_supports_hbr2(intel_dp) &&
456 drm_dp_tps3_supported(intel_dp->dpcd))
457 val |= EDP_PSR_TP1_TP3_SEL;
458 else
459 val |= EDP_PSR_TP1_TP2_SEL;
460
461 return val;
462 }
463
464 static void hsw_activate_psr1(struct intel_dp *intel_dp)
465 {
466 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
467 u32 max_sleep_time = 0x1f;
468 u32 val = EDP_PSR_ENABLE;
469
470 /* Let's use 6 as the minimum to cover all known cases including the
471 * off-by-one issue that HW has in some cases.
472 */
473 int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
474
475 /* sink_sync_latency of 8 means the source has to wait for more than 8
476 * frames, so we'll go with 9 frames for now
477 */
478 idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
479 val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
480
481 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
482 if (IS_HASWELL(dev_priv))
483 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
484
485 if (dev_priv->psr.link_standby)
486 val |= EDP_PSR_LINK_STANDBY;
487
488 val |= intel_psr1_get_tp_time(intel_dp);
489
490 if (INTEL_GEN(dev_priv) >= 8)
491 val |= EDP_PSR_CRC_ENABLE;
492
493 val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
494 EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
495 I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
496 }
497
498 static void hsw_activate_psr2(struct intel_dp *intel_dp)
499 {
500 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
501 u32 val;
502
503 /* Let's use 6 as the minimum to cover all known cases including the
504 * off-by-one issue that HW has in some cases.
505 */
506 int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
507
508 idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
509 val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
510
511 val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
512 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
513 val |= EDP_Y_COORDINATE_ENABLE;
514
515 val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
516
517 if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
518 dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
519 val |= EDP_PSR2_TP2_TIME_50us;
520 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
521 val |= EDP_PSR2_TP2_TIME_100us;
522 else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
523 val |= EDP_PSR2_TP2_TIME_500us;
524 else
525 val |= EDP_PSR2_TP2_TIME_2500us;
526
527 /*
528 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
529 * recommends keeping this bit unset while PSR2 is enabled.
530 */
531 I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
532
533 I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
534 }
535
536 static bool
537 transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
538 {
539 if (INTEL_GEN(dev_priv) < 9)
540 return false;
541 else if (INTEL_GEN(dev_priv) >= 12)
542 return trans == TRANSCODER_A;
543 else
544 return trans == TRANSCODER_EDP;
545 }
546
547 static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
548 {
549 if (!cstate || !cstate->hw.active)
550 return 0;
551
552 return DIV_ROUND_UP(1000 * 1000,
553 drm_mode_vrefresh(&cstate->hw.adjusted_mode));
554 }
555
556 static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
557 u32 idle_frames)
558 {
559 u32 val;
560
561 idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
562 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
563 val &= ~EDP_PSR2_IDLE_FRAME_MASK;
564 val |= idle_frames;
565 I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
566 }
567
568 static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
569 {
570 psr2_program_idle_frames(dev_priv, 0);
571 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
572 }
573
574 static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
575 {
576 int idle_frames;
577
578 intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
579 /*
580 * Restore the PSR2 idle frame count; use 6 as the minimum to cover all
581 * known cases including the off-by-one issue that HW has in some cases.
582 */
583 idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
584 idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
585 psr2_program_idle_frames(dev_priv, idle_frames);
586 }
587
588 static void tgl_dc5_idle_thread(struct work_struct *work)
589 {
590 struct drm_i915_private *dev_priv =
591 container_of(work, typeof(*dev_priv), psr.idle_work.work);
592
593 mutex_lock(&dev_priv->psr.lock);
594 /* If delayed work is pending, it is not idle */
595 if (delayed_work_pending(&dev_priv->psr.idle_work))
596 goto unlock;
597
598 DRM_DEBUG_KMS("DC5/6 idle thread\n");
599 tgl_psr2_disable_dc3co(dev_priv);
600 unlock:
601 mutex_unlock(&dev_priv->psr.lock);
602 }
603
604 static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
605 {
606 if (!dev_priv->psr.dc3co_enabled)
607 return;
608
609 cancel_delayed_work(&dev_priv->psr.idle_work);
610 /* Before PSR2 exit, disallow dc3co */
611 tgl_psr2_disable_dc3co(dev_priv);
612 }
613
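/*
 * Check the PSR2 constraints for this CRTC state: transcoder support,
 * no DSC, per-platform resolution and pipe bpp limits, SU X granularity
 * and no pipe CRC.
 */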
614 static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
615 struct intel_crtc_state *crtc_state)
616 {
617 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
618 int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
619 int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
620 int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
621
622 if (!dev_priv->psr.sink_psr2_support)
623 return false;
624
625 if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
626 DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
627 transcoder_name(crtc_state->cpu_transcoder));
628 return false;
629 }
630
631 /*
632 * DSC and PSR2 cannot be enabled simultaneously. If a requested
633 * resolution requires DSC to be enabled, priority is given to DSC
634 * over PSR2.
635 */
636 if (crtc_state->dsc.compression_enable) {
637 DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
638 return false;
639 }
640
641 if (INTEL_GEN(dev_priv) >= 12) {
642 psr_max_h = 5120;
643 psr_max_v = 3200;
644 max_bpp = 30;
645 } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
646 psr_max_h = 4096;
647 psr_max_v = 2304;
648 max_bpp = 24;
649 } else if (IS_GEN(dev_priv, 9)) {
650 psr_max_h = 3640;
651 psr_max_v = 2304;
652 max_bpp = 24;
653 }
654
655 if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
656 DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
657 crtc_hdisplay, crtc_vdisplay,
658 psr_max_h, psr_max_v);
659 return false;
660 }
661
662 if (crtc_state->pipe_bpp > max_bpp) {
663 DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
664 crtc_state->pipe_bpp, max_bpp);
665 return false;
666 }
667
668 /*
669 * HW sends SU blocks of size four scan lines, which means the starting
670 * X coordinate and Y granularity requirements will always be met. We
671 * only need to validate the SU block width is a multiple of
672 * x granularity.
673 */
674 if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
675 DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
676 crtc_hdisplay, dev_priv->psr.su_x_granularity);
677 return false;
678 }
679
680 if (crtc_state->crc_enabled) {
681 DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
682 return false;
683 }
684
685 return true;
686 }
687
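/**
 * intel_psr_compute_config - compute the PSR state for a CRTC state
 * @intel_dp: Intel DP
 * @crtc_state: CRTC state to be filled
 *
 * Sets crtc_state->has_psr (and has_psr2) when all source and sink PSR
 * conditions are met for this mode.
 */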
688 void intel_psr_compute_config(struct intel_dp *intel_dp,
689 struct intel_crtc_state *crtc_state)
690 {
691 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
692 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
693 const struct drm_display_mode *adjusted_mode =
694 &crtc_state->hw.adjusted_mode;
695 int psr_setup_time;
696
697 if (!CAN_PSR(dev_priv))
698 return;
699
700 if (intel_dp != dev_priv->psr.dp)
701 return;
702
703 /*
704 * HSW spec explicitly says PSR is tied to port A.
705 * BDW+ platforms have an instance of PSR registers per transcoder but
706 * for now it only supports one instance of PSR, so let's keep it
707 * hardcoded to PORT_A.
708 */
709 if (dig_port->base.port != PORT_A) {
710 DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
711 return;
712 }
713
714 if (dev_priv->psr.sink_not_reliable) {
715 DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
716 return;
717 }
718
719 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
720 DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
721 return;
722 }
723
724 psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
725 if (psr_setup_time < 0) {
726 DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
727 intel_dp->psr_dpcd[1]);
728 return;
729 }
730
731 if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
732 adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
733 DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
734 psr_setup_time);
735 return;
736 }
737
738 crtc_state->has_psr = true;
739 crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
740 }
741
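/* Activate PSR1 or PSR2 in the hardware; caller must hold psr.lock. */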
742 static void intel_psr_activate(struct intel_dp *intel_dp)
743 {
744 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
745
746 if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
747 WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
748
749 WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
750 WARN_ON(dev_priv->psr.active);
751 lockdep_assert_held(&dev_priv->psr.lock);
752
753 /* psr1 and psr2 are mutually exclusive.*/
754 if (dev_priv->psr.psr2_enabled)
755 hsw_activate_psr2(intel_dp);
756 else
757 hsw_activate_psr1(intel_dp);
758
759 dev_priv->psr.active = true;
760 }
761
762 static void intel_psr_enable_source(struct intel_dp *intel_dp,
763 const struct intel_crtc_state *crtc_state)
764 {
765 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
766 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
767 u32 mask;
768
769 /* Only HSW and BDW have PSR AUX registers that need to be set up. SKL+
770 * use hardcoded values for PSR AUX transactions
771 */
772 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
773 hsw_psr_setup_aux(intel_dp);
774
775 if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
776 !IS_GEMINILAKE(dev_priv))) {
777 i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
778 u32 chicken = I915_READ(reg);
779
780 chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
781 PSR2_ADD_VERTICAL_LINE_COUNT;
782 I915_WRITE(reg, chicken);
783 }
784
785 /*
786 * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
787 * mask LPSP to avoid a dependency on other drivers that might block
788 * runtime_pm, besides preventing other HW tracking issues, now that
789 * we can rely on frontbuffer tracking.
790 */
791 mask = EDP_PSR_DEBUG_MASK_MEMUP |
792 EDP_PSR_DEBUG_MASK_HPD |
793 EDP_PSR_DEBUG_MASK_LPSP |
794 EDP_PSR_DEBUG_MASK_MAX_SLEEP;
795
796 if (INTEL_GEN(dev_priv) < 11)
797 mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
798
799 I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
800
801 psr_irq_control(dev_priv);
802 }
803
804 static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
805 const struct intel_crtc_state *crtc_state)
806 {
807 struct intel_dp *intel_dp = dev_priv->psr.dp;
808 u32 val;
809
810 WARN_ON(dev_priv->psr.enabled);
811
812 dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
813 dev_priv->psr.busy_frontbuffer_bits = 0;
814 dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
815 dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
816 dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
817 dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
818
819 /*
820 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
821 * will still keep the error set even after the reset done in the
822 * irq_preinstall and irq_uninstall hooks.
823 * Enabling in this situation causes the screen to freeze the first
824 * time that PSR HW tries to activate, so let's keep PSR disabled
825 * to avoid any rendering problems.
826 */
827 if (INTEL_GEN(dev_priv) >= 12) {
828 val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
829 val &= EDP_PSR_ERROR(0);
830 } else {
831 val = I915_READ(EDP_PSR_IIR);
832 val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
833 }
834 if (val) {
835 dev_priv->psr.sink_not_reliable = true;
836 DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
837 return;
838 }
839
840 DRM_DEBUG_KMS("Enabling PSR%s\n",
841 dev_priv->psr.psr2_enabled ? "2" : "1");
842 intel_psr_setup_vsc(intel_dp, crtc_state);
843 intel_psr_enable_sink(intel_dp);
844 intel_psr_enable_source(intel_dp, crtc_state);
845 dev_priv->psr.enabled = true;
846
847 intel_psr_activate(intel_dp);
848 }
849
850 /**
851 * intel_psr_enable - Enable PSR
852 * @intel_dp: Intel DP
853 * @crtc_state: new CRTC state
854 *
855 * This function can only be called after the pipe is fully trained and enabled.
856 */
857 void intel_psr_enable(struct intel_dp *intel_dp,
858 const struct intel_crtc_state *crtc_state)
859 {
860 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
861
862 if (!crtc_state->has_psr)
863 return;
864
865 if (WARN_ON(!CAN_PSR(dev_priv)))
866 return;
867
868 WARN_ON(dev_priv->drrs.dp);
869
870 mutex_lock(&dev_priv->psr.lock);
871
872 if (!psr_global_enabled(dev_priv->psr.debug)) {
873 DRM_DEBUG_KMS("PSR disabled by flag\n");
874 goto unlock;
875 }
876
877 intel_psr_enable_locked(dev_priv, crtc_state);
878
879 unlock:
880 mutex_unlock(&dev_priv->psr.lock);
881 }
882
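/*
 * Clear the PSR1/PSR2 enable bit to stop the hardware from entering
 * self-refresh, if PSR is currently active.
 */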
883 static void intel_psr_exit(struct drm_i915_private *dev_priv)
884 {
885 u32 val;
886
887 if (!dev_priv->psr.active) {
888 if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
889 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
890 WARN_ON(val & EDP_PSR2_ENABLE);
891 }
892
893 val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
894 WARN_ON(val & EDP_PSR_ENABLE);
895
896 return;
897 }
898
899 if (dev_priv->psr.psr2_enabled) {
900 tgl_disallow_dc3co_on_psr2_exit(dev_priv);
901 val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
902 WARN_ON(!(val & EDP_PSR2_ENABLE));
903 val &= ~EDP_PSR2_ENABLE;
904 I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
905 } else {
906 val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
907 WARN_ON(!(val & EDP_PSR_ENABLE));
908 val &= ~EDP_PSR_ENABLE;
909 I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
910 }
911 dev_priv->psr.active = false;
912 }
913
914 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
915 {
916 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
917 i915_reg_t psr_status;
918 u32 psr_status_mask;
919
920 lockdep_assert_held(&dev_priv->psr.lock);
921
922 if (!dev_priv->psr.enabled)
923 return;
924
925 DRM_DEBUG_KMS("Disabling PSR%s\n",
926 dev_priv->psr.psr2_enabled ? "2" : "1");
927
928 intel_psr_exit(dev_priv);
929
930 if (dev_priv->psr.psr2_enabled) {
931 psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
932 psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
933 } else {
934 psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
935 psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
936 }
937
938 /* Wait till PSR is idle */
939 if (intel_de_wait_for_clear(dev_priv, psr_status,
940 psr_status_mask, 2000))
941 DRM_ERROR("Timed out waiting PSR idle state\n");
942
943 /* Disable PSR on Sink */
944 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
945
946 if (dev_priv->psr.psr2_enabled)
947 drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
948
949 dev_priv->psr.enabled = false;
950 }
951
952 /**
953 * intel_psr_disable - Disable PSR
954 * @intel_dp: Intel DP
955 * @old_crtc_state: old CRTC state
956 *
957 * This function needs to be called before disabling the pipe.
958 */
959 void intel_psr_disable(struct intel_dp *intel_dp,
960 const struct intel_crtc_state *old_crtc_state)
961 {
962 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
963
964 if (!old_crtc_state->has_psr)
965 return;
966
967 if (WARN_ON(!CAN_PSR(dev_priv)))
968 return;
969
970 mutex_lock(&dev_priv->psr.lock);
971
972 intel_psr_disable_locked(intel_dp);
973
974 mutex_unlock(&dev_priv->psr.lock);
975 cancel_work_sync(&dev_priv->psr.work);
976 cancel_delayed_work_sync(&dev_priv->psr.idle_work);
977 }
978
979 static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
980 {
981 if (INTEL_GEN(dev_priv) >= 9)
982 /*
983 * Display WA #0884: skl+
984 * This documented WA for bxt can be safely applied
985 * broadly so we can force HW tracking to exit PSR
986 * instead of disabling and re-enabling.
987 * Workaround tells us to write 0 to CUR_SURFLIVE_A,
988 * but it makes more sense to write to the currently active
989 * pipe.
990 */
991 I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
992 else
993 /*
994 * A write to CURSURFLIVE does not cause HW tracking to exit PSR
995 * on older gens, so do the manual exit instead.
996 */
997 intel_psr_exit(dev_priv);
998 }
999
1000 /**
1001 * intel_psr_update - Update PSR state
1002 * @intel_dp: Intel DP
1003 * @crtc_state: new CRTC state
1004 *
1005 * This function will update the PSR state, disabling, enabling or switching PSR
1006 * version when executing fastsets. For full modeset, intel_psr_disable() and
1007 * intel_psr_enable() should be called instead.
1008 */
1009 void intel_psr_update(struct intel_dp *intel_dp,
1010 const struct intel_crtc_state *crtc_state)
1011 {
1012 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1013 struct i915_psr *psr = &dev_priv->psr;
1014 bool enable, psr2_enable;
1015
1016 if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
1017 return;
1018
1019 mutex_lock(&dev_priv->psr.lock);
1020
1021 enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
1022 psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
1023
1024 if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
1025 /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
1026 if (crtc_state->crc_enabled && psr->enabled)
1027 psr_force_hw_tracking_exit(dev_priv);
1028 else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
1029 /*
1030 * Activate PSR again after a force exit when enabling
1031 * CRC in older gens
1032 */
1033 if (!dev_priv->psr.active &&
1034 !dev_priv->psr.busy_frontbuffer_bits)
1035 schedule_work(&dev_priv->psr.work);
1036 }
1037
1038 goto unlock;
1039 }
1040
1041 if (psr->enabled)
1042 intel_psr_disable_locked(intel_dp);
1043
1044 if (enable)
1045 intel_psr_enable_locked(dev_priv, crtc_state);
1046
1047 unlock:
1048 mutex_unlock(&dev_priv->psr.lock);
1049 }
1050
1051 /**
1052 * intel_psr_wait_for_idle - wait for PSR1 to idle
1053 * @new_crtc_state: new CRTC state
1054 * @out_value: PSR status in case of failure
1055 *
1056 * This function is expected to be called from pipe_update_start() where it is
1057 * not expected to race with PSR enable or disable.
1058 *
1059 * Returns: 0 on success or -ETIMEDOUT if the PSR status does not idle.
1060 */
1061 int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
1062 u32 *out_value)
1063 {
1064 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
1065 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1066
1067 if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
1068 return 0;
1069
1070 /* FIXME: Update this for PSR2 if we need to wait for idle */
1071 if (READ_ONCE(dev_priv->psr.psr2_enabled))
1072 return 0;
1073
1074 /*
1075 * From bspec: Panel Self Refresh (BDW+)
1076 * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
1077 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
1078 * defensive enough to cover everything.
1079 */
1080
1081 return __intel_wait_for_register(&dev_priv->uncore,
1082 EDP_PSR_STATUS(dev_priv->psr.transcoder),
1083 EDP_PSR_STATUS_STATE_MASK,
1084 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
1085 out_value);
1086 }
1087
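/*
 * Wait for the PSR status to idle, dropping psr.lock around the wait.
 * Returns true if PSR is still enabled (and so can be re-activated)
 * afterwards.
 */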
1088 static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
1089 {
1090 i915_reg_t reg;
1091 u32 mask;
1092 int err;
1093
1094 if (!dev_priv->psr.enabled)
1095 return false;
1096
1097 if (dev_priv->psr.psr2_enabled) {
1098 reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
1099 mask = EDP_PSR2_STATUS_STATE_MASK;
1100 } else {
1101 reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
1102 mask = EDP_PSR_STATUS_STATE_MASK;
1103 }
1104
1105 mutex_unlock(&dev_priv->psr.lock);
1106
1107 err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
1108 if (err)
1109 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
1110
1111 /* After the unlocked wait, verify that PSR is still wanted! */
1112 mutex_lock(&dev_priv->psr.lock);
1113 return err == 0 && dev_priv->psr.enabled;
1114 }
1115
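/*
 * Commit an atomic state with mode_changed set on the active
 * PSR-capable CRTC so the PSR state is recomputed, used when the debug
 * mode changes.
 */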
1116 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
1117 {
1118 struct drm_device *dev = &dev_priv->drm;
1119 struct drm_modeset_acquire_ctx ctx;
1120 struct drm_atomic_state *state;
1121 struct intel_crtc *crtc;
1122 int err;
1123
1124 state = drm_atomic_state_alloc(dev);
1125 if (!state)
1126 return -ENOMEM;
1127
1128 drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
1129 state->acquire_ctx = &ctx;
1130
1131 retry:
1132 for_each_intel_crtc(dev, crtc) {
1133 struct intel_crtc_state *crtc_state =
1134 intel_atomic_get_crtc_state(state, crtc);
1135
1136 if (IS_ERR(crtc_state)) {
1137 err = PTR_ERR(crtc_state);
1138 goto error;
1139 }
1140
1141 if (crtc_state->hw.active && crtc_state->has_psr) {
1142 /* Mark mode as changed to trigger a pipe->update() */
1143 crtc_state->uapi.mode_changed = true;
1144 break;
1145 }
1146 }
1147
1148 err = drm_atomic_commit(state);
1149
1150 error:
1151 if (err == -EDEADLK) {
1152 drm_atomic_state_clear(state);
1153 err = drm_modeset_backoff(&ctx);
1154 if (!err)
1155 goto retry;
1156 }
1157
1158 drm_modeset_drop_locks(&ctx);
1159 drm_modeset_acquire_fini(&ctx);
1160 drm_atomic_state_put(state);
1161
1162 return err;
1163 }
1164
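/**
 * intel_psr_debug_set - set the PSR debug mode and flags
 * @dev_priv: i915 device
 * @val: new I915_PSR_DEBUG_* value
 *
 * Validates and stores the new debug value, reprograms the PSR
 * interrupt mask and, if the mode changed, forces a fastset so the new
 * mode takes effect.
 */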
1165 int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
1166 {
1167 const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
1168 u32 old_mode;
1169 int ret;
1170
1171 if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
1172 mode > I915_PSR_DEBUG_FORCE_PSR1) {
1173 DRM_DEBUG_KMS("Invalid debug mask %"PRIx64"\n", val);
1174 return -EINVAL;
1175 }
1176
1177 ret = mutex_lock_interruptible(&dev_priv->psr.lock);
1178 if (ret)
1179 return ret;
1180
1181 old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
1182 dev_priv->psr.debug = val;
1183
1184 /*
1185 * Do it right away if it's already enabled, otherwise it will be done
1186 * when enabling the source.
1187 */
1188 if (dev_priv->psr.enabled)
1189 psr_irq_control(dev_priv);
1190
1191 mutex_unlock(&dev_priv->psr.lock);
1192
1193 if (old_mode != mode)
1194 ret = intel_psr_fastset_force(dev_priv);
1195
1196 return ret;
1197 }
1198
1199 static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
1200 {
1201 struct i915_psr *psr = &dev_priv->psr;
1202
1203 intel_psr_disable_locked(psr->dp);
1204 psr->sink_not_reliable = true;
1205 /* let's make sure that the sink is awake */
1206 drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
1207 }
1208
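/*
 * Deferred PSR work: handles a pending AUX error and, once the hardware
 * has idled and no frontbuffers are busy, re-activates PSR.
 */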
1209 static void intel_psr_work(struct work_struct *work)
1210 {
1211 struct drm_i915_private *dev_priv =
1212 container_of(work, typeof(*dev_priv), psr.work);
1213
1214 mutex_lock(&dev_priv->psr.lock);
1215
1216 if (!dev_priv->psr.enabled)
1217 goto unlock;
1218
1219 if (READ_ONCE(dev_priv->psr.irq_aux_error))
1220 intel_psr_handle_irq(dev_priv);
1221
1222 /*
1223 * We have to make sure PSR is ready for re-enable
1224 * otherwise it stays disabled until the next full enable/disable cycle.
1225 * PSR might take some time to get fully disabled
1226 * and be ready for re-enable.
1227 */
1228 if (!__psr_wait_for_idle_locked(dev_priv))
1229 goto unlock;
1230
1231 /*
1232 * The delayed work can race with an invalidate hence we need to
1233 * recheck. Since psr_flush first clears this and then reschedules we
1234 * won't ever miss a flush when bailing out here.
1235 */
1236 if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
1237 goto unlock;
1238
1239 intel_psr_activate(dev_priv->psr.dp);
1240 unlock:
1241 mutex_unlock(&dev_priv->psr.lock);
1242 }
1243
1244 /**
1245 * intel_psr_invalidate - Invalidate PSR
1246 * @dev_priv: i915 device
1247 * @frontbuffer_bits: frontbuffer plane tracking bits
1248 * @origin: which operation caused the invalidate
1249 *
1250 * Since the hardware frontbuffer tracking has gaps we need to integrate
1251 * with the software frontbuffer tracking. This function gets called every
1252 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
1253 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
1254 *
1255 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1256 */
1257 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
1258 unsigned frontbuffer_bits, enum fb_op_origin origin)
1259 {
1260 if (!CAN_PSR(dev_priv))
1261 return;
1262
1263 if (origin == ORIGIN_FLIP)
1264 return;
1265
1266 mutex_lock(&dev_priv->psr.lock);
1267 if (!dev_priv->psr.enabled) {
1268 mutex_unlock(&dev_priv->psr.lock);
1269 return;
1270 }
1271
1272 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
1273 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
1274
1275 if (frontbuffer_bits)
1276 intel_psr_exit(dev_priv);
1277
1278 mutex_unlock(&dev_priv->psr.lock);
1279 }
1280
1281 /*
1282 * When we completely rely on PSR2 S/W tracking in the future,
1283 * intel_psr_flush() will also invalidate and flush the PSR for the
1284 * ORIGIN_FLIP event; therefore tgl_dc3co_flush() will need to be
1285 * changed accordingly.
1286 */
1287 static void
1288 tgl_dc3co_flush(struct drm_i915_private *dev_priv,
1289 unsigned int frontbuffer_bits, enum fb_op_origin origin)
1290 {
1291 u32 delay;
1292
1293 mutex_lock(&dev_priv->psr.lock);
1294
1295 if (!dev_priv->psr.dc3co_enabled)
1296 goto unlock;
1297
1298 if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
1299 goto unlock;
1300
1301 /*
1302 * Every frontbuffer flush from a flip event pushes back the delayed
1303 * work; when the delayed work finally runs, the display has been idle.
1304 */
1305 if (!(frontbuffer_bits &
1306 INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
1307 goto unlock;
1308
1309 tgl_psr2_enable_dc3co(dev_priv);
1310 /* DC5/DC6 required idle frames = 6 */
1311 delay = 6 * dev_priv->psr.dc3co_exit_delay;
1312 mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
1313 usecs_to_jiffies(delay));
1314
1315 unlock:
1316 mutex_unlock(&dev_priv->psr.lock);
1317 }
1318
1319 /**
1320 * intel_psr_flush - Flush PSR
1321 * @dev_priv: i915 device
1322 * @frontbuffer_bits: frontbuffer plane tracking bits
1323 * @origin: which operation caused the flush
1324 *
1325 * Since the hardware frontbuffer tracking has gaps we need to integrate
1326 * with the software frontbuffer tracking. This function gets called every
1327 * time frontbuffer rendering has completed and flushed out to memory. PSR
1328 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
1329 *
1330 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
1331 */
1332 void intel_psr_flush(struct drm_i915_private *dev_priv,
1333 unsigned frontbuffer_bits, enum fb_op_origin origin)
1334 {
1335 if (!CAN_PSR(dev_priv))
1336 return;
1337
1338 if (origin == ORIGIN_FLIP) {
1339 tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
1340 return;
1341 }
1342
1343 mutex_lock(&dev_priv->psr.lock);
1344 if (!dev_priv->psr.enabled) {
1345 mutex_unlock(&dev_priv->psr.lock);
1346 return;
1347 }
1348
1349 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
1350 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
1351
1352 /* By definition flush = invalidate + flush */
1353 if (frontbuffer_bits)
1354 psr_force_hw_tracking_exit(dev_priv);
1355
1356 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
1357 schedule_work(&dev_priv->psr.work);
1358 mutex_unlock(&dev_priv->psr.lock);
1359 }
1360
1361 /**
1362 * intel_psr_init - Init basic PSR work and mutex.
1363 * @dev_priv: i915 device private
1364 *
1365 * This function is called only once at driver load to initialize basic
1366 * PSR stuff.
1367 */
1368 void intel_psr_init(struct drm_i915_private *dev_priv)
1369 {
1370 if (!HAS_PSR(dev_priv))
1371 return;
1372
1373 if (!dev_priv->psr.sink_support)
1374 return;
1375
1376 if (IS_HASWELL(dev_priv))
1377 /*
1378 * HSW doesn't have PSR registers in the same space as the transcoder,
1379 * so set this to a value that, when subtracted from the register
1380 * in transcoder space, results in the right offset for HSW.
1381 */
1382 dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
1383
1384 if (i915_modparams.enable_psr == -1)
1385 if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
1386 i915_modparams.enable_psr = 0;
1387
1388 /* Set link_standby x link_off defaults */
1389 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1390 /* HSW and BDW require workarounds that we don't implement. */
1391 dev_priv->psr.link_standby = false;
1392 else if (INTEL_GEN(dev_priv) < 12)
1393 /* For new platforms up to TGL let's respect the VBT again */
1394 dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
1395
1396 INIT_WORK(&dev_priv->psr.work, intel_psr_work);
1397 INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
1398 mutex_init(&dev_priv->psr.lock);
1399 }
1400
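/* Read the sink PSR status and PSR error status DPCD registers. */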
1401 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
1402 u8 *status, u8 *error_status)
1403 {
1404 struct drm_dp_aux *aux = &intel_dp->aux;
1405 int ret;
1406
1407 ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
1408 if (ret != 1)
1409 return ret;
1410
1411 ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
1412 if (ret != 1)
1413 return ret;
1414
1415 *status = *status & DP_PSR_SINK_STATE_MASK;
1416
1417 return 0;
1418 }
1419
1420 static void psr_alpm_check(struct intel_dp *intel_dp)
1421 {
1422 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1423 struct drm_dp_aux *aux = &intel_dp->aux;
1424 struct i915_psr *psr = &dev_priv->psr;
1425 u8 val;
1426 int r;
1427
1428 if (!psr->psr2_enabled)
1429 return;
1430
1431 r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
1432 if (r != 1) {
1433 DRM_ERROR("Error reading ALPM status\n");
1434 return;
1435 }
1436
1437 if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
1438 intel_psr_disable_locked(intel_dp);
1439 psr->sink_not_reliable = true;
1440 DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
1441
1442 /* Clearing error */
1443 drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
1444 }
1445 }
1446
1447 static void psr_capability_changed_check(struct intel_dp *intel_dp)
1448 {
1449 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1450 struct i915_psr *psr = &dev_priv->psr;
1451 u8 val;
1452 int r;
1453
1454 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
1455 if (r != 1) {
1456 DRM_ERROR("Error reading DP_PSR_ESI\n");
1457 return;
1458 }
1459
1460 if (val & DP_PSR_CAPS_CHANGE) {
1461 intel_psr_disable_locked(intel_dp);
1462 psr->sink_not_reliable = true;
1463 DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
1464
1465 /* Clearing it */
1466 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
1467 }
1468 }
1469
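/**
 * intel_psr_short_pulse - handle a short HPD pulse from the PSR sink
 * @intel_dp: Intel DP
 *
 * Checks the sink PSR status, error status, ALPM status and capability
 * bits, and disables PSR, marking the sink as not reliable, when an
 * error is reported.
 */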
1470 void intel_psr_short_pulse(struct intel_dp *intel_dp)
1471 {
1472 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1473 struct i915_psr *psr = &dev_priv->psr;
1474 u8 status, error_status;
1475 const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
1476 DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
1477 DP_PSR_LINK_CRC_ERROR;
1478
1479 if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
1480 return;
1481
1482 mutex_lock(&psr->lock);
1483
1484 if (!psr->enabled || psr->dp != intel_dp)
1485 goto exit;
1486
1487 if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
1488 DRM_ERROR("Error reading PSR status or error status\n");
1489 goto exit;
1490 }
1491
1492 if (status == DP_PSR_SINK_INTERNAL_ERROR || (error_status & errors)) {
1493 intel_psr_disable_locked(intel_dp);
1494 psr->sink_not_reliable = true;
1495 }
1496
1497 if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
1498 DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
1499 if (error_status & DP_PSR_RFB_STORAGE_ERROR)
1500 DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
1501 if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
1502 DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
1503 if (error_status & DP_PSR_LINK_CRC_ERROR)
1504 DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
1505
1506 if (error_status & ~errors)
1507 DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
1508 error_status & ~errors);
1509 /* clear status register */
1510 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
1511
1512 psr_alpm_check(intel_dp);
1513 psr_capability_changed_check(intel_dp);
1514
1515 exit:
1516 mutex_unlock(&psr->lock);
1517 }
1518
1519 bool intel_psr_enabled(struct intel_dp *intel_dp)
1520 {
1521 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1522 bool ret;
1523
1524 if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
1525 return false;
1526
1527 mutex_lock(&dev_priv->psr.lock);
1528 ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
1529 mutex_unlock(&dev_priv->psr.lock);
1530
1531 return ret;
1532 }
1533
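/*
 * Force mode_changed on the first atomic state that touches the PSR eDP
 * connector so the PSR state gets computed; done only once, tracked via
 * psr.initially_probed.
 */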
1534 void intel_psr_atomic_check(struct drm_connector *connector,
1535 struct drm_connector_state *old_state,
1536 struct drm_connector_state *new_state)
1537 {
1538 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1539 struct intel_connector *intel_connector;
1540 struct intel_digital_port *dig_port;
1541 struct drm_crtc_state *crtc_state;
1542
1543 if (!CAN_PSR(dev_priv) || !new_state->crtc ||
1544 dev_priv->psr.initially_probed)
1545 return;
1546
1547 intel_connector = to_intel_connector(connector);
1548 dig_port = enc_to_dig_port(intel_connector->encoder);
1549 if (dev_priv->psr.dp != &dig_port->dp)
1550 return;
1551
1552 crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
1553 new_state->crtc);
1554 crtc_state->mode_changed = true;
1555 dev_priv->psr.initially_probed = true;
1556 }
1557