/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"

#define LT_MSG_PREFIX			"[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] "
#define LT_MSG_ARGS(_intel_dp, _dp_phy)	(_intel_dp)->attached_connector->base.base.id, \
					(_intel_dp)->attached_connector->base.name, \
					dp_to_dig_port(_intel_dp)->base.base.base.id, \
					dp_to_dig_port(_intel_dp)->base.base.name, \
					drm_dp_phy_name(_dp_phy)

#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
	drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \
		    LT_MSG_PREFIX _format, \
		    LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)

#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
	if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
		drm_err(&dp_to_i915(_intel_dp)->drm, \
			LT_MSG_PREFIX _format, \
			LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
	else \
		lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \
} while (0)

static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
	memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}

static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
	intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
				    DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] = 0;
}

static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	return intel_dp->lttpr_phy_caps[dp_phy - DP_PHY_LTTPR1];
}

static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
					 const u8 dpcd[DP_RECEIVER_CAP_SIZE],
					 enum drm_dp_phy dp_phy)
{
	u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dpcd, dp_phy, phy_caps) < 0) {
		lt_dbg(intel_dp, dp_phy, "failed to read the PHY caps\n");
		return;
	}

	lt_dbg(intel_dp, dp_phy, "PHY capabilities: %*ph\n",
	       (int)sizeof(intel_dp->lttpr_phy_caps[0]),
	       phy_caps);
}

static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int ret;

	ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
					    intel_dp->lttpr_common_caps);
	if (ret < 0)
		goto reset_caps;

	lt_dbg(intel_dp, DP_PHY_DPRX, "LTTPR common capabilities: %*ph\n",
	       (int)sizeof(intel_dp->lttpr_common_caps),
	       intel_dp->lttpr_common_caps);

	/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
	if (intel_dp->lttpr_common_caps[0] < 0x14)
		goto reset_caps;

	return true;

reset_caps:
	intel_dp_reset_lttpr_common_caps(intel_dp);
	return false;
}

static bool
intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
{
	u8 val = enable ? DP_PHY_REPEATER_MODE_TRANSPARENT :
		DP_PHY_REPEATER_MODE_NON_TRANSPARENT;

	return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1;
}

static bool intel_dp_lttpr_transparent_mode_enabled(struct intel_dp *intel_dp)
{
	return intel_dp->lttpr_common_caps[DP_PHY_REPEATER_MODE -
					   DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] ==
		DP_PHY_REPEATER_MODE_TRANSPARENT;
}

/*
 * Read the LTTPR common capabilities and switch the LTTPR PHYs to
 * non-transparent mode if this is supported. Preserve the
 * transparent/non-transparent mode on an active link.
 *
 * Return the number of detected LTTPRs in non-transparent mode or 0 if the
 * LTTPRs are in transparent mode or the detection failed.
 */
static int intel_dp_init_lttpr_phys(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;

	if (!intel_dp_read_lttpr_common_caps(intel_dp, dpcd))
		return 0;

	lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
	/*
	 * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
	 * detected as this breaks link training at least on the Dell WD19TB
	 * dock.
	 */
	if (lttpr_count == 0)
		return 0;

	/*
	 * Don't change the mode on an active link, to prevent a loss of link
	 * synchronization. See DP Standard v2.0 3.6.7. about the LTTPR
	 * resetting its internal state when the mode is changed from
	 * non-transparent to transparent.
	 */
	if (intel_dp->link_trained) {
		if (lttpr_count < 0 || intel_dp_lttpr_transparent_mode_enabled(intel_dp))
			goto out_reset_lttpr_count;

		return lttpr_count;
	}

	/*
	 * See DP Standard v2.0 3.6.6.1. about the explicit disabling of
	 * non-transparent mode and the disable->enable non-transparent mode
	 * sequence.
	 */
	intel_dp_set_lttpr_transparent_mode(intel_dp, true);

	/*
	 * In case of unsupported number of LTTPRs or failing to switch to
	 * non-transparent mode fall-back to transparent link training mode,
	 * still taking into account any LTTPR common lane- rate/count limits.
	 */
	if (lttpr_count < 0)
		return 0;

	if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");

		intel_dp_set_lttpr_transparent_mode(intel_dp, true);

		goto out_reset_lttpr_count;
	}

	return lttpr_count;

out_reset_lttpr_count:
	intel_dp_reset_lttpr_count(intel_dp);

	return 0;
}

static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	int lttpr_count;
	int i;

	lttpr_count = intel_dp_init_lttpr_phys(intel_dp, dpcd);

	for (i = 0; i < lttpr_count; i++)
		intel_dp_read_lttpr_phy_caps(intel_dp, dpcd, DP_PHY_LTTPR(i));

	return lttpr_count;
}

/**
 * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
 * @intel_dp: Intel DP struct
 *
 * Read the LTTPR common and DPRX capabilities and switch to non-transparent
 * link training mode if any is detected and read the PHY capabilities for all
 * detected LTTPRs. In case of an LTTPR detection error or if the number of
 * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
 * transparent mode link training mode.
 *
 * Returns:
 *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
 *       DPRX capabilities are read out.
 *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
 *       detection failure and the transparent LT mode was set. The DPRX
 *       capabilities are read out.
 *   <0  Reading out the DPRX capabilities failed.
 */
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = 0;

	/*
	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
	 */
	if (!intel_dp_is_edp(intel_dp) &&
	    (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
		u8 dpcd[DP_RECEIVER_CAP_SIZE];

		if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
			return -EIO;

		if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
			return -EIO;

		lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
	}

	/*
	 * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
	 * it here.
	 */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
		intel_dp_reset_lttpr_common_caps(intel_dp);
		return -EIO;
	}

	return lttpr_count;
}

static u8 dp_voltage_max(u8 preemph)
{
	switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
	default:
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
	}
}

static u8 intel_dp_lttpr_voltage_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_voltage_swing_level_3_supported(phy_caps))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}

static u8 intel_dp_lttpr_preemph_max(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);

	if (drm_dp_lttpr_pre_emphasis_level_3_supported(phy_caps))
		return DP_TRAIN_PRE_EMPH_LEVEL_3;
	else
		return DP_TRAIN_PRE_EMPH_LEVEL_2;
}

static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);

	drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);

	return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}

static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
				   const struct intel_crtc_state *crtc_state,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 voltage_max;

	/*
	 * Get voltage_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		voltage_max = intel_dp->voltage_max(intel_dp, crtc_state);
	else
		voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(&i915->drm,
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
			 voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);

	return voltage_max;
}

static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
				   enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 preemph_max;

	/*
	 * Get preemph_max from the DPTX_PHY (source or LTTPR) upstream from
	 * the DPRX_PHY we train.
	 */
	if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
		preemph_max = intel_dp->preemph_max(intel_dp);
	else
		preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);

	drm_WARN_ON_ONCE(&i915->drm,
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
			 preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);

	return preemph_max;
}

static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
				       enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
		DISPLAY_VER(i915) >= 11;
}

/* 128b/132b */
static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
						 const struct intel_crtc_state *crtc_state,
						 enum drm_dp_phy dp_phy,
						 const u8 link_status[DP_LINK_STATUS_SIZE],
						 int lane)
{
	u8 tx_ffe = 0;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);
		tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
	} else {
		for (lane = 0; lane < crtc_state->lane_count; lane++)
			tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
	}

	return tx_ffe;
}

/* 8b/10b */
static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
						  const struct intel_crtc_state *crtc_state,
						  enum drm_dp_phy dp_phy,
						  const u8 link_status[DP_LINK_STATUS_SIZE],
						  int lane)
{
	u8 v = 0;
	u8 p = 0;
	u8 voltage_max;
	u8 preemph_max;

	if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
		lane = min(lane, crtc_state->lane_count - 1);

		v = drm_dp_get_adjust_request_voltage(link_status, lane);
		p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
	} else {
		for (lane = 0; lane < crtc_state->lane_count; lane++) {
			v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
			p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
		}
	}

	preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	v = min(v, dp_voltage_max(p));

	voltage_max = intel_dp_phy_voltage_max(intel_dp, crtc_state, dp_phy);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	return v | p;
}

static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
					 const struct intel_crtc_state *crtc_state,
					 enum drm_dp_phy dp_phy,
					 const u8 link_status[DP_LINK_STATUS_SIZE],
					 int lane)
{
	if (intel_dp_is_uhbr(crtc_state))
		return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
							      dp_phy, link_status, lane);
	else
		return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
							       dp_phy, link_status, lane);
}

#define TRAIN_REQ_FMT "%d/%d/%d/%d"
#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
#define TRAIN_REQ_VSWING_ARGS(link_status) \
	_TRAIN_REQ_VSWING_ARGS(link_status, 0), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 1), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 2), \
	_TRAIN_REQ_VSWING_ARGS(link_status, 3)
#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
	(drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
	_TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
	drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
	_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)

void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state,
			  enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	if (intel_dp_is_uhbr(crtc_state)) {
		lt_dbg(intel_dp, dp_phy,
		       "128b/132b, lanes: %d, "
		       "TX FFE request: " TRAIN_REQ_FMT "\n",
		       crtc_state->lane_count,
		       TRAIN_REQ_TX_FFE_ARGS(link_status));
	} else {
		lt_dbg(intel_dp, dp_phy,
		       "8b/10b, lanes: %d, "
%d, " 4721bb76ff1Sjsg "vswing request: " TRAIN_REQ_FMT ", " 4731bb76ff1Sjsg "pre-emphasis request: " TRAIN_REQ_FMT "\n", 4741bb76ff1Sjsg crtc_state->lane_count, 4751bb76ff1Sjsg TRAIN_REQ_VSWING_ARGS(link_status), 4761bb76ff1Sjsg TRAIN_REQ_PREEMPH_ARGS(link_status)); 4771bb76ff1Sjsg } 4781bb76ff1Sjsg 479c349dbc7Sjsg for (lane = 0; lane < 4; lane++) 4801bb76ff1Sjsg intel_dp->train_set[lane] = 4811bb76ff1Sjsg intel_dp_get_lane_adjust_train(intel_dp, crtc_state, 4821bb76ff1Sjsg dp_phy, link_status, lane); 483c349dbc7Sjsg } 484c349dbc7Sjsg 4855ca02815Sjsg static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp, 4865ca02815Sjsg enum drm_dp_phy dp_phy) 487c349dbc7Sjsg { 4885ca02815Sjsg return dp_phy == DP_PHY_DPRX ? 4895ca02815Sjsg DP_TRAINING_PATTERN_SET : 4905ca02815Sjsg DP_TRAINING_PATTERN_SET_PHY_REPEATER(dp_phy); 491c349dbc7Sjsg } 492c349dbc7Sjsg 4935ca02815Sjsg static bool 4945ca02815Sjsg intel_dp_set_link_train(struct intel_dp *intel_dp, 4955ca02815Sjsg const struct intel_crtc_state *crtc_state, 4965ca02815Sjsg enum drm_dp_phy dp_phy, 4975ca02815Sjsg u8 dp_train_pat) 4985ca02815Sjsg { 4995ca02815Sjsg int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy); 5005ca02815Sjsg u8 buf[sizeof(intel_dp->train_set) + 1]; 5015ca02815Sjsg int len; 502c349dbc7Sjsg 5035ca02815Sjsg intel_dp_program_link_training_pattern(intel_dp, crtc_state, 5041bb76ff1Sjsg dp_phy, dp_train_pat); 5055ca02815Sjsg 5065ca02815Sjsg buf[0] = dp_train_pat; 5075ca02815Sjsg /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ 5085ca02815Sjsg memcpy(buf + 1, intel_dp->train_set, crtc_state->lane_count); 5095ca02815Sjsg len = crtc_state->lane_count + 1; 5105ca02815Sjsg 5115ca02815Sjsg return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len; 5125ca02815Sjsg } 5135ca02815Sjsg 5145ca02815Sjsg static char dp_training_pattern_name(u8 train_pat) 5155ca02815Sjsg { 5165ca02815Sjsg switch (train_pat) { 5175ca02815Sjsg case DP_TRAINING_PATTERN_1: 5185ca02815Sjsg case DP_TRAINING_PATTERN_2: 5195ca02815Sjsg case DP_TRAINING_PATTERN_3: 5205ca02815Sjsg return '0' + train_pat; 5215ca02815Sjsg case DP_TRAINING_PATTERN_4: 5225ca02815Sjsg return '4'; 5235ca02815Sjsg default: 5245ca02815Sjsg MISSING_CASE(train_pat); 5255ca02815Sjsg return '?'; 5265ca02815Sjsg } 5275ca02815Sjsg } 5285ca02815Sjsg 5295ca02815Sjsg void 5305ca02815Sjsg intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, 5315ca02815Sjsg const struct intel_crtc_state *crtc_state, 5321bb76ff1Sjsg enum drm_dp_phy dp_phy, 5335ca02815Sjsg u8 dp_train_pat) 5345ca02815Sjsg { 5355ca02815Sjsg u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat); 5365ca02815Sjsg 5375ca02815Sjsg if (train_pat != DP_TRAINING_PATTERN_DISABLE) 538f005ef32Sjsg lt_dbg(intel_dp, dp_phy, "Using DP training pattern TPS%c\n", 5395ca02815Sjsg dp_training_pattern_name(train_pat)); 5405ca02815Sjsg 5415ca02815Sjsg intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat); 5425ca02815Sjsg } 5435ca02815Sjsg 5441bb76ff1Sjsg #define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s" 5451bb76ff1Sjsg #define _TRAIN_SET_VSWING_ARGS(train_set) \ 5461bb76ff1Sjsg ((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \ 5471bb76ff1Sjsg (train_set) & DP_TRAIN_MAX_SWING_REACHED ? 
"(max)" : "" 5481bb76ff1Sjsg #define TRAIN_SET_VSWING_ARGS(train_set) \ 5491bb76ff1Sjsg _TRAIN_SET_VSWING_ARGS((train_set)[0]), \ 5501bb76ff1Sjsg _TRAIN_SET_VSWING_ARGS((train_set)[1]), \ 5511bb76ff1Sjsg _TRAIN_SET_VSWING_ARGS((train_set)[2]), \ 5521bb76ff1Sjsg _TRAIN_SET_VSWING_ARGS((train_set)[3]) 5531bb76ff1Sjsg #define _TRAIN_SET_PREEMPH_ARGS(train_set) \ 5541bb76ff1Sjsg ((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \ 5551bb76ff1Sjsg (train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : "" 5561bb76ff1Sjsg #define TRAIN_SET_PREEMPH_ARGS(train_set) \ 5571bb76ff1Sjsg _TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \ 5581bb76ff1Sjsg _TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \ 5591bb76ff1Sjsg _TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \ 5601bb76ff1Sjsg _TRAIN_SET_PREEMPH_ARGS((train_set)[3]) 5611bb76ff1Sjsg #define _TRAIN_SET_TX_FFE_ARGS(train_set) \ 5621bb76ff1Sjsg ((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), "" 5631bb76ff1Sjsg #define TRAIN_SET_TX_FFE_ARGS(train_set) \ 5641bb76ff1Sjsg _TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \ 5651bb76ff1Sjsg _TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \ 5661bb76ff1Sjsg _TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \ 5671bb76ff1Sjsg _TRAIN_SET_TX_FFE_ARGS((train_set)[3]) 5681bb76ff1Sjsg 5695ca02815Sjsg void intel_dp_set_signal_levels(struct intel_dp *intel_dp, 5705ca02815Sjsg const struct intel_crtc_state *crtc_state, 5715ca02815Sjsg enum drm_dp_phy dp_phy) 5725ca02815Sjsg { 5731bb76ff1Sjsg struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 5745ca02815Sjsg 5751bb76ff1Sjsg if (intel_dp_is_uhbr(crtc_state)) { 576f005ef32Sjsg lt_dbg(intel_dp, dp_phy, 577f005ef32Sjsg "128b/132b, lanes: %d, " 5781bb76ff1Sjsg "TX FFE presets: " TRAIN_SET_FMT "\n", 5791bb76ff1Sjsg crtc_state->lane_count, 5801bb76ff1Sjsg TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set)); 5811bb76ff1Sjsg } else { 582f005ef32Sjsg lt_dbg(intel_dp, dp_phy, 583f005ef32Sjsg "8b/10b, lanes: %d, " 5841bb76ff1Sjsg "vswing levels: " TRAIN_SET_FMT ", " 5851bb76ff1Sjsg "pre-emphasis levels: " TRAIN_SET_FMT "\n", 5861bb76ff1Sjsg crtc_state->lane_count, 5871bb76ff1Sjsg TRAIN_SET_VSWING_ARGS(intel_dp->train_set), 5881bb76ff1Sjsg TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set)); 5891bb76ff1Sjsg } 5905ca02815Sjsg 5915ca02815Sjsg if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) 5921bb76ff1Sjsg encoder->set_signal_levels(encoder, crtc_state); 593c349dbc7Sjsg } 594c349dbc7Sjsg 595c349dbc7Sjsg static bool 596c349dbc7Sjsg intel_dp_reset_link_train(struct intel_dp *intel_dp, 5975ca02815Sjsg const struct intel_crtc_state *crtc_state, 5985ca02815Sjsg enum drm_dp_phy dp_phy, 599c349dbc7Sjsg u8 dp_train_pat) 600c349dbc7Sjsg { 601c349dbc7Sjsg memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); 6025ca02815Sjsg intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); 6035ca02815Sjsg return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat); 604c349dbc7Sjsg } 605c349dbc7Sjsg 606c349dbc7Sjsg static bool 6075ca02815Sjsg intel_dp_update_link_train(struct intel_dp *intel_dp, 6085ca02815Sjsg const struct intel_crtc_state *crtc_state, 6095ca02815Sjsg enum drm_dp_phy dp_phy) 610c349dbc7Sjsg { 6115ca02815Sjsg int reg = dp_phy == DP_PHY_DPRX ? 
		DP_TRAINING_LANE0_SET :
		DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
	int ret;

	intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);

	ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
				intel_dp->train_set, crtc_state->lane_count);

	return ret == crtc_state->lane_count;
}

/* 128b/132b */
static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
{
	return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
		DP_TX_FFE_PRESET_VALUE_MASK;
}

/*
 * 8b/10b
 *
 * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to
 * have self contradicting tests around this area.
 *
 * In lieu of better ideas let's just stop when we've reached the max supported
 * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
 * whether vswing level 3 is supported or not.
 */
static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
{
	u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
		DP_TRAIN_VOLTAGE_SWING_SHIFT;
	u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		DP_TRAIN_PRE_EMPHASIS_SHIFT;

	if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
		return false;

	if (v + p != 3)
		return false;

	return true;
}

static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
					     const struct intel_crtc_state *crtc_state)
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		u8 train_set_lane = intel_dp->train_set[lane];

		if (intel_dp_is_uhbr(crtc_state)) {
			if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
				return false;
		} else {
			if (!intel_dp_lane_max_vswing_reached(train_set_lane))
				return false;
		}
	}

	return true;
}

static void
intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	u8 link_config[2];

	link_config[0] = crtc_state->vrr.flipline ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
	link_config[1] = intel_dp_is_uhbr(crtc_state) ?
		DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
}

static void
intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state,
			    u8 link_bw, u8 rate_select)
{
	u8 lane_count = crtc_state->lane_count;

	if (crtc_state->enhanced_framing)
		lane_count |= DP_LANE_COUNT_ENHANCED_FRAME_EN;

	if (link_bw) {
		/* DP and eDP v1.3 and earlier link bw set method. */
		u8 link_config[] = { link_bw, lane_count };

		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config,
				  ARRAY_SIZE(link_config));
	} else {
		/*
		 * eDP v1.4 and later link rate set method.
		 *
		 * eDP v1.4x sinks shall ignore DP_LINK_RATE_SET if
		 * DP_LINK_BW_SET is set. Avoid writing DP_LINK_BW_SET.
		 *
		 * eDP v1.5 sinks allow choosing either, and the last choice
		 * shall be active.
		 */
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LANE_COUNT_SET, lane_count);
		drm_dp_dpcd_writeb(&intel_dp->aux, DP_LINK_RATE_SET, rate_select);
	}
}

/*
 * Prepare link training by configuring the link parameters. On DDI platforms
 * also enable the port here.
 */
static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	u8 link_bw, rate_select;

	if (intel_dp->prepare_link_retrain)
		intel_dp->prepare_link_retrain(intel_dp, crtc_state);

	intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
			      &link_bw, &rate_select);

	/*
	 * WaEdpLinkRateDataReload
	 *
	 * Parade PS8461E MUX (used on various TGL+ laptops) needs
	 * to snoop the link rates reported by the sink when we
	 * use LINK_RATE_SET in order to operate in jitter cleaning
	 * mode (as opposed to redriver mode). Unfortunately it
	 * loses track of the snooped link rates when powered down,
	 * so we need to make it re-snoop often. Without this high
	 * link rates are not stable.
	 */
	if (!link_bw) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];

		lt_dbg(intel_dp, DP_PHY_DPRX, "Reloading eDP link rates\n");

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));
	}

	if (link_bw)
		lt_dbg(intel_dp, DP_PHY_DPRX, "Using LINK_BW_SET value %02x\n",
		       link_bw);
	else
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Using LINK_RATE_SET value %02x\n",
		       rate_select);
	/*
	 * Spec DP2.1 Section 3.5.2.16
	 * Prior to LT DPTX should set 128b/132b DP Channel coding and then set link rate
	 */
	intel_dp_update_downspread_ctrl(intel_dp, crtc_state);
	intel_dp_update_link_bw_set(intel_dp, crtc_state, link_bw,
				    rate_select);

	return true;
}

static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
					    const u8 old_link_status[DP_LINK_STATUS_SIZE],
					    const u8 new_link_status[DP_LINK_STATUS_SIZE])
{
	int lane;

	for (lane = 0; lane < crtc_state->lane_count; lane++) {
		u8 old, new;

		if (intel_dp_is_uhbr(crtc_state)) {
			old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
			new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
		} else {
			old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
				drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
			new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
				drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
		}

		if (old != new)
			return true;
	}

	return false;
}

void
intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
			  const u8 link_status[DP_LINK_STATUS_SIZE])
{
	lt_dbg(intel_dp, dp_phy,
	       "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
	       link_status[0], link_status[1], link_status[2],
	       link_status[3], link_status[4], link_status[5]);
}

/*
 * Perform the link training clock recovery phase on the given DP PHY using
 * training pattern 1.
 */
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
				      const struct intel_crtc_state *crtc_state,
				      enum drm_dp_phy dp_phy)
{
	u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
	int voltage_tries, cr_tries, max_cr_tries;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool max_vswing_reached = false;
	int delay_us;

	delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux,
						    intel_dp->dpcd, dp_phy,
						    intel_dp_is_uhbr(crtc_state));

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		lt_err(intel_dp, dp_phy, "Failed to enable link training\n");
		return false;
	}

	/*
	 * The DP 1.4 spec defines the max clock recovery retries value
	 * as 10 but for pre-DP 1.4 devices we set a very tolerant
	 * retry limit of 80 (4 voltage levels x 4 preemphasis levels x
	 * 5 identical voltage retries). Since the previous specs didn't
	 * define a limit and created the possibility of an infinite loop
	 * we want to prevent any sink from triggering that corner case.
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
		max_cr_tries = 10;
	else
		max_cr_tries = 80;

	voltage_tries = 1;
	for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
		usleep_range(delay_us, 2 * delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			return false;
		}

		if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, dp_phy, "Clock recovery OK\n");
			return true;
		}

		if (voltage_tries == 5) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Same voltage tried 5 times\n");
			return false;
		}

		if (max_vswing_reached) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy, "Max Voltage Swing reached\n");
			return false;
		}

		/* Update training set as requested by target */
		intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
					  link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
			lt_err(intel_dp, dp_phy, "Failed to update link training\n");
			return false;
		}

		if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
			++voltage_tries;
		else
			voltage_tries = 1;

		memcpy(old_link_status, link_status, sizeof(link_status));

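		/*
		 * Record whether the levels just programmed already hit the
		 * max vswing/TX FFE limit; if clock recovery still fails, the
		 * check at the top of the next iteration gives up.
		 */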
		if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
			max_vswing_reached = true;
	}

	intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
	lt_err(intel_dp, dp_phy, "Failed clock recovery %d times, giving up!\n",
	       max_cr_tries);

	return false;
}

/*
 * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
 * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
 * 1.2 devices that support it, TPS2 otherwise.
 */
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
				     const struct intel_crtc_state *crtc_state,
				     enum drm_dp_phy dp_phy)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool source_tps3, sink_tps3, source_tps4, sink_tps4;

	/* UHBR+ use separate 128b/132b TPS2 */
	if (intel_dp_is_uhbr(crtc_state))
		return DP_TRAINING_PATTERN_2;

	/*
	 * TPS4 support is mandatory for all downstream devices that
	 * support HBR3. There are no known eDP panels that support
	 * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
	 * LTTPRs must support TPS4.
	 */
	source_tps4 = intel_dp_source_supports_tps4(i915);
	sink_tps4 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps4_supported(intel_dp->dpcd);
	if (source_tps4 && sink_tps4) {
		return DP_TRAINING_PATTERN_4;
	} else if (crtc_state->port_clock == 810000) {
		if (!source_tps4)
			lt_dbg(intel_dp, dp_phy,
			       "8.1 Gbps link rate without source TPS4 support\n");
		if (!sink_tps4)
			lt_dbg(intel_dp, dp_phy,
			       "8.1 Gbps link rate without sink TPS4 support\n");
	}

	/*
	 * TPS3 support is mandatory for downstream devices that
	 * support HBR2. However, not all sinks follow the spec.
	 */
	source_tps3 = intel_dp_source_supports_tps3(i915);
	sink_tps3 = dp_phy != DP_PHY_DPRX ||
		    drm_dp_tps3_supported(intel_dp->dpcd);
	if (source_tps3 && sink_tps3) {
		return DP_TRAINING_PATTERN_3;
	} else if (crtc_state->port_clock >= 540000) {
		if (!source_tps3)
			lt_dbg(intel_dp, dp_phy,
			       ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
		if (!sink_tps3)
			lt_dbg(intel_dp, dp_phy,
			       ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
	}

	return DP_TRAINING_PATTERN_2;
}

/*
 * Perform the link training channel equalization phase on the given DP PHY
 * using one of training pattern 2, 3 or 4 depending on the source and
 * sink capabilities.
 */
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
					    const struct intel_crtc_state *crtc_state,
					    enum drm_dp_phy dp_phy)
{
	int tries;
	u32 training_pattern;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool channel_eq = false;
	int delay_us;

	delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux,
						intel_dp->dpcd, dp_phy,
						intel_dp_is_uhbr(crtc_state));

	training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
	/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
	if (training_pattern != DP_TRAINING_PATTERN_4)
		training_pattern |= DP_LINK_SCRAMBLING_DISABLE;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
				     training_pattern)) {
		lt_err(intel_dp, dp_phy, "Failed to start channel equalization\n");
		return false;
	}

	for (tries = 0; tries < 5; tries++) {
		usleep_range(delay_us, 2 * delay_us);

		if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
						     link_status) < 0) {
			lt_err(intel_dp, dp_phy, "Failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      crtc_state->lane_count)) {
			intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
			lt_dbg(intel_dp, dp_phy,
			       "Clock recovery check failed, cannot continue channel equalization\n");
			break;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 crtc_state->lane_count)) {
			channel_eq = true;
			lt_dbg(intel_dp, dp_phy, "Channel EQ done. DP Training successful\n");
DP Training successful\n"); 1014c349dbc7Sjsg break; 1015c349dbc7Sjsg } 1016c349dbc7Sjsg 1017c349dbc7Sjsg /* Update training set as requested by target */ 10185ca02815Sjsg intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy, 10195ca02815Sjsg link_status); 10205ca02815Sjsg if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) { 1021f005ef32Sjsg lt_err(intel_dp, dp_phy, "Failed to update link training\n"); 1022c349dbc7Sjsg break; 1023c349dbc7Sjsg } 1024c349dbc7Sjsg } 1025c349dbc7Sjsg 1026c349dbc7Sjsg /* Try 5 times, else fail and try at lower BW */ 1027c349dbc7Sjsg if (tries == 5) { 10281bb76ff1Sjsg intel_dp_dump_link_status(intel_dp, dp_phy, link_status); 1029f005ef32Sjsg lt_dbg(intel_dp, dp_phy, "Channel equalization failed 5 times\n"); 1030c349dbc7Sjsg } 1031c349dbc7Sjsg 1032c349dbc7Sjsg return channel_eq; 1033c349dbc7Sjsg } 1034c349dbc7Sjsg 10355ca02815Sjsg static bool intel_dp_disable_dpcd_training_pattern(struct intel_dp *intel_dp, 10365ca02815Sjsg enum drm_dp_phy dp_phy) 10375ca02815Sjsg { 10385ca02815Sjsg int reg = intel_dp_training_pattern_set_reg(intel_dp, dp_phy); 10395ca02815Sjsg u8 val = DP_TRAINING_PATTERN_DISABLE; 10405ca02815Sjsg 10415ca02815Sjsg return drm_dp_dpcd_write(&intel_dp->aux, reg, &val, 1) == 1; 10425ca02815Sjsg } 10435ca02815Sjsg 10441bb76ff1Sjsg static int 10451bb76ff1Sjsg intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, 10461bb76ff1Sjsg const struct intel_crtc_state *crtc_state) 10471bb76ff1Sjsg { 10481bb76ff1Sjsg u8 sink_status; 10491bb76ff1Sjsg int ret; 10501bb76ff1Sjsg 10511bb76ff1Sjsg ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_STATUS, &sink_status); 10521bb76ff1Sjsg if (ret != 1) { 1053f005ef32Sjsg lt_dbg(intel_dp, DP_PHY_DPRX, "Failed to read sink status\n"); 10541bb76ff1Sjsg return ret < 0 ? ret : -EIO; 10551bb76ff1Sjsg } 10561bb76ff1Sjsg 10571bb76ff1Sjsg return sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION ? 1 : 0; 10581bb76ff1Sjsg } 10591bb76ff1Sjsg 10605ca02815Sjsg /** 10615ca02815Sjsg * intel_dp_stop_link_train - stop link training 10625ca02815Sjsg * @intel_dp: DP struct 10635ca02815Sjsg * @crtc_state: state for CRTC attached to the encoder 10645ca02815Sjsg * 10655ca02815Sjsg * Stop the link training of the @intel_dp port, disabling the training 10665ca02815Sjsg * pattern in the sink's DPCD, and disabling the test pattern symbol 10675ca02815Sjsg * generation on the port. 10685ca02815Sjsg * 10695ca02815Sjsg * What symbols are output on the port after this point is 10705ca02815Sjsg * platform specific: On DDI/VLV/CHV platforms it will be the idle pattern 10715ca02815Sjsg * with the pipe being disabled, on older platforms it's HW specific if/how an 10725ca02815Sjsg * idle pattern is generated, as the pipe is already enabled here for those. 10735ca02815Sjsg * 10745ca02815Sjsg * This function must be called after intel_dp_start_link_train(). 
 */
void intel_dp_stop_link_train(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *crtc_state)
{
	intel_dp->link_trained = true;

	intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
	intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
					       DP_TRAINING_PATTERN_DISABLE);

	if (intel_dp_is_uhbr(crtc_state) &&
	    wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
	}
}

static bool
intel_dp_link_train_phy(struct intel_dp *intel_dp,
			const struct intel_crtc_state *crtc_state,
			enum drm_dp_phy dp_phy)
{
	bool ret = false;

	if (!intel_dp_link_training_clock_recovery(intel_dp, crtc_state, dp_phy))
		goto out;

	if (!intel_dp_link_training_channel_equalization(intel_dp, crtc_state, dp_phy))
		goto out;

	ret = true;

out:
	lt_dbg(intel_dp, dp_phy,
	       "Link Training %s at link rate = %d, lane count = %d\n",
	       ret ? "passed" : "failed",
	       crtc_state->port_clock, crtc_state->lane_count);

	return ret;
}

static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
						     const struct intel_crtc_state *crtc_state)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
		return;
	}

	if (intel_dp->hobl_active) {
		lt_dbg(intel_dp, DP_PHY_DPRX,
		       "Link Training failed with HOBL active, not enabling it from now on\n");
		intel_dp->hobl_failed = true;
	} else if (intel_dp_get_link_train_fallback_values(intel_dp,
							   crtc_state->port_clock,
							   crtc_state->lane_count)) {
		return;
	}

	/* Schedule a Hotplug Uevent to userspace to start modeset */
	queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
}

/* Perform the link training on all LTTPRs and the DPRX on a link. */
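/*
 * The PHYs are trained in downstream order: first the LTTPR adjacent to the
 * source (DP_PHY_LTTPR(lttpr_count - 1), see
 * intel_dp_phy_is_downstream_of_source()), finishing with the DPRX.
 */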
static bool
intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool ret = true;
	int i;

	for (i = lttpr_count - 1; i >= 0; i--) {
		enum drm_dp_phy dp_phy = DP_PHY_LTTPR(i);

		ret = intel_dp_link_train_phy(intel_dp, crtc_state, dp_phy);
		intel_dp_disable_dpcd_training_pattern(intel_dp, dp_phy);

		if (!ret)
			break;
	}

	if (ret)
		ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);

	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);

	return ret;
}

/*
 * 128b/132b DP LANEx_EQ_DONE Sequence (DP 2.0 E11 3.5.2.16.1)
 */
static bool
intel_dp_128b132b_lane_eq(struct intel_dp *intel_dp,
			  const struct intel_crtc_state *crtc_state)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int delay_us;
	int try, max_tries = 20;
	unsigned long deadline;
	bool timeout = false;

	/*
	 * Reset signal levels. Start transmitting 128b/132b TPS1.
	 *
	 * Put DPRX and LTTPRs (if any) into intra-hop AUX mode by writing TPS1
	 * in DP_TRAINING_PATTERN_SET.
	 */
	if (!intel_dp_reset_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
				       DP_TRAINING_PATTERN_1)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS1\n");
		return false;
	}

	delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

	/* Read the initial TX FFE settings. */
	if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to read TX FFE presets\n");
		return false;
	}

	/* Update signal levels and training set as requested. */
	intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
	if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to set initial TX FFE settings\n");
		return false;
	}

	/* Start transmitting 128b/132b TPS2. */
	if (!intel_dp_set_link_train(intel_dp, crtc_state, DP_PHY_DPRX,
				     DP_TRAINING_PATTERN_2)) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2\n");
		return false;
	}

	/* Time budget for the LANEx_EQ_DONE Sequence */
	deadline = jiffies + msecs_to_jiffies_timeout(400);

	for (try = 0; try < max_tries; try++) {
		usleep_range(delay_us, 2 * delay_us);

		/*
		 * The delay may get updated. The transmitter shall read the
		 * delay before link status during link training.
		 */
		delay_us = drm_dp_128b132b_read_aux_rd_interval(&intel_dp->aux);

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX,
			       "Downstream link training failure\n");
			return false;
		}

		if (drm_dp_128b132b_lane_channel_eq_done(link_status, crtc_state->lane_count)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "Lane channel eq done\n");
			break;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Lane channel eq timeout\n");
			return false;
		}

		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		/* Update signal levels and training set as requested. */
		intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, link_status);
		if (!intel_dp_update_link_train(intel_dp, crtc_state, DP_PHY_DPRX)) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to update TX FFE settings\n");
			return false;
		}
	}

	if (try == max_tries) {
		intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
		lt_err(intel_dp, DP_PHY_DPRX, "Max loop count reached\n");
		return false;
	}

	for (;;) {
		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_link_training_failed(link_status)) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Downstream link training failure\n");
			return false;
		}

		if (drm_dp_128b132b_eq_interlane_align_done(link_status)) {
			lt_dbg(intel_dp, DP_PHY_DPRX, "Interlane align done\n");
			break;
		}

		if (timeout) {
			intel_dp_dump_link_status(intel_dp, DP_PHY_DPRX, link_status);
			lt_err(intel_dp, DP_PHY_DPRX, "Interlane align timeout\n");
			return false;
		}

		usleep_range(2000, 3000);
	}

	return true;
}

/*
 * 128b/132b DP LANEx_CDS_DONE Sequence (DP 2.0 E11 3.5.2.16.2)
 */
static bool
intel_dp_128b132b_lane_cds(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state,
			   int lttpr_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	unsigned long deadline;

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
			       DP_TRAINING_PATTERN_2_CDS) != 1) {
		lt_err(intel_dp, DP_PHY_DPRX, "Failed to start 128b/132b TPS2 CDS\n");
		return false;
	}

	/* Time budget for the LANEx_CDS_DONE Sequence */
	deadline = jiffies + msecs_to_jiffies_timeout((lttpr_count + 1) * 20);

	for (;;) {
		bool timeout = false;

		if (time_after(jiffies, deadline))
			timeout = true; /* try one last time after deadline */

		usleep_range(2000, 3000);

		if (drm_dp_dpcd_read_link_status(&intel_dp->aux, link_status) < 0) {
			lt_err(intel_dp, DP_PHY_DPRX, "Failed to read link status\n");
			return false;
		}

		if (drm_dp_128b132b_eq_interlane_align_done(link_status) &&
		    drm_dp_128b132b_cds_interlane_align_done(link_status) &&
		    drm_dp_128b132b_lane_symbol_locked(link_status, crtc_state->lane_count)) {
/*
 * 128b/132b link training sequence. (DP 2.0 E11 SCR on link training.)
 */
static bool
intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
			     const struct intel_crtc_state *crtc_state,
			     int lttpr_count)
{
	bool passed = false;

	if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
		lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
		return false;
	}

	if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
	    intel_dp_128b132b_lane_cds(intel_dp, crtc_state, lttpr_count))
		passed = true;

	lt_dbg(intel_dp, DP_PHY_DPRX,
	       "128b/132b Link Training %s at link rate = %d, lane count = %d\n",
	       passed ? "passed" : "failed",
	       crtc_state->port_clock, crtc_state->lane_count);

	return passed;
}
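
/*
 * A note on the polling macro used above, with assumed semantics: wait_for()
 * is the i915 convenience helper that re-evaluates its condition until it
 * becomes true or the timeout in milliseconds expires, returning 0 on
 * success and a negative error code on timeout. A hypothetical, generic use
 * would look like:
 *
 *	if (wait_for(condition_is_met(dev), 500))
 *		return -ETIMEDOUT;
 *
 * condition_is_met() and dev are placeholders. The intra-hop check at the
 * top of intel_dp_128b132b_link_train() is such a use with a 500 ms budget.
 */
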
/**
 * intel_dp_start_link_train - start link training
 * @intel_dp: DP struct
 * @crtc_state: state for CRTC attached to the encoder
 *
 * Start the link training of the @intel_dp port, scheduling a fallback
 * retraining with reduced link rate/lane parameters if the link training
 * fails.
 * After calling this function, intel_dp_stop_link_train() must be called.
 */
void intel_dp_start_link_train(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool passed;
	/*
	 * Reinit the LTTPRs here to ensure that they are switched to
	 * non-transparent mode. During an earlier LTTPR detection this
	 * could have been prevented by an active link.
	 */
	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);

	if (lttpr_count < 0)
		/* Still continue with enabling the port and link training. */
		lttpr_count = 0;

	intel_dp_prepare_link_train(intel_dp, crtc_state);

	if (intel_dp_is_uhbr(crtc_state))
		passed = intel_dp_128b132b_link_train(intel_dp, crtc_state, lttpr_count);
	else
		passed = intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count);

	/*
	 * Ignore the link failure in CI
	 *
	 * In fixed environments like CI, the displays sometimes generate
	 * unexpected long HPDs. If the ignore_long_hpd flag is set, such long
	 * HPDs are ignored, and, probably as a consequence of these ignored
	 * long HPDs, subsequent link trainings fail, resulting in CI
	 * execution failures.
	 *
	 * Test cases which rely on link training or HPD processing can unset
	 * the ignore_long_hpd flag from the test case itself.
	 */
	if (!passed && i915->display.hotplug.ignore_long_hpd) {
		lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
		return;
	}

	if (!passed)
		intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
}

void intel_dp_128b132b_sdp_crc16(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	/*
	 * VIDEO_DIP_CTL register bit 31 should be set to '0' to not
	 * disable SDP CRC. This is applicable for Display version 13.
	 * The default value of bit 31 is '0', hence the write is skipped.
	 * TODO: Corrective actions on SDP corruption are yet to be defined.
	 */
	if (intel_dp_is_uhbr(crtc_state))
		/* DP v2.0 SCR on SDP CRC16 for 128b/132b Link Layer */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_SDP_ERROR_DETECTION_CONFIGURATION,
				   DP_SDP_CRC16_128B132B_EN);

	lt_dbg(intel_dp, DP_PHY_DPRX, "DP2.0 SDP CRC16 for 128b/132b enabled\n");
}
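
/*
 * Illustrative usage sketch, not driver code: per the kernel-doc of
 * intel_dp_start_link_train() above, a caller in the encoder enable path is
 * expected to pair the start and stop calls, along the lines of:
 *
 *	intel_dp_start_link_train(intel_dp, crtc_state);
 *	intel_dp_stop_link_train(intel_dp, crtc_state);
 *
 * The exact call site and the signature of intel_dp_stop_link_train() are
 * assumptions here; only the pairing requirement comes from the kernel-doc
 * above.
 */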