1 /* 2 * Copyright © 2008 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Keith Packard <keithp@keithp.com> 25 * 26 */ 27 28 #include <linux/export.h> 29 #include <linux/i2c.h> 30 #include <linux/notifier.h> 31 #include <linux/slab.h> 32 #include <linux/timekeeping.h> 33 #include <linux/types.h> 34 35 #include <asm/byteorder.h> 36 37 #include <drm/drm_atomic_helper.h> 38 #include <drm/drm_crtc.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_edid.h> 41 #include <drm/drm_probe_helper.h> 42 43 #include "g4x_dp.h" 44 #include "i915_debugfs.h" 45 #include "i915_drv.h" 46 #include "intel_atomic.h" 47 #include "intel_audio.h" 48 #include "intel_connector.h" 49 #include "intel_ddi.h" 50 #include "intel_de.h" 51 #include "intel_display_types.h" 52 #include "intel_dp.h" 53 #include "intel_dp_aux.h" 54 #include "intel_dp_hdcp.h" 55 #include "intel_dp_link_training.h" 56 #include "intel_dp_mst.h" 57 #include "intel_dpio_phy.h" 58 #include "intel_dpll.h" 59 #include "intel_drrs.h" 60 #include "intel_fifo_underrun.h" 61 #include "intel_hdcp.h" 62 #include "intel_hdmi.h" 63 #include "intel_hotplug.h" 64 #include "intel_lspcon.h" 65 #include "intel_lvds.h" 66 #include "intel_panel.h" 67 #include "intel_pps.h" 68 #include "intel_psr.h" 69 #include "intel_sideband.h" 70 #include "intel_tc.h" 71 #include "intel_vdsc.h" 72 #include "intel_vrr.h" 73 74 #define DP_DPRX_ESI_LEN 14 75 76 /* DP DSC throughput values used for slice count calculations KPixels/s */ 77 #define DP_DSC_PEAK_PIXEL_RATE 2720000 78 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 79 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 80 81 /* DP DSC FEC Overhead factor = 1/(0.972261) */ 82 #define DP_DSC_FEC_OVERHEAD_FACTOR 972261 83 84 /* Compliance test status bits */ 85 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 86 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK) 87 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK) 88 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) 89 90 91 /* Constants for DP DSC configurations */ 92 static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15}; 93 94 /* With Single pipe configuration, HW is capable of supporting maximum 95 * of 4 slices per line. 96 */ 97 static const u8 valid_dsc_slicecount[] = {1, 2, 4}; 98 99 /** 100 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH) 101 * @intel_dp: DP struct 102 * 103 * If a CPU or PCH DP output is attached to an eDP panel, this function 104 * will return true, and false otherwise. 105 */ 106 bool intel_dp_is_edp(struct intel_dp *intel_dp) 107 { 108 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 109 110 return dig_port->base.type == INTEL_OUTPUT_EDP; 111 } 112 113 static void intel_dp_unset_edid(struct intel_dp *intel_dp); 114 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc); 115 116 static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) 117 { 118 intel_dp->sink_rates[0] = 162000; 119 intel_dp->num_sink_rates = 1; 120 } 121 122 /* update sink rates from dpcd */ 123 static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) 124 { 125 static const int dp_rates[] = { 126 162000, 270000, 540000, 810000 127 }; 128 int i, max_rate; 129 int max_lttpr_rate; 130 131 if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) { 132 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */ 133 static const int quirk_rates[] = { 162000, 270000, 324000 }; 134 135 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates)); 136 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates); 137 138 return; 139 } 140 141 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); 142 max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps); 143 if (max_lttpr_rate) 144 max_rate = min(max_rate, max_lttpr_rate); 145 146 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) { 147 if (dp_rates[i] > max_rate) 148 break; 149 intel_dp->sink_rates[i] = dp_rates[i]; 150 } 151 152 intel_dp->num_sink_rates = i; 153 } 154 155 /* Get length of rates array potentially limited by max_rate. */ 156 static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) 157 { 158 int i; 159 160 /* Limit results by potentially reduced max rate */ 161 for (i = 0; i < len; i++) { 162 if (rates[len - i - 1] <= max_rate) 163 return len - i; 164 } 165 166 return 0; 167 } 168 169 /* Get length of common rates array potentially limited by max_rate. */ 170 static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, 171 int max_rate) 172 { 173 return intel_dp_rate_limit_len(intel_dp->common_rates, 174 intel_dp->num_common_rates, max_rate); 175 } 176 177 /* Theoretical max between source and sink */ 178 static int intel_dp_max_common_rate(struct intel_dp *intel_dp) 179 { 180 return intel_dp->common_rates[intel_dp->num_common_rates - 1]; 181 } 182 183 /* Theoretical max between source and sink */ 184 static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) 185 { 186 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 187 int source_max = dig_port->max_lanes; 188 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); 189 int fia_max = intel_tc_port_fia_max_lane_count(dig_port); 190 int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); 191 192 if (lttpr_max) 193 sink_max = min(sink_max, lttpr_max); 194 195 return min3(source_max, sink_max, fia_max); 196 } 197 198 int intel_dp_max_lane_count(struct intel_dp *intel_dp) 199 { 200 return intel_dp->max_link_lane_count; 201 } 202 203 int 204 intel_dp_link_required(int pixel_clock, int bpp) 205 { 206 /* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */ 207 return DIV_ROUND_UP(pixel_clock * bpp, 8); 208 } 209 210 int 211 intel_dp_max_data_rate(int max_link_clock, int max_lanes) 212 { 213 /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the 214 * link rate that is generally expressed in Gbps. Since, 8 bits of data 215 * is transmitted every LS_Clk per lane, there is no need to account for 216 * the channel encoding that is done in the PHY layer here. 217 */ 218 219 return max_link_clock * max_lanes; 220 } 221 222 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp) 223 { 224 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 225 struct intel_encoder *encoder = &intel_dig_port->base; 226 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 227 228 return DISPLAY_VER(dev_priv) >= 12 || 229 (DISPLAY_VER(dev_priv) == 11 && 230 encoder->port != PORT_A); 231 } 232 233 static int icl_max_source_rate(struct intel_dp *intel_dp) 234 { 235 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 236 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 237 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); 238 239 if (intel_phy_is_combo(dev_priv, phy) && 240 !intel_dp_is_edp(intel_dp)) 241 return 540000; 242 243 return 810000; 244 } 245 246 static int ehl_max_source_rate(struct intel_dp *intel_dp) 247 { 248 if (intel_dp_is_edp(intel_dp)) 249 return 540000; 250 251 return 810000; 252 } 253 254 static void 255 intel_dp_set_source_rates(struct intel_dp *intel_dp) 256 { 257 /* The values must be in increasing order */ 258 static const int icl_rates[] = { 259 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000 260 }; 261 static const int bxt_rates[] = { 262 162000, 216000, 243000, 270000, 324000, 432000, 540000 263 }; 264 static const int skl_rates[] = { 265 162000, 216000, 270000, 324000, 432000, 540000 266 }; 267 static const int hsw_rates[] = { 268 162000, 270000, 540000 269 }; 270 static const int g4x_rates[] = { 271 162000, 270000 272 }; 273 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 274 struct intel_encoder *encoder = &dig_port->base; 275 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 276 const int *source_rates; 277 int size, max_rate = 0, vbt_max_rate; 278 279 /* This should only be done once */ 280 drm_WARN_ON(&dev_priv->drm, 281 intel_dp->source_rates || intel_dp->num_source_rates); 282 283 if (DISPLAY_VER(dev_priv) >= 11) { 284 source_rates = icl_rates; 285 size = ARRAY_SIZE(icl_rates); 286 if (IS_JSL_EHL(dev_priv)) 287 max_rate = ehl_max_source_rate(intel_dp); 288 else 289 max_rate = icl_max_source_rate(intel_dp); 290 } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { 291 source_rates = bxt_rates; 292 size = ARRAY_SIZE(bxt_rates); 293 } else if (DISPLAY_VER(dev_priv) == 9) { 294 source_rates = skl_rates; 295 size = ARRAY_SIZE(skl_rates); 296 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) || 297 IS_BROADWELL(dev_priv)) { 298 source_rates = hsw_rates; 299 size = ARRAY_SIZE(hsw_rates); 300 } else { 301 source_rates = g4x_rates; 302 size = ARRAY_SIZE(g4x_rates); 303 } 304 305 vbt_max_rate = intel_bios_dp_max_link_rate(encoder); 306 if (max_rate && vbt_max_rate) 307 max_rate = min(max_rate, vbt_max_rate); 308 else if (vbt_max_rate) 309 max_rate = vbt_max_rate; 310 311 if (max_rate) 312 size = intel_dp_rate_limit_len(source_rates, size, max_rate); 313 314 intel_dp->source_rates = source_rates; 315 intel_dp->num_source_rates = size; 316 } 317 318 static int intersect_rates(const int *source_rates, int source_len, 319 const int *sink_rates, int sink_len, 320 int *common_rates) 321 { 322 int i = 0, j = 0, k = 0; 323 324 while (i < source_len && j < sink_len) { 325 if (source_rates[i] == sink_rates[j]) { 326 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) 327 return k; 328 common_rates[k] = source_rates[i]; 329 ++k; 330 ++i; 331 ++j; 332 } else if (source_rates[i] < sink_rates[j]) { 333 ++i; 334 } else { 335 ++j; 336 } 337 } 338 return k; 339 } 340 341 /* return index of rate in rates array, or -1 if not found */ 342 static int intel_dp_rate_index(const int *rates, int len, int rate) 343 { 344 int i; 345 346 for (i = 0; i < len; i++) 347 if (rate == rates[i]) 348 return i; 349 350 return -1; 351 } 352 353 static void intel_dp_set_common_rates(struct intel_dp *intel_dp) 354 { 355 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 356 357 drm_WARN_ON(&i915->drm, 358 !intel_dp->num_source_rates || !intel_dp->num_sink_rates); 359 360 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates, 361 intel_dp->num_source_rates, 362 intel_dp->sink_rates, 363 intel_dp->num_sink_rates, 364 intel_dp->common_rates); 365 366 /* Paranoia, there should always be something in common. */ 367 if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) { 368 intel_dp->common_rates[0] = 162000; 369 intel_dp->num_common_rates = 1; 370 } 371 } 372 373 static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate, 374 u8 lane_count) 375 { 376 /* 377 * FIXME: we need to synchronize the current link parameters with 378 * hardware readout. Currently fast link training doesn't work on 379 * boot-up. 380 */ 381 if (link_rate == 0 || 382 link_rate > intel_dp->max_link_rate) 383 return false; 384 385 if (lane_count == 0 || 386 lane_count > intel_dp_max_lane_count(intel_dp)) 387 return false; 388 389 return true; 390 } 391 392 static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp, 393 int link_rate, 394 u8 lane_count) 395 { 396 const struct drm_display_mode *fixed_mode = 397 intel_dp->attached_connector->panel.fixed_mode; 398 int mode_rate, max_rate; 399 400 mode_rate = intel_dp_link_required(fixed_mode->clock, 18); 401 max_rate = intel_dp_max_data_rate(link_rate, lane_count); 402 if (mode_rate > max_rate) 403 return false; 404 405 return true; 406 } 407 408 int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, 409 int link_rate, u8 lane_count) 410 { 411 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 412 int index; 413 414 /* 415 * TODO: Enable fallback on MST links once MST link compute can handle 416 * the fallback params. 417 */ 418 if (intel_dp->is_mst) { 419 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 420 return -1; 421 } 422 423 if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) { 424 drm_dbg_kms(&i915->drm, 425 "Retrying Link training for eDP with max parameters\n"); 426 intel_dp->use_max_params = true; 427 return 0; 428 } 429 430 index = intel_dp_rate_index(intel_dp->common_rates, 431 intel_dp->num_common_rates, 432 link_rate); 433 if (index > 0) { 434 if (intel_dp_is_edp(intel_dp) && 435 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 436 intel_dp->common_rates[index - 1], 437 lane_count)) { 438 drm_dbg_kms(&i915->drm, 439 "Retrying Link training for eDP with same parameters\n"); 440 return 0; 441 } 442 intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; 443 intel_dp->max_link_lane_count = lane_count; 444 } else if (lane_count > 1) { 445 if (intel_dp_is_edp(intel_dp) && 446 !intel_dp_can_link_train_fallback_for_edp(intel_dp, 447 intel_dp_max_common_rate(intel_dp), 448 lane_count >> 1)) { 449 drm_dbg_kms(&i915->drm, 450 "Retrying Link training for eDP with same parameters\n"); 451 return 0; 452 } 453 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 454 intel_dp->max_link_lane_count = lane_count >> 1; 455 } else { 456 drm_err(&i915->drm, "Link Training Unsuccessful\n"); 457 return -1; 458 } 459 460 return 0; 461 } 462 463 u32 intel_dp_mode_to_fec_clock(u32 mode_clock) 464 { 465 return div_u64(mul_u32_u32(mode_clock, 1000000U), 466 DP_DSC_FEC_OVERHEAD_FACTOR); 467 } 468 469 static int 470 small_joiner_ram_size_bits(struct drm_i915_private *i915) 471 { 472 if (DISPLAY_VER(i915) >= 11) 473 return 7680 * 8; 474 else 475 return 6144 * 8; 476 } 477 478 static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, 479 u32 link_clock, u32 lane_count, 480 u32 mode_clock, u32 mode_hdisplay, 481 bool bigjoiner, 482 u32 pipe_bpp) 483 { 484 u32 bits_per_pixel, max_bpp_small_joiner_ram; 485 int i; 486 487 /* 488 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* 489 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP) 490 * for SST -> TimeSlotsPerMTP is 1, 491 * for MST -> TimeSlotsPerMTP has to be calculated 492 */ 493 bits_per_pixel = (link_clock * lane_count * 8) / 494 intel_dp_mode_to_fec_clock(mode_clock); 495 drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel); 496 497 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ 498 max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / 499 mode_hdisplay; 500 501 if (bigjoiner) 502 max_bpp_small_joiner_ram *= 2; 503 504 drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n", 505 max_bpp_small_joiner_ram); 506 507 /* 508 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW 509 * check, output bpp from small joiner RAM check) 510 */ 511 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); 512 513 if (bigjoiner) { 514 u32 max_bpp_bigjoiner = 515 i915->max_cdclk_freq * 48 / 516 intel_dp_mode_to_fec_clock(mode_clock); 517 518 DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner); 519 bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner); 520 } 521 522 /* Error out if the max bpp is less than smallest allowed valid bpp */ 523 if (bits_per_pixel < valid_dsc_bpp[0]) { 524 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n", 525 bits_per_pixel, valid_dsc_bpp[0]); 526 return 0; 527 } 528 529 /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */ 530 if (DISPLAY_VER(i915) >= 13) { 531 bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1); 532 } else { 533 /* Find the nearest match in the array of known BPPs from VESA */ 534 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { 535 if (bits_per_pixel < valid_dsc_bpp[i + 1]) 536 break; 537 } 538 bits_per_pixel = valid_dsc_bpp[i]; 539 } 540 541 /* 542 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, 543 * fractional part is 0 544 */ 545 return bits_per_pixel << 4; 546 } 547 548 static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, 549 int mode_clock, int mode_hdisplay, 550 bool bigjoiner) 551 { 552 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 553 u8 min_slice_count, i; 554 int max_slice_width; 555 556 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) 557 min_slice_count = DIV_ROUND_UP(mode_clock, 558 DP_DSC_MAX_ENC_THROUGHPUT_0); 559 else 560 min_slice_count = DIV_ROUND_UP(mode_clock, 561 DP_DSC_MAX_ENC_THROUGHPUT_1); 562 563 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); 564 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { 565 drm_dbg_kms(&i915->drm, 566 "Unsupported slice width %d by DP DSC Sink device\n", 567 max_slice_width); 568 return 0; 569 } 570 /* Also take into account max slice width */ 571 min_slice_count = max_t(u8, min_slice_count, 572 DIV_ROUND_UP(mode_hdisplay, 573 max_slice_width)); 574 575 /* Find the closest match to the valid slice count values */ 576 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { 577 u8 test_slice_count = valid_dsc_slicecount[i] << bigjoiner; 578 579 if (test_slice_count > 580 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, false)) 581 break; 582 583 /* big joiner needs small joiner to be enabled */ 584 if (bigjoiner && test_slice_count < 4) 585 continue; 586 587 if (min_slice_count <= test_slice_count) 588 return test_slice_count; 589 } 590 591 drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n", 592 min_slice_count); 593 return 0; 594 } 595 596 static enum intel_output_format 597 intel_dp_output_format(struct drm_connector *connector, 598 const struct drm_display_mode *mode) 599 { 600 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 601 const struct drm_display_info *info = &connector->display_info; 602 603 if (!connector->ycbcr_420_allowed || 604 !drm_mode_is_420_only(info, mode)) 605 return INTEL_OUTPUT_FORMAT_RGB; 606 607 if (intel_dp->dfp.rgb_to_ycbcr && 608 intel_dp->dfp.ycbcr_444_to_420) 609 return INTEL_OUTPUT_FORMAT_RGB; 610 611 if (intel_dp->dfp.ycbcr_444_to_420) 612 return INTEL_OUTPUT_FORMAT_YCBCR444; 613 else 614 return INTEL_OUTPUT_FORMAT_YCBCR420; 615 } 616 617 int intel_dp_min_bpp(enum intel_output_format output_format) 618 { 619 if (output_format == INTEL_OUTPUT_FORMAT_RGB) 620 return 6 * 3; 621 else 622 return 8 * 3; 623 } 624 625 static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp) 626 { 627 /* 628 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output 629 * format of the number of bytes per pixel will be half the number 630 * of bytes of RGB pixel. 631 */ 632 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 633 bpp /= 2; 634 635 return bpp; 636 } 637 638 static int 639 intel_dp_mode_min_output_bpp(struct drm_connector *connector, 640 const struct drm_display_mode *mode) 641 { 642 enum intel_output_format output_format = 643 intel_dp_output_format(connector, mode); 644 645 return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format)); 646 } 647 648 static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, 649 int hdisplay) 650 { 651 /* 652 * Older platforms don't like hdisplay==4096 with DP. 653 * 654 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline 655 * and frame counter increment), but we don't get vblank interrupts, 656 * and the pipe underruns immediately. The link also doesn't seem 657 * to get trained properly. 658 * 659 * On CHV the vblank interrupts don't seem to disappear but 660 * otherwise the symptoms are similar. 661 * 662 * TODO: confirm the behaviour on HSW+ 663 */ 664 return hdisplay == 4096 && !HAS_DDI(dev_priv); 665 } 666 667 static enum drm_mode_status 668 intel_dp_mode_valid_downstream(struct intel_connector *connector, 669 const struct drm_display_mode *mode, 670 int target_clock) 671 { 672 struct intel_dp *intel_dp = intel_attached_dp(connector); 673 const struct drm_display_info *info = &connector->base.display_info; 674 int tmds_clock; 675 676 /* If PCON supports FRL MODE, check FRL bandwidth constraints */ 677 if (intel_dp->dfp.pcon_max_frl_bw) { 678 int target_bw; 679 int max_frl_bw; 680 int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode); 681 682 target_bw = bpp * target_clock; 683 684 max_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 685 686 /* converting bw from Gbps to Kbps*/ 687 max_frl_bw = max_frl_bw * 1000000; 688 689 if (target_bw > max_frl_bw) 690 return MODE_CLOCK_HIGH; 691 692 return MODE_OK; 693 } 694 695 if (intel_dp->dfp.max_dotclock && 696 target_clock > intel_dp->dfp.max_dotclock) 697 return MODE_CLOCK_HIGH; 698 699 /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */ 700 tmds_clock = target_clock; 701 if (drm_mode_is_420_only(info, mode)) 702 tmds_clock /= 2; 703 704 if (intel_dp->dfp.min_tmds_clock && 705 tmds_clock < intel_dp->dfp.min_tmds_clock) 706 return MODE_CLOCK_LOW; 707 if (intel_dp->dfp.max_tmds_clock && 708 tmds_clock > intel_dp->dfp.max_tmds_clock) 709 return MODE_CLOCK_HIGH; 710 711 return MODE_OK; 712 } 713 714 static enum drm_mode_status 715 intel_dp_mode_valid(struct drm_connector *connector, 716 struct drm_display_mode *mode) 717 { 718 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 719 struct intel_connector *intel_connector = to_intel_connector(connector); 720 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 721 struct drm_i915_private *dev_priv = to_i915(connector->dev); 722 int target_clock = mode->clock; 723 int max_rate, mode_rate, max_lanes, max_link_clock; 724 int max_dotclk = dev_priv->max_dotclk_freq; 725 u16 dsc_max_output_bpp = 0; 726 u8 dsc_slice_count = 0; 727 enum drm_mode_status status; 728 bool dsc = false, bigjoiner = false; 729 730 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 731 return MODE_NO_DBLESCAN; 732 733 if (mode->flags & DRM_MODE_FLAG_DBLCLK) 734 return MODE_H_ILLEGAL; 735 736 if (intel_dp_is_edp(intel_dp) && fixed_mode) { 737 if (mode->hdisplay != fixed_mode->hdisplay) 738 return MODE_PANEL; 739 740 if (mode->vdisplay != fixed_mode->vdisplay) 741 return MODE_PANEL; 742 743 target_clock = fixed_mode->clock; 744 } 745 746 if (mode->clock < 10000) 747 return MODE_CLOCK_LOW; 748 749 if ((target_clock > max_dotclk || mode->hdisplay > 5120) && 750 intel_dp_can_bigjoiner(intel_dp)) { 751 bigjoiner = true; 752 max_dotclk *= 2; 753 } 754 if (target_clock > max_dotclk) 755 return MODE_CLOCK_HIGH; 756 757 max_link_clock = intel_dp_max_link_rate(intel_dp); 758 max_lanes = intel_dp_max_lane_count(intel_dp); 759 760 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); 761 mode_rate = intel_dp_link_required(target_clock, 762 intel_dp_mode_min_output_bpp(connector, mode)); 763 764 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) 765 return MODE_H_ILLEGAL; 766 767 /* 768 * Output bpp is stored in 6.4 format so right shift by 4 to get the 769 * integer value since we support only integer values of bpp. 770 */ 771 if (DISPLAY_VER(dev_priv) >= 10 && 772 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) { 773 /* 774 * TBD pass the connector BPC, 775 * for now U8_MAX so that max BPC on that platform would be picked 776 */ 777 int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX); 778 779 if (intel_dp_is_edp(intel_dp)) { 780 dsc_max_output_bpp = 781 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4; 782 dsc_slice_count = 783 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 784 true); 785 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { 786 dsc_max_output_bpp = 787 intel_dp_dsc_get_output_bpp(dev_priv, 788 max_link_clock, 789 max_lanes, 790 target_clock, 791 mode->hdisplay, 792 bigjoiner, 793 pipe_bpp) >> 4; 794 dsc_slice_count = 795 intel_dp_dsc_get_slice_count(intel_dp, 796 target_clock, 797 mode->hdisplay, 798 bigjoiner); 799 } 800 801 dsc = dsc_max_output_bpp && dsc_slice_count; 802 } 803 804 /* 805 * Big joiner configuration needs DSC for TGL which is not true for 806 * XE_LPD where uncompressed joiner is supported. 807 */ 808 if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) 809 return MODE_CLOCK_HIGH; 810 811 if (mode_rate > max_rate && !dsc) 812 return MODE_CLOCK_HIGH; 813 814 status = intel_dp_mode_valid_downstream(intel_connector, 815 mode, target_clock); 816 if (status != MODE_OK) 817 return status; 818 819 return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner); 820 } 821 822 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) 823 { 824 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; 825 826 return max_rate >= 540000; 827 } 828 829 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp) 830 { 831 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1]; 832 833 return max_rate >= 810000; 834 } 835 836 static void snprintf_int_array(char *str, size_t len, 837 const int *array, int nelem) 838 { 839 int i; 840 841 str[0] = '\0'; 842 843 for (i = 0; i < nelem; i++) { 844 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]); 845 if (r >= len) 846 return; 847 str += r; 848 len -= r; 849 } 850 } 851 852 static void intel_dp_print_rates(struct intel_dp *intel_dp) 853 { 854 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 855 char str[128]; /* FIXME: too big for stack? */ 856 857 if (!drm_debug_enabled(DRM_UT_KMS)) 858 return; 859 860 snprintf_int_array(str, sizeof(str), 861 intel_dp->source_rates, intel_dp->num_source_rates); 862 drm_dbg_kms(&i915->drm, "source rates: %s\n", str); 863 864 snprintf_int_array(str, sizeof(str), 865 intel_dp->sink_rates, intel_dp->num_sink_rates); 866 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str); 867 868 snprintf_int_array(str, sizeof(str), 869 intel_dp->common_rates, intel_dp->num_common_rates); 870 drm_dbg_kms(&i915->drm, "common rates: %s\n", str); 871 } 872 873 int 874 intel_dp_max_link_rate(struct intel_dp *intel_dp) 875 { 876 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 877 int len; 878 879 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); 880 if (drm_WARN_ON(&i915->drm, len <= 0)) 881 return 162000; 882 883 return intel_dp->common_rates[len - 1]; 884 } 885 886 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) 887 { 888 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 889 int i = intel_dp_rate_index(intel_dp->sink_rates, 890 intel_dp->num_sink_rates, rate); 891 892 if (drm_WARN_ON(&i915->drm, i < 0)) 893 i = 0; 894 895 return i; 896 } 897 898 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, 899 u8 *link_bw, u8 *rate_select) 900 { 901 /* eDP 1.4 rate select method. */ 902 if (intel_dp->use_rate_select) { 903 *link_bw = 0; 904 *rate_select = 905 intel_dp_rate_select(intel_dp, port_clock); 906 } else { 907 *link_bw = drm_dp_link_rate_to_bw_code(port_clock); 908 *rate_select = 0; 909 } 910 } 911 912 static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, 913 const struct intel_crtc_state *pipe_config) 914 { 915 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 916 917 /* On TGL, FEC is supported on all Pipes */ 918 if (DISPLAY_VER(dev_priv) >= 12) 919 return true; 920 921 if (DISPLAY_VER(dev_priv) == 11 && pipe_config->cpu_transcoder != TRANSCODER_A) 922 return true; 923 924 return false; 925 } 926 927 static bool intel_dp_supports_fec(struct intel_dp *intel_dp, 928 const struct intel_crtc_state *pipe_config) 929 { 930 return intel_dp_source_supports_fec(intel_dp, pipe_config) && 931 drm_dp_sink_supports_fec(intel_dp->fec_capable); 932 } 933 934 static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, 935 const struct intel_crtc_state *crtc_state) 936 { 937 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP) && !crtc_state->fec_enable) 938 return false; 939 940 return intel_dsc_source_support(crtc_state) && 941 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd); 942 } 943 944 static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp, 945 const struct intel_crtc_state *crtc_state) 946 { 947 return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 || 948 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 949 intel_dp->dfp.ycbcr_444_to_420); 950 } 951 952 static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp, 953 const struct intel_crtc_state *crtc_state, int bpc) 954 { 955 int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8; 956 957 if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) 958 clock /= 2; 959 960 return clock; 961 } 962 963 static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, 964 const struct intel_crtc_state *crtc_state, int bpc) 965 { 966 int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc); 967 968 if (intel_dp->dfp.min_tmds_clock && 969 tmds_clock < intel_dp->dfp.min_tmds_clock) 970 return false; 971 972 if (intel_dp->dfp.max_tmds_clock && 973 tmds_clock > intel_dp->dfp.max_tmds_clock) 974 return false; 975 976 return true; 977 } 978 979 static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp, 980 const struct intel_crtc_state *crtc_state, 981 int bpc) 982 { 983 984 return intel_hdmi_deep_color_possible(crtc_state, bpc, 985 intel_dp->has_hdmi_sink, 986 intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && 987 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc); 988 } 989 990 static int intel_dp_max_bpp(struct intel_dp *intel_dp, 991 const struct intel_crtc_state *crtc_state) 992 { 993 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 994 struct intel_connector *intel_connector = intel_dp->attached_connector; 995 int bpp, bpc; 996 997 bpc = crtc_state->pipe_bpp / 3; 998 999 if (intel_dp->dfp.max_bpc) 1000 bpc = min_t(int, bpc, intel_dp->dfp.max_bpc); 1001 1002 if (intel_dp->dfp.min_tmds_clock) { 1003 for (; bpc >= 10; bpc -= 2) { 1004 if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc)) 1005 break; 1006 } 1007 } 1008 1009 bpp = bpc * 3; 1010 if (intel_dp_is_edp(intel_dp)) { 1011 /* Get bpp from vbt only for panels that dont have bpp in edid */ 1012 if (intel_connector->base.display_info.bpc == 0 && 1013 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) { 1014 drm_dbg_kms(&dev_priv->drm, 1015 "clamping bpp for eDP panel to BIOS-provided %i\n", 1016 dev_priv->vbt.edp.bpp); 1017 bpp = dev_priv->vbt.edp.bpp; 1018 } 1019 } 1020 1021 return bpp; 1022 } 1023 1024 /* Adjust link config limits based on compliance test requests. */ 1025 void 1026 intel_dp_adjust_compliance_config(struct intel_dp *intel_dp, 1027 struct intel_crtc_state *pipe_config, 1028 struct link_config_limits *limits) 1029 { 1030 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1031 1032 /* For DP Compliance we override the computed bpp for the pipe */ 1033 if (intel_dp->compliance.test_data.bpc != 0) { 1034 int bpp = 3 * intel_dp->compliance.test_data.bpc; 1035 1036 limits->min_bpp = limits->max_bpp = bpp; 1037 pipe_config->dither_force_disable = bpp == 6 * 3; 1038 1039 drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp); 1040 } 1041 1042 /* Use values requested by Compliance Test Request */ 1043 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) { 1044 int index; 1045 1046 /* Validate the compliance test data since max values 1047 * might have changed due to link train fallback. 1048 */ 1049 if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate, 1050 intel_dp->compliance.test_lane_count)) { 1051 index = intel_dp_rate_index(intel_dp->common_rates, 1052 intel_dp->num_common_rates, 1053 intel_dp->compliance.test_link_rate); 1054 if (index >= 0) 1055 limits->min_clock = limits->max_clock = index; 1056 limits->min_lane_count = limits->max_lane_count = 1057 intel_dp->compliance.test_lane_count; 1058 } 1059 } 1060 } 1061 1062 /* Optimize link config in order: max bpp, min clock, min lanes */ 1063 static int 1064 intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, 1065 struct intel_crtc_state *pipe_config, 1066 const struct link_config_limits *limits) 1067 { 1068 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1069 int bpp, clock, lane_count; 1070 int mode_rate, link_clock, link_avail; 1071 1072 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { 1073 int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); 1074 1075 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 1076 output_bpp); 1077 1078 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { 1079 for (lane_count = limits->min_lane_count; 1080 lane_count <= limits->max_lane_count; 1081 lane_count <<= 1) { 1082 link_clock = intel_dp->common_rates[clock]; 1083 link_avail = intel_dp_max_data_rate(link_clock, 1084 lane_count); 1085 1086 if (mode_rate <= link_avail) { 1087 pipe_config->lane_count = lane_count; 1088 pipe_config->pipe_bpp = bpp; 1089 pipe_config->port_clock = link_clock; 1090 1091 return 0; 1092 } 1093 } 1094 } 1095 } 1096 1097 return -EINVAL; 1098 } 1099 1100 static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc) 1101 { 1102 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1103 int i, num_bpc; 1104 u8 dsc_bpc[3] = {0}; 1105 u8 dsc_max_bpc; 1106 1107 /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ 1108 if (DISPLAY_VER(i915) >= 12) 1109 dsc_max_bpc = min_t(u8, 12, max_req_bpc); 1110 else 1111 dsc_max_bpc = min_t(u8, 10, max_req_bpc); 1112 1113 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd, 1114 dsc_bpc); 1115 for (i = 0; i < num_bpc; i++) { 1116 if (dsc_max_bpc >= dsc_bpc[i]) 1117 return dsc_bpc[i] * 3; 1118 } 1119 1120 return 0; 1121 } 1122 1123 #define DSC_SUPPORTED_VERSION_MIN 1 1124 1125 static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, 1126 struct intel_crtc_state *crtc_state) 1127 { 1128 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1129 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1130 struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; 1131 u8 line_buf_depth; 1132 int ret; 1133 1134 /* 1135 * RC_MODEL_SIZE is currently a constant across all configurations. 1136 * 1137 * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and 1138 * DP_DSC_RC_BUF_SIZE for this. 1139 */ 1140 vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; 1141 1142 /* 1143 * Slice Height of 8 works for all currently available panels. So start 1144 * with that if pic_height is an integral multiple of 8. Eventually add 1145 * logic to try multiple slice heights. 1146 */ 1147 if (vdsc_cfg->pic_height % 8 == 0) 1148 vdsc_cfg->slice_height = 8; 1149 else if (vdsc_cfg->pic_height % 4 == 0) 1150 vdsc_cfg->slice_height = 4; 1151 else 1152 vdsc_cfg->slice_height = 2; 1153 1154 ret = intel_dsc_compute_params(encoder, crtc_state); 1155 if (ret) 1156 return ret; 1157 1158 vdsc_cfg->dsc_version_major = 1159 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1160 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT; 1161 vdsc_cfg->dsc_version_minor = 1162 min(DSC_SUPPORTED_VERSION_MIN, 1163 (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] & 1164 DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT); 1165 1166 vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] & 1167 DP_DSC_RGB; 1168 1169 line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd); 1170 if (!line_buf_depth) { 1171 drm_dbg_kms(&i915->drm, 1172 "DSC Sink Line Buffer Depth invalid\n"); 1173 return -EINVAL; 1174 } 1175 1176 if (vdsc_cfg->dsc_version_minor == 2) 1177 vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ? 1178 DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth; 1179 else 1180 vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ? 1181 DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth; 1182 1183 vdsc_cfg->block_pred_enable = 1184 intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] & 1185 DP_DSC_BLK_PREDICTION_IS_SUPPORTED; 1186 1187 return drm_dsc_compute_rc_parameters(vdsc_cfg); 1188 } 1189 1190 static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, 1191 struct intel_crtc_state *pipe_config, 1192 struct drm_connector_state *conn_state, 1193 struct link_config_limits *limits) 1194 { 1195 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1196 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); 1197 const struct drm_display_mode *adjusted_mode = 1198 &pipe_config->hw.adjusted_mode; 1199 int pipe_bpp; 1200 int ret; 1201 1202 pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) && 1203 intel_dp_supports_fec(intel_dp, pipe_config); 1204 1205 if (!intel_dp_supports_dsc(intel_dp, pipe_config)) 1206 return -EINVAL; 1207 1208 pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc); 1209 1210 /* Min Input BPC for ICL+ is 8 */ 1211 if (pipe_bpp < 8 * 3) { 1212 drm_dbg_kms(&dev_priv->drm, 1213 "No DSC support for less than 8bpc\n"); 1214 return -EINVAL; 1215 } 1216 1217 /* 1218 * For now enable DSC for max bpp, max link rate, max lane count. 1219 * Optimize this later for the minimum possible link rate/lane count 1220 * with DSC enabled for the requested mode. 1221 */ 1222 pipe_config->pipe_bpp = pipe_bpp; 1223 pipe_config->port_clock = intel_dp->common_rates[limits->max_clock]; 1224 pipe_config->lane_count = limits->max_lane_count; 1225 1226 if (intel_dp_is_edp(intel_dp)) { 1227 pipe_config->dsc.compressed_bpp = 1228 min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, 1229 pipe_config->pipe_bpp); 1230 pipe_config->dsc.slice_count = 1231 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 1232 true); 1233 } else { 1234 u16 dsc_max_output_bpp; 1235 u8 dsc_dp_slice_count; 1236 1237 dsc_max_output_bpp = 1238 intel_dp_dsc_get_output_bpp(dev_priv, 1239 pipe_config->port_clock, 1240 pipe_config->lane_count, 1241 adjusted_mode->crtc_clock, 1242 adjusted_mode->crtc_hdisplay, 1243 pipe_config->bigjoiner, 1244 pipe_bpp); 1245 dsc_dp_slice_count = 1246 intel_dp_dsc_get_slice_count(intel_dp, 1247 adjusted_mode->crtc_clock, 1248 adjusted_mode->crtc_hdisplay, 1249 pipe_config->bigjoiner); 1250 if (!dsc_max_output_bpp || !dsc_dp_slice_count) { 1251 drm_dbg_kms(&dev_priv->drm, 1252 "Compressed BPP/Slice Count not supported\n"); 1253 return -EINVAL; 1254 } 1255 pipe_config->dsc.compressed_bpp = min_t(u16, 1256 dsc_max_output_bpp >> 4, 1257 pipe_config->pipe_bpp); 1258 pipe_config->dsc.slice_count = dsc_dp_slice_count; 1259 } 1260 1261 /* As of today we support DSC for only RGB */ 1262 if (intel_dp->force_dsc_bpp) { 1263 if (intel_dp->force_dsc_bpp >= 8 && 1264 intel_dp->force_dsc_bpp < pipe_bpp) { 1265 drm_dbg_kms(&dev_priv->drm, 1266 "DSC BPP forced to %d", 1267 intel_dp->force_dsc_bpp); 1268 pipe_config->dsc.compressed_bpp = 1269 intel_dp->force_dsc_bpp; 1270 } else { 1271 drm_dbg_kms(&dev_priv->drm, 1272 "Invalid DSC BPP %d", 1273 intel_dp->force_dsc_bpp); 1274 } 1275 } 1276 1277 /* 1278 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate 1279 * is greater than the maximum Cdclock and if slice count is even 1280 * then we need to use 2 VDSC instances. 1281 */ 1282 if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq || 1283 pipe_config->bigjoiner) { 1284 if (pipe_config->dsc.slice_count < 2) { 1285 drm_dbg_kms(&dev_priv->drm, 1286 "Cannot split stream to use 2 VDSC instances\n"); 1287 return -EINVAL; 1288 } 1289 1290 pipe_config->dsc.dsc_split = true; 1291 } 1292 1293 ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config); 1294 if (ret < 0) { 1295 drm_dbg_kms(&dev_priv->drm, 1296 "Cannot compute valid DSC parameters for Input Bpp = %d " 1297 "Compressed BPP = %d\n", 1298 pipe_config->pipe_bpp, 1299 pipe_config->dsc.compressed_bpp); 1300 return ret; 1301 } 1302 1303 pipe_config->dsc.compression_enable = true; 1304 drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d " 1305 "Compressed Bpp = %d Slice Count = %d\n", 1306 pipe_config->pipe_bpp, 1307 pipe_config->dsc.compressed_bpp, 1308 pipe_config->dsc.slice_count); 1309 1310 return 0; 1311 } 1312 1313 static int 1314 intel_dp_compute_link_config(struct intel_encoder *encoder, 1315 struct intel_crtc_state *pipe_config, 1316 struct drm_connector_state *conn_state) 1317 { 1318 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1319 const struct drm_display_mode *adjusted_mode = 1320 &pipe_config->hw.adjusted_mode; 1321 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1322 struct link_config_limits limits; 1323 int common_len; 1324 int ret; 1325 1326 common_len = intel_dp_common_len_rate_limit(intel_dp, 1327 intel_dp->max_link_rate); 1328 1329 /* No common link rates between source and sink */ 1330 drm_WARN_ON(encoder->base.dev, common_len <= 0); 1331 1332 limits.min_clock = 0; 1333 limits.max_clock = common_len - 1; 1334 1335 limits.min_lane_count = 1; 1336 limits.max_lane_count = intel_dp_max_lane_count(intel_dp); 1337 1338 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); 1339 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); 1340 1341 if (intel_dp->use_max_params) { 1342 /* 1343 * Use the maximum clock and number of lanes the eDP panel 1344 * advertizes being capable of in case the initial fast 1345 * optimal params failed us. The panels are generally 1346 * designed to support only a single clock and lane 1347 * configuration, and typically on older panels these 1348 * values correspond to the native resolution of the panel. 1349 */ 1350 limits.min_lane_count = limits.max_lane_count; 1351 limits.min_clock = limits.max_clock; 1352 } 1353 1354 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits); 1355 1356 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i " 1357 "max rate %d max bpp %d pixel clock %iKHz\n", 1358 limits.max_lane_count, 1359 intel_dp->common_rates[limits.max_clock], 1360 limits.max_bpp, adjusted_mode->crtc_clock); 1361 1362 if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq || 1363 adjusted_mode->crtc_hdisplay > 5120) && 1364 intel_dp_can_bigjoiner(intel_dp)) 1365 pipe_config->bigjoiner = true; 1366 1367 /* 1368 * Optimize for slow and wide for everything, because there are some 1369 * eDP 1.3 and 1.4 panels don't work well with fast and narrow. 1370 */ 1371 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); 1372 1373 /* 1374 * Pipe joiner needs compression upto display12 due to BW limitation. DG2 1375 * onwards pipe joiner can be enabled without compression. 1376 */ 1377 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); 1378 if (ret || intel_dp->force_dsc_en || (DISPLAY_VER(i915) < 13 && 1379 pipe_config->bigjoiner)) { 1380 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config, 1381 conn_state, &limits); 1382 if (ret < 0) 1383 return ret; 1384 } 1385 1386 if (pipe_config->dsc.compression_enable) { 1387 drm_dbg_kms(&i915->drm, 1388 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", 1389 pipe_config->lane_count, pipe_config->port_clock, 1390 pipe_config->pipe_bpp, 1391 pipe_config->dsc.compressed_bpp); 1392 1393 drm_dbg_kms(&i915->drm, 1394 "DP link rate required %i available %i\n", 1395 intel_dp_link_required(adjusted_mode->crtc_clock, 1396 pipe_config->dsc.compressed_bpp), 1397 intel_dp_max_data_rate(pipe_config->port_clock, 1398 pipe_config->lane_count)); 1399 } else { 1400 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n", 1401 pipe_config->lane_count, pipe_config->port_clock, 1402 pipe_config->pipe_bpp); 1403 1404 drm_dbg_kms(&i915->drm, 1405 "DP link rate required %i available %i\n", 1406 intel_dp_link_required(adjusted_mode->crtc_clock, 1407 pipe_config->pipe_bpp), 1408 intel_dp_max_data_rate(pipe_config->port_clock, 1409 pipe_config->lane_count)); 1410 } 1411 return 0; 1412 } 1413 1414 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, 1415 const struct drm_connector_state *conn_state) 1416 { 1417 const struct intel_digital_connector_state *intel_conn_state = 1418 to_intel_digital_connector_state(conn_state); 1419 const struct drm_display_mode *adjusted_mode = 1420 &crtc_state->hw.adjusted_mode; 1421 1422 /* 1423 * Our YCbCr output is always limited range. 1424 * crtc_state->limited_color_range only applies to RGB, 1425 * and it must never be set for YCbCr or we risk setting 1426 * some conflicting bits in PIPECONF which will mess up 1427 * the colors on the monitor. 1428 */ 1429 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) 1430 return false; 1431 1432 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 1433 /* 1434 * See: 1435 * CEA-861-E - 5.1 Default Encoding Parameters 1436 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry 1437 */ 1438 return crtc_state->pipe_bpp != 18 && 1439 drm_default_rgb_quant_range(adjusted_mode) == 1440 HDMI_QUANTIZATION_RANGE_LIMITED; 1441 } else { 1442 return intel_conn_state->broadcast_rgb == 1443 INTEL_BROADCAST_RGB_LIMITED; 1444 } 1445 } 1446 1447 static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv, 1448 enum port port) 1449 { 1450 if (IS_G4X(dev_priv)) 1451 return false; 1452 if (DISPLAY_VER(dev_priv) < 12 && port == PORT_A) 1453 return false; 1454 1455 return true; 1456 } 1457 1458 static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state, 1459 const struct drm_connector_state *conn_state, 1460 struct drm_dp_vsc_sdp *vsc) 1461 { 1462 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 1463 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1464 1465 /* 1466 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1467 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/ 1468 * Colorimetry Format indication. 1469 */ 1470 vsc->revision = 0x5; 1471 vsc->length = 0x13; 1472 1473 /* DP 1.4a spec, Table 2-120 */ 1474 switch (crtc_state->output_format) { 1475 case INTEL_OUTPUT_FORMAT_YCBCR444: 1476 vsc->pixelformat = DP_PIXELFORMAT_YUV444; 1477 break; 1478 case INTEL_OUTPUT_FORMAT_YCBCR420: 1479 vsc->pixelformat = DP_PIXELFORMAT_YUV420; 1480 break; 1481 case INTEL_OUTPUT_FORMAT_RGB: 1482 default: 1483 vsc->pixelformat = DP_PIXELFORMAT_RGB; 1484 } 1485 1486 switch (conn_state->colorspace) { 1487 case DRM_MODE_COLORIMETRY_BT709_YCC: 1488 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1489 break; 1490 case DRM_MODE_COLORIMETRY_XVYCC_601: 1491 vsc->colorimetry = DP_COLORIMETRY_XVYCC_601; 1492 break; 1493 case DRM_MODE_COLORIMETRY_XVYCC_709: 1494 vsc->colorimetry = DP_COLORIMETRY_XVYCC_709; 1495 break; 1496 case DRM_MODE_COLORIMETRY_SYCC_601: 1497 vsc->colorimetry = DP_COLORIMETRY_SYCC_601; 1498 break; 1499 case DRM_MODE_COLORIMETRY_OPYCC_601: 1500 vsc->colorimetry = DP_COLORIMETRY_OPYCC_601; 1501 break; 1502 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 1503 vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC; 1504 break; 1505 case DRM_MODE_COLORIMETRY_BT2020_RGB: 1506 vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB; 1507 break; 1508 case DRM_MODE_COLORIMETRY_BT2020_YCC: 1509 vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC; 1510 break; 1511 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: 1512 case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: 1513 vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB; 1514 break; 1515 default: 1516 /* 1517 * RGB->YCBCR color conversion uses the BT.709 1518 * color space. 1519 */ 1520 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 1521 vsc->colorimetry = DP_COLORIMETRY_BT709_YCC; 1522 else 1523 vsc->colorimetry = DP_COLORIMETRY_DEFAULT; 1524 break; 1525 } 1526 1527 vsc->bpc = crtc_state->pipe_bpp / 3; 1528 1529 /* only RGB pixelformat supports 6 bpc */ 1530 drm_WARN_ON(&dev_priv->drm, 1531 vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB); 1532 1533 /* all YCbCr are always limited range */ 1534 vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA; 1535 vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED; 1536 } 1537 1538 static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, 1539 struct intel_crtc_state *crtc_state, 1540 const struct drm_connector_state *conn_state) 1541 { 1542 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc; 1543 1544 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */ 1545 if (crtc_state->has_psr) 1546 return; 1547 1548 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) 1549 return; 1550 1551 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC); 1552 vsc->sdp_type = DP_SDP_VSC; 1553 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1554 &crtc_state->infoframes.vsc); 1555 } 1556 1557 void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp, 1558 const struct intel_crtc_state *crtc_state, 1559 const struct drm_connector_state *conn_state, 1560 struct drm_dp_vsc_sdp *vsc) 1561 { 1562 vsc->sdp_type = DP_SDP_VSC; 1563 1564 if (intel_dp->psr.psr2_enabled) { 1565 if (intel_dp->psr.colorimetry_support && 1566 intel_dp_needs_vsc_sdp(crtc_state, conn_state)) { 1567 /* [PSR2, +Colorimetry] */ 1568 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state, 1569 vsc); 1570 } else { 1571 /* 1572 * [PSR2, -Colorimetry] 1573 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11 1574 * 3D stereo + PSR/PSR2 + Y-coordinate. 1575 */ 1576 vsc->revision = 0x4; 1577 vsc->length = 0xe; 1578 } 1579 } else { 1580 /* 1581 * [PSR1] 1582 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118 1583 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or 1584 * higher). 1585 */ 1586 vsc->revision = 0x2; 1587 vsc->length = 0x8; 1588 } 1589 } 1590 1591 static void 1592 intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, 1593 struct intel_crtc_state *crtc_state, 1594 const struct drm_connector_state *conn_state) 1595 { 1596 int ret; 1597 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 1598 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm; 1599 1600 if (!conn_state->hdr_output_metadata) 1601 return; 1602 1603 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state); 1604 1605 if (ret) { 1606 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n"); 1607 return; 1608 } 1609 1610 crtc_state->infoframes.enable |= 1611 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); 1612 } 1613 1614 int 1615 intel_dp_compute_config(struct intel_encoder *encoder, 1616 struct intel_crtc_state *pipe_config, 1617 struct drm_connector_state *conn_state) 1618 { 1619 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1620 struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; 1621 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1622 enum port port = encoder->port; 1623 struct intel_connector *intel_connector = intel_dp->attached_connector; 1624 struct intel_digital_connector_state *intel_conn_state = 1625 to_intel_digital_connector_state(conn_state); 1626 bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); 1627 int ret = 0, output_bpp; 1628 1629 if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A) 1630 pipe_config->has_pch_encoder = true; 1631 1632 pipe_config->output_format = intel_dp_output_format(&intel_connector->base, 1633 adjusted_mode); 1634 1635 if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { 1636 ret = intel_pch_panel_fitting(pipe_config, conn_state); 1637 if (ret) 1638 return ret; 1639 } 1640 1641 if (!intel_dp_port_has_audio(dev_priv, port)) 1642 pipe_config->has_audio = false; 1643 else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) 1644 pipe_config->has_audio = intel_dp->has_audio; 1645 else 1646 pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON; 1647 1648 if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) { 1649 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 1650 adjusted_mode); 1651 1652 if (HAS_GMCH(dev_priv)) 1653 ret = intel_gmch_panel_fitting(pipe_config, conn_state); 1654 else 1655 ret = intel_pch_panel_fitting(pipe_config, conn_state); 1656 if (ret) 1657 return ret; 1658 } 1659 1660 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1661 return -EINVAL; 1662 1663 if (HAS_GMCH(dev_priv) && 1664 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 1665 return -EINVAL; 1666 1667 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 1668 return -EINVAL; 1669 1670 if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) 1671 return -EINVAL; 1672 1673 ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); 1674 if (ret < 0) 1675 return ret; 1676 1677 pipe_config->limited_color_range = 1678 intel_dp_limited_color_range(pipe_config, conn_state); 1679 1680 if (pipe_config->dsc.compression_enable) 1681 output_bpp = pipe_config->dsc.compressed_bpp; 1682 else 1683 output_bpp = intel_dp_output_bpp(pipe_config->output_format, 1684 pipe_config->pipe_bpp); 1685 1686 if (intel_dp->mso_link_count) { 1687 int n = intel_dp->mso_link_count; 1688 int overlap = intel_dp->mso_pixel_overlap; 1689 1690 pipe_config->splitter.enable = true; 1691 pipe_config->splitter.link_count = n; 1692 pipe_config->splitter.pixel_overlap = overlap; 1693 1694 drm_dbg_kms(&dev_priv->drm, "MSO link count %d, pixel overlap %d\n", 1695 n, overlap); 1696 1697 adjusted_mode->crtc_hdisplay = adjusted_mode->crtc_hdisplay / n + overlap; 1698 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hblank_start / n + overlap; 1699 adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_end / n + overlap; 1700 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hsync_start / n + overlap; 1701 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_end / n + overlap; 1702 adjusted_mode->crtc_htotal = adjusted_mode->crtc_htotal / n + overlap; 1703 adjusted_mode->crtc_clock /= n; 1704 } 1705 1706 intel_link_compute_m_n(output_bpp, 1707 pipe_config->lane_count, 1708 adjusted_mode->crtc_clock, 1709 pipe_config->port_clock, 1710 &pipe_config->dp_m_n, 1711 constant_n, pipe_config->fec_enable); 1712 1713 /* FIXME: abstract this better */ 1714 if (pipe_config->splitter.enable) 1715 pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count; 1716 1717 if (!HAS_DDI(dev_priv)) 1718 g4x_dp_set_clock(encoder, pipe_config); 1719 1720 intel_vrr_compute_config(pipe_config, conn_state); 1721 intel_psr_compute_config(intel_dp, pipe_config); 1722 intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp, 1723 constant_n); 1724 intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state); 1725 intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state); 1726 1727 return 0; 1728 } 1729 1730 void intel_dp_set_link_params(struct intel_dp *intel_dp, 1731 int link_rate, int lane_count) 1732 { 1733 intel_dp->link_trained = false; 1734 intel_dp->link_rate = link_rate; 1735 intel_dp->lane_count = lane_count; 1736 } 1737 1738 static void intel_dp_reset_max_link_params(struct intel_dp *intel_dp) 1739 { 1740 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp); 1741 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp); 1742 } 1743 1744 /* Enable backlight PWM and backlight PP control. */ 1745 void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state, 1746 const struct drm_connector_state *conn_state) 1747 { 1748 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder)); 1749 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1750 1751 if (!intel_dp_is_edp(intel_dp)) 1752 return; 1753 1754 drm_dbg_kms(&i915->drm, "\n"); 1755 1756 intel_panel_enable_backlight(crtc_state, conn_state); 1757 intel_pps_backlight_on(intel_dp); 1758 } 1759 1760 /* Disable backlight PP control and backlight PWM. */ 1761 void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state) 1762 { 1763 struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)); 1764 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1765 1766 if (!intel_dp_is_edp(intel_dp)) 1767 return; 1768 1769 drm_dbg_kms(&i915->drm, "\n"); 1770 1771 intel_pps_backlight_off(intel_dp); 1772 intel_panel_disable_backlight(old_conn_state); 1773 } 1774 1775 static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp) 1776 { 1777 /* 1778 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus 1779 * be capable of signalling downstream hpd with a long pulse. 1780 * Whether or not that means D3 is safe to use is not clear, 1781 * but let's assume so until proven otherwise. 1782 * 1783 * FIXME should really check all downstream ports... 1784 */ 1785 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 && 1786 drm_dp_is_branch(intel_dp->dpcd) && 1787 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD; 1788 } 1789 1790 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, 1791 const struct intel_crtc_state *crtc_state, 1792 bool enable) 1793 { 1794 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1795 int ret; 1796 1797 if (!crtc_state->dsc.compression_enable) 1798 return; 1799 1800 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, 1801 enable ? DP_DECOMPRESSION_EN : 0); 1802 if (ret < 0) 1803 drm_dbg_kms(&i915->drm, 1804 "Failed to %s sink decompression state\n", 1805 enabledisable(enable)); 1806 } 1807 1808 static void 1809 intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) 1810 { 1811 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1812 u8 oui[] = { 0x00, 0xaa, 0x01 }; 1813 u8 buf[3] = { 0 }; 1814 1815 /* 1816 * During driver init, we want to be careful and avoid changing the source OUI if it's 1817 * already set to what we want, so as to avoid clearing any state by accident 1818 */ 1819 if (careful) { 1820 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) 1821 drm_err(&i915->drm, "Failed to read source OUI\n"); 1822 1823 if (memcmp(oui, buf, sizeof(oui)) == 0) 1824 return; 1825 } 1826 1827 if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) 1828 drm_err(&i915->drm, "Failed to write source OUI\n"); 1829 1830 intel_dp->last_oui_write = jiffies; 1831 } 1832 1833 void intel_dp_wait_source_oui(struct intel_dp *intel_dp) 1834 { 1835 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1836 1837 drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); 1838 wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); 1839 } 1840 1841 /* If the device supports it, try to set the power state appropriately */ 1842 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) 1843 { 1844 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; 1845 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1846 int ret, i; 1847 1848 /* Should have a valid DPCD by this point */ 1849 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 1850 return; 1851 1852 if (mode != DP_SET_POWER_D0) { 1853 if (downstream_hpd_needs_d0(intel_dp)) 1854 return; 1855 1856 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 1857 } else { 1858 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp); 1859 1860 lspcon_resume(dp_to_dig_port(intel_dp)); 1861 1862 /* Write the source OUI as early as possible */ 1863 if (intel_dp_is_edp(intel_dp)) 1864 intel_edp_init_source_oui(intel_dp, false); 1865 1866 /* 1867 * When turning on, we need to retry for 1ms to give the sink 1868 * time to wake up. 1869 */ 1870 for (i = 0; i < 3; i++) { 1871 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, mode); 1872 if (ret == 1) 1873 break; 1874 drm_msleep(1); 1875 } 1876 1877 if (ret == 1 && lspcon->active) 1878 lspcon_wait_pcon_mode(lspcon); 1879 } 1880 1881 if (ret != 1) 1882 drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Set power to %s failed\n", 1883 encoder->base.base.id, encoder->base.name, 1884 mode == DP_SET_POWER_D0 ? "D0" : "D3"); 1885 } 1886 1887 static bool 1888 intel_dp_get_dpcd(struct intel_dp *intel_dp); 1889 1890 /** 1891 * intel_dp_sync_state - sync the encoder state during init/resume 1892 * @encoder: intel encoder to sync 1893 * @crtc_state: state for the CRTC connected to the encoder 1894 * 1895 * Sync any state stored in the encoder wrt. HW state during driver init 1896 * and system resume. 1897 */ 1898 void intel_dp_sync_state(struct intel_encoder *encoder, 1899 const struct intel_crtc_state *crtc_state) 1900 { 1901 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1902 1903 if (!crtc_state) 1904 return; 1905 1906 /* 1907 * Don't clobber DPCD if it's been already read out during output 1908 * setup (eDP) or detect. 1909 */ 1910 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 1911 intel_dp_get_dpcd(intel_dp); 1912 1913 intel_dp_reset_max_link_params(intel_dp); 1914 } 1915 1916 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, 1917 struct intel_crtc_state *crtc_state) 1918 { 1919 struct drm_i915_private *i915 = to_i915(encoder->base.dev); 1920 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1921 1922 /* 1923 * If BIOS has set an unsupported or non-standard link rate for some 1924 * reason force an encoder recompute and full modeset. 1925 */ 1926 if (intel_dp_rate_index(intel_dp->source_rates, intel_dp->num_source_rates, 1927 crtc_state->port_clock) < 0) { 1928 drm_dbg_kms(&i915->drm, "Forcing full modeset due to unsupported link rate\n"); 1929 crtc_state->uapi.connectors_changed = true; 1930 return false; 1931 } 1932 1933 /* 1934 * FIXME hack to force full modeset when DSC is being used. 1935 * 1936 * As long as we do not have full state readout and config comparison 1937 * of crtc_state->dsc, we have no way to ensure reliable fastset. 1938 * Remove once we have readout for DSC. 1939 */ 1940 if (crtc_state->dsc.compression_enable) { 1941 drm_dbg_kms(&i915->drm, "Forcing full modeset due to DSC being enabled\n"); 1942 crtc_state->uapi.mode_changed = true; 1943 return false; 1944 } 1945 1946 if (CAN_PSR(intel_dp)) { 1947 drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n"); 1948 crtc_state->uapi.mode_changed = true; 1949 return false; 1950 } 1951 1952 return true; 1953 } 1954 1955 static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) 1956 { 1957 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 1958 1959 /* Clear the cached register set to avoid using stale values */ 1960 1961 memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); 1962 1963 if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, 1964 intel_dp->pcon_dsc_dpcd, 1965 sizeof(intel_dp->pcon_dsc_dpcd)) < 0) 1966 drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", 1967 DP_PCON_DSC_ENCODER); 1968 1969 drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", 1970 (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); 1971 } 1972 1973 static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) 1974 { 1975 int bw_gbps[] = {9, 18, 24, 32, 40, 48}; 1976 int i; 1977 1978 for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { 1979 if (frl_bw_mask & (1 << i)) 1980 return bw_gbps[i]; 1981 } 1982 return 0; 1983 } 1984 1985 static int intel_dp_pcon_set_frl_mask(int max_frl) 1986 { 1987 switch (max_frl) { 1988 case 48: 1989 return DP_PCON_FRL_BW_MASK_48GBPS; 1990 case 40: 1991 return DP_PCON_FRL_BW_MASK_40GBPS; 1992 case 32: 1993 return DP_PCON_FRL_BW_MASK_32GBPS; 1994 case 24: 1995 return DP_PCON_FRL_BW_MASK_24GBPS; 1996 case 18: 1997 return DP_PCON_FRL_BW_MASK_18GBPS; 1998 case 9: 1999 return DP_PCON_FRL_BW_MASK_9GBPS; 2000 } 2001 2002 return 0; 2003 } 2004 2005 static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) 2006 { 2007 struct intel_connector *intel_connector = intel_dp->attached_connector; 2008 struct drm_connector *connector = &intel_connector->base; 2009 int max_frl_rate; 2010 int max_lanes, rate_per_lane; 2011 int max_dsc_lanes, dsc_rate_per_lane; 2012 2013 max_lanes = connector->display_info.hdmi.max_lanes; 2014 rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; 2015 max_frl_rate = max_lanes * rate_per_lane; 2016 2017 if (connector->display_info.hdmi.dsc_cap.v_1p2) { 2018 max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; 2019 dsc_rate_per_lane = connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane; 2020 if (max_dsc_lanes && dsc_rate_per_lane) 2021 max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); 2022 } 2023 2024 return max_frl_rate; 2025 } 2026 2027 static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) 2028 { 2029 #define TIMEOUT_FRL_READY_MS 500 2030 #define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000 2031 2032 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2033 int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; 2034 u8 max_frl_bw_mask = 0, frl_trained_mask; 2035 bool is_active; 2036 2037 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 2038 if (ret < 0) 2039 return ret; 2040 2041 max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; 2042 drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); 2043 2044 max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp); 2045 drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw); 2046 2047 max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw); 2048 2049 if (max_frl_bw <= 0) 2050 return -EINVAL; 2051 2052 ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); 2053 if (ret < 0) 2054 return ret; 2055 /* Wait for PCON to be FRL Ready */ 2056 wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS); 2057 2058 if (!is_active) 2059 return -ETIMEDOUT; 2060 2061 max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); 2062 ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, 2063 DP_PCON_ENABLE_SEQUENTIAL_LINK); 2064 if (ret < 0) 2065 return ret; 2066 ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, 2067 DP_PCON_FRL_LINK_TRAIN_NORMAL); 2068 if (ret < 0) 2069 return ret; 2070 ret = drm_dp_pcon_frl_enable(&intel_dp->aux); 2071 if (ret < 0) 2072 return ret; 2073 /* 2074 * Wait for FRL to be completed 2075 * Check if the HDMI Link is up and active. 2076 */ 2077 wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS); 2078 2079 if (!is_active) 2080 return -ETIMEDOUT; 2081 2082 /* Verify HDMI Link configuration shows FRL Mode */ 2083 if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) != 2084 DP_PCON_HDMI_MODE_FRL) { 2085 drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n"); 2086 return -EINVAL; 2087 } 2088 drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask); 2089 2090 intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); 2091 intel_dp->frl.is_trained = true; 2092 drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps); 2093 2094 return 0; 2095 } 2096 2097 static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) 2098 { 2099 if (drm_dp_is_branch(intel_dp->dpcd) && 2100 intel_dp->has_hdmi_sink && 2101 intel_dp_hdmi_sink_max_frl(intel_dp) > 0) 2102 return true; 2103 2104 return false; 2105 } 2106 2107 void intel_dp_check_frl_training(struct intel_dp *intel_dp) 2108 { 2109 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 2110 2111 /* 2112 * Always go for FRL training if: 2113 * -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7) 2114 * -sink is HDMI2.1 2115 */ 2116 if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) || 2117 !intel_dp_is_hdmi_2_1_sink(intel_dp) || 2118 intel_dp->frl.is_trained) 2119 return; 2120 2121 if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { 2122 int ret, mode; 2123 2124 drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); 2125 ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); 2126 mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); 2127 2128 if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) 2129 drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n"); 2130 } else { 2131 drm_dbg(&dev_priv->drm, "FRL training Completed\n"); 2132 } 2133 } 2134 2135 static int 2136 intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) 2137 { 2138 int vactive = crtc_state->hw.adjusted_mode.vdisplay; 2139 2140 return intel_hdmi_dsc_get_slice_height(vactive); 2141 } 2142 2143 static int 2144 intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, 2145 const struct intel_crtc_state *crtc_state) 2146 { 2147 struct intel_connector *intel_connector = intel_dp->attached_connector; 2148 struct drm_connector *connector = &intel_connector->base; 2149 int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice; 2150 int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices; 2151 int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd); 2152 int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd); 2153 2154 return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices, 2155 pcon_max_slice_width, 2156 hdmi_max_slices, hdmi_throughput); 2157 } 2158 2159 static int 2160 intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp, 2161 const struct intel_crtc_state *crtc_state, 2162 int num_slices, int slice_width) 2163 { 2164 struct intel_connector *intel_connector = intel_dp->attached_connector; 2165 struct drm_connector *connector = &intel_connector->base; 2166 int output_format = crtc_state->output_format; 2167 bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp; 2168 int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd); 2169 int hdmi_max_chunk_bytes = 2170 connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024; 2171 2172 return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width, 2173 num_slices, output_format, hdmi_all_bpp, 2174 hdmi_max_chunk_bytes); 2175 } 2176 2177 void 2178 intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, 2179 const struct intel_crtc_state *crtc_state) 2180 { 2181 u8 pps_param[6]; 2182 int slice_height; 2183 int slice_width; 2184 int num_slices; 2185 int bits_per_pixel; 2186 int ret; 2187 struct intel_connector *intel_connector = intel_dp->attached_connector; 2188 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2189 struct drm_connector *connector; 2190 bool hdmi_is_dsc_1_2; 2191 2192 if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) 2193 return; 2194 2195 if (!intel_connector) 2196 return; 2197 connector = &intel_connector->base; 2198 hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2; 2199 2200 if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || 2201 !hdmi_is_dsc_1_2) 2202 return; 2203 2204 slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); 2205 if (!slice_height) 2206 return; 2207 2208 num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); 2209 if (!num_slices) 2210 return; 2211 2212 slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, 2213 num_slices); 2214 2215 bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, 2216 num_slices, slice_width); 2217 if (!bits_per_pixel) 2218 return; 2219 2220 pps_param[0] = slice_height & 0xFF; 2221 pps_param[1] = slice_height >> 8; 2222 pps_param[2] = slice_width & 0xFF; 2223 pps_param[3] = slice_width >> 8; 2224 pps_param[4] = bits_per_pixel & 0xFF; 2225 pps_param[5] = (bits_per_pixel >> 8) & 0x3; 2226 2227 ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); 2228 if (ret < 0) 2229 drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n"); 2230 } 2231 2232 void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, 2233 const struct intel_crtc_state *crtc_state) 2234 { 2235 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2236 u8 tmp; 2237 2238 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13) 2239 return; 2240 2241 if (!drm_dp_is_branch(intel_dp->dpcd)) 2242 return; 2243 2244 tmp = intel_dp->has_hdmi_sink ? 2245 DP_HDMI_DVI_OUTPUT_CONFIG : 0; 2246 2247 if (drm_dp_dpcd_writeb(&intel_dp->aux, 2248 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1) 2249 drm_dbg_kms(&i915->drm, "Failed to %s protocol converter HDMI mode\n", 2250 enabledisable(intel_dp->has_hdmi_sink)); 2251 2252 tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && 2253 intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; 2254 2255 if (drm_dp_dpcd_writeb(&intel_dp->aux, 2256 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) 2257 drm_dbg_kms(&i915->drm, 2258 "Failed to %s protocol converter YCbCr 4:2:0 conversion mode\n", 2259 enabledisable(intel_dp->dfp.ycbcr_444_to_420)); 2260 2261 tmp = 0; 2262 if (intel_dp->dfp.rgb_to_ycbcr) { 2263 bool bt2020, bt709; 2264 2265 /* 2266 * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only 2267 * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default. 2268 * 2269 */ 2270 tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE; 2271 2272 bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 2273 intel_dp->downstream_ports, 2274 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); 2275 bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 2276 intel_dp->downstream_ports, 2277 DP_DS_HDMI_BT709_RGB_YCBCR_CONV); 2278 switch (crtc_state->infoframes.vsc.colorimetry) { 2279 case DP_COLORIMETRY_BT2020_RGB: 2280 case DP_COLORIMETRY_BT2020_YCC: 2281 if (bt2020) 2282 tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE; 2283 break; 2284 case DP_COLORIMETRY_BT709_YCC: 2285 case DP_COLORIMETRY_XVYCC_709: 2286 if (bt709) 2287 tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE; 2288 break; 2289 default: 2290 break; 2291 } 2292 } 2293 2294 if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) 2295 drm_dbg_kms(&i915->drm, 2296 "Failed to %s protocol converter RGB->YCbCr conversion mode\n", 2297 enabledisable(tmp)); 2298 } 2299 2300 2301 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp) 2302 { 2303 u8 dprx = 0; 2304 2305 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST, 2306 &dprx) != 1) 2307 return false; 2308 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED; 2309 } 2310 2311 static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp) 2312 { 2313 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2314 2315 /* 2316 * Clear the cached register set to avoid using stale values 2317 * for the sinks that do not support DSC. 2318 */ 2319 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 2320 2321 /* Clear fec_capable to avoid using stale values */ 2322 intel_dp->fec_capable = 0; 2323 2324 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */ 2325 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 || 2326 intel_dp->edp_dpcd[0] >= DP_EDP_14) { 2327 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT, 2328 intel_dp->dsc_dpcd, 2329 sizeof(intel_dp->dsc_dpcd)) < 0) 2330 drm_err(&i915->drm, 2331 "Failed to read DPCD register 0x%x\n", 2332 DP_DSC_SUPPORT); 2333 2334 drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n", 2335 (int)sizeof(intel_dp->dsc_dpcd), 2336 intel_dp->dsc_dpcd); 2337 2338 /* FEC is supported only on DP 1.4 */ 2339 if (!intel_dp_is_edp(intel_dp) && 2340 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY, 2341 &intel_dp->fec_capable) < 0) 2342 drm_err(&i915->drm, 2343 "Failed to read FEC DPCD register\n"); 2344 2345 drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n", 2346 intel_dp->fec_capable); 2347 } 2348 } 2349 2350 static void intel_edp_mso_mode_fixup(struct intel_connector *connector, 2351 struct drm_display_mode *mode) 2352 { 2353 struct intel_dp *intel_dp = intel_attached_dp(connector); 2354 struct drm_i915_private *i915 = to_i915(connector->base.dev); 2355 int n = intel_dp->mso_link_count; 2356 int overlap = intel_dp->mso_pixel_overlap; 2357 2358 if (!mode || !n) 2359 return; 2360 2361 mode->hdisplay = (mode->hdisplay - overlap) * n; 2362 mode->hsync_start = (mode->hsync_start - overlap) * n; 2363 mode->hsync_end = (mode->hsync_end - overlap) * n; 2364 mode->htotal = (mode->htotal - overlap) * n; 2365 mode->clock *= n; 2366 2367 drm_mode_set_name(mode); 2368 2369 drm_dbg_kms(&i915->drm, 2370 "[CONNECTOR:%d:%s] using generated MSO mode: ", 2371 connector->base.base.id, connector->base.name); 2372 drm_mode_debug_printmodeline(mode); 2373 } 2374 2375 static void intel_edp_mso_init(struct intel_dp *intel_dp) 2376 { 2377 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2378 u8 mso; 2379 2380 if (intel_dp->edp_dpcd[0] < DP_EDP_14) 2381 return; 2382 2383 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_MSO_LINK_CAPABILITIES, &mso) != 1) { 2384 drm_err(&i915->drm, "Failed to read MSO cap\n"); 2385 return; 2386 } 2387 2388 /* Valid configurations are SST or MSO 2x1, 2x2, 4x1 */ 2389 mso &= DP_EDP_MSO_NUMBER_OF_LINKS_MASK; 2390 if (mso % 2 || mso > drm_dp_max_lane_count(intel_dp->dpcd)) { 2391 drm_err(&i915->drm, "Invalid MSO link count cap %u\n", mso); 2392 mso = 0; 2393 } 2394 2395 if (mso) { 2396 drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n", 2397 mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso); 2398 if (!HAS_MSO(i915)) { 2399 drm_err(&i915->drm, "No source MSO support, disabling\n"); 2400 mso = 0; 2401 } 2402 } 2403 2404 intel_dp->mso_link_count = mso; 2405 intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */ 2406 } 2407 2408 static bool 2409 intel_edp_init_dpcd(struct intel_dp *intel_dp) 2410 { 2411 struct drm_i915_private *dev_priv = 2412 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 2413 2414 /* this function is meant to be called only once */ 2415 drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0); 2416 2417 if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) 2418 return false; 2419 2420 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 2421 drm_dp_is_branch(intel_dp->dpcd)); 2422 2423 /* 2424 * Read the eDP display control registers. 2425 * 2426 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in 2427 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it 2428 * set, but require eDP 1.4+ detection (e.g. for supported link rates 2429 * method). The display control registers should read zero if they're 2430 * not supported anyway. 2431 */ 2432 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, 2433 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) == 2434 sizeof(intel_dp->edp_dpcd)) { 2435 drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n", 2436 (int)sizeof(intel_dp->edp_dpcd), 2437 intel_dp->edp_dpcd); 2438 2439 intel_dp->use_max_params = intel_dp->edp_dpcd[0] < DP_EDP_14; 2440 } 2441 2442 /* 2443 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks 2444 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1] 2445 */ 2446 intel_psr_init_dpcd(intel_dp); 2447 2448 /* Clear the default sink rates */ 2449 intel_dp->num_sink_rates = 0; 2450 2451 /* Read the eDP 1.4+ supported link rates. */ 2452 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) { 2453 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 2454 int i; 2455 2456 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, 2457 sink_rates, sizeof(sink_rates)); 2458 2459 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { 2460 int val = le16_to_cpu(sink_rates[i]); 2461 2462 if (val == 0) 2463 break; 2464 2465 /* Value read multiplied by 200kHz gives the per-lane 2466 * link rate in kHz. The source rates are, however, 2467 * stored in terms of LS_Clk kHz. The full conversion 2468 * back to symbols is 2469 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte) 2470 */ 2471 intel_dp->sink_rates[i] = (val * 200) / 10; 2472 } 2473 intel_dp->num_sink_rates = i; 2474 } 2475 2476 /* 2477 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available, 2478 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise. 2479 */ 2480 if (intel_dp->num_sink_rates) 2481 intel_dp->use_rate_select = true; 2482 else 2483 intel_dp_set_sink_rates(intel_dp); 2484 2485 intel_dp_set_common_rates(intel_dp); 2486 intel_dp_reset_max_link_params(intel_dp); 2487 2488 /* Read the eDP DSC DPCD registers */ 2489 if (DISPLAY_VER(dev_priv) >= 10) 2490 intel_dp_get_dsc_sink_cap(intel_dp); 2491 2492 /* 2493 * If needed, program our source OUI so we can make various Intel-specific AUX services 2494 * available (such as HDR backlight controls) 2495 */ 2496 intel_edp_init_source_oui(intel_dp, true); 2497 2498 intel_edp_mso_init(intel_dp); 2499 2500 return true; 2501 } 2502 2503 static bool 2504 intel_dp_has_sink_count(struct intel_dp *intel_dp) 2505 { 2506 if (!intel_dp->attached_connector) 2507 return false; 2508 2509 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base, 2510 intel_dp->dpcd, 2511 &intel_dp->desc); 2512 } 2513 2514 static bool 2515 intel_dp_get_dpcd(struct intel_dp *intel_dp) 2516 { 2517 int ret; 2518 2519 if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0) 2520 return false; 2521 2522 /* 2523 * Don't clobber cached eDP rates. Also skip re-reading 2524 * the OUI/ID since we know it won't change. 2525 */ 2526 if (!intel_dp_is_edp(intel_dp)) { 2527 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, 2528 drm_dp_is_branch(intel_dp->dpcd)); 2529 2530 intel_dp_set_sink_rates(intel_dp); 2531 intel_dp_set_common_rates(intel_dp); 2532 } 2533 2534 if (intel_dp_has_sink_count(intel_dp)) { 2535 ret = drm_dp_read_sink_count(&intel_dp->aux); 2536 if (ret < 0) 2537 return false; 2538 2539 /* 2540 * Sink count can change between short pulse hpd hence 2541 * a member variable in intel_dp will track any changes 2542 * between short pulse interrupts. 2543 */ 2544 intel_dp->sink_count = ret; 2545 2546 /* 2547 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that 2548 * a dongle is present but no display. Unless we require to know 2549 * if a dongle is present or not, we don't need to update 2550 * downstream port information. So, an early return here saves 2551 * time from performing other operations which are not required. 2552 */ 2553 if (!intel_dp->sink_count) 2554 return false; 2555 } 2556 2557 return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd, 2558 intel_dp->downstream_ports) == 0; 2559 } 2560 2561 static bool 2562 intel_dp_can_mst(struct intel_dp *intel_dp) 2563 { 2564 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2565 2566 return i915->params.enable_dp_mst && 2567 intel_dp->can_mst && 2568 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 2569 } 2570 2571 static void 2572 intel_dp_configure_mst(struct intel_dp *intel_dp) 2573 { 2574 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 2575 struct intel_encoder *encoder = 2576 &dp_to_dig_port(intel_dp)->base; 2577 bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd); 2578 2579 drm_dbg_kms(&i915->drm, 2580 "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", 2581 encoder->base.base.id, encoder->base.name, 2582 yesno(intel_dp->can_mst), yesno(sink_can_mst), 2583 yesno(i915->params.enable_dp_mst)); 2584 2585 if (!intel_dp->can_mst) 2586 return; 2587 2588 intel_dp->is_mst = sink_can_mst && 2589 i915->params.enable_dp_mst; 2590 2591 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 2592 intel_dp->is_mst); 2593 } 2594 2595 static bool 2596 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) 2597 { 2598 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 2599 sink_irq_vector, DP_DPRX_ESI_LEN) == 2600 DP_DPRX_ESI_LEN; 2601 } 2602 2603 bool 2604 intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, 2605 const struct drm_connector_state *conn_state) 2606 { 2607 /* 2608 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication 2609 * of Color Encoding Format and Content Color Gamut], in order to 2610 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. 2611 */ 2612 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) 2613 return true; 2614 2615 switch (conn_state->colorspace) { 2616 case DRM_MODE_COLORIMETRY_SYCC_601: 2617 case DRM_MODE_COLORIMETRY_OPYCC_601: 2618 case DRM_MODE_COLORIMETRY_BT2020_YCC: 2619 case DRM_MODE_COLORIMETRY_BT2020_RGB: 2620 case DRM_MODE_COLORIMETRY_BT2020_CYCC: 2621 return true; 2622 default: 2623 break; 2624 } 2625 2626 return false; 2627 } 2628 2629 static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc, 2630 struct dp_sdp *sdp, size_t size) 2631 { 2632 size_t length = sizeof(struct dp_sdp); 2633 2634 if (size < length) 2635 return -ENOSPC; 2636 2637 memset(sdp, 0, size); 2638 2639 /* 2640 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119 2641 * VSC SDP Header Bytes 2642 */ 2643 sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */ 2644 sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */ 2645 sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */ 2646 sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */ 2647 2648 /* 2649 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as 2650 * per DP 1.4a spec. 2651 */ 2652 if (vsc->revision != 0x5) 2653 goto out; 2654 2655 /* VSC SDP Payload for DB16 through DB18 */ 2656 /* Pixel Encoding and Colorimetry Formats */ 2657 sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */ 2658 sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */ 2659 2660 switch (vsc->bpc) { 2661 case 6: 2662 /* 6bpc: 0x0 */ 2663 break; 2664 case 8: 2665 sdp->db[17] = 0x1; /* DB17[3:0] */ 2666 break; 2667 case 10: 2668 sdp->db[17] = 0x2; 2669 break; 2670 case 12: 2671 sdp->db[17] = 0x3; 2672 break; 2673 case 16: 2674 sdp->db[17] = 0x4; 2675 break; 2676 default: 2677 MISSING_CASE(vsc->bpc); 2678 break; 2679 } 2680 /* Dynamic Range and Component Bit Depth */ 2681 if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA) 2682 sdp->db[17] |= 0x80; /* DB17[7] */ 2683 2684 /* Content Type */ 2685 sdp->db[18] = vsc->content_type & 0x7; 2686 2687 out: 2688 return length; 2689 } 2690 2691 static ssize_t 2692 intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, 2693 struct dp_sdp *sdp, 2694 size_t size) 2695 { 2696 size_t length = sizeof(struct dp_sdp); 2697 const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; 2698 unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; 2699 ssize_t len; 2700 2701 if (size < length) 2702 return -ENOSPC; 2703 2704 memset(sdp, 0, size); 2705 2706 len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); 2707 if (len < 0) { 2708 DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); 2709 return -ENOSPC; 2710 } 2711 2712 if (len != infoframe_size) { 2713 DRM_DEBUG_KMS("wrong static hdr metadata size\n"); 2714 return -ENOSPC; 2715 } 2716 2717 /* 2718 * Set up the infoframe sdp packet for HDR static metadata. 2719 * Prepare VSC Header for SU as per DP 1.4a spec, 2720 * Table 2-100 and Table 2-101 2721 */ 2722 2723 /* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */ 2724 sdp->sdp_header.HB0 = 0; 2725 /* 2726 * Packet Type 80h + Non-audio INFOFRAME Type value 2727 * HDMI_INFOFRAME_TYPE_DRM: 0x87 2728 * - 80h + Non-audio INFOFRAME Type value 2729 * - InfoFrame Type: 0x07 2730 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame] 2731 */ 2732 sdp->sdp_header.HB1 = drm_infoframe->type; 2733 /* 2734 * Least Significant Eight Bits of (Data Byte Count – 1) 2735 * infoframe_size - 1 2736 */ 2737 sdp->sdp_header.HB2 = 0x1D; 2738 /* INFOFRAME SDP Version Number */ 2739 sdp->sdp_header.HB3 = (0x13 << 2); 2740 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 2741 sdp->db[0] = drm_infoframe->version; 2742 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 2743 sdp->db[1] = drm_infoframe->length; 2744 /* 2745 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after 2746 * HDMI_INFOFRAME_HEADER_SIZE 2747 */ 2748 BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2); 2749 memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], 2750 HDMI_DRM_INFOFRAME_SIZE); 2751 2752 /* 2753 * Size of DP infoframe sdp packet for HDR static metadata consists of 2754 * - DP SDP Header(struct dp_sdp_header): 4 bytes 2755 * - Two Data Blocks: 2 bytes 2756 * CTA Header Byte2 (INFOFRAME Version Number) 2757 * CTA Header Byte3 (Length of INFOFRAME) 2758 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes 2759 * 2760 * Prior to GEN11's GMP register size is identical to DP HDR static metadata 2761 * infoframe size. But GEN11+ has larger than that size, write_infoframe 2762 * will pad rest of the size. 2763 */ 2764 return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE; 2765 } 2766 2767 static void intel_write_dp_sdp(struct intel_encoder *encoder, 2768 const struct intel_crtc_state *crtc_state, 2769 unsigned int type) 2770 { 2771 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2772 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2773 struct dp_sdp sdp = {}; 2774 ssize_t len; 2775 2776 if ((crtc_state->infoframes.enable & 2777 intel_hdmi_infoframe_enable(type)) == 0) 2778 return; 2779 2780 switch (type) { 2781 case DP_SDP_VSC: 2782 len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp, 2783 sizeof(sdp)); 2784 break; 2785 case HDMI_PACKET_TYPE_GAMUT_METADATA: 2786 len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, 2787 &sdp, sizeof(sdp)); 2788 break; 2789 default: 2790 MISSING_CASE(type); 2791 return; 2792 } 2793 2794 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 2795 return; 2796 2797 dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len); 2798 } 2799 2800 void intel_write_dp_vsc_sdp(struct intel_encoder *encoder, 2801 const struct intel_crtc_state *crtc_state, 2802 struct drm_dp_vsc_sdp *vsc) 2803 { 2804 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2805 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2806 struct dp_sdp sdp = {}; 2807 ssize_t len; 2808 2809 len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp)); 2810 2811 if (drm_WARN_ON(&dev_priv->drm, len < 0)) 2812 return; 2813 2814 dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC, 2815 &sdp, len); 2816 } 2817 2818 void intel_dp_set_infoframes(struct intel_encoder *encoder, 2819 bool enable, 2820 const struct intel_crtc_state *crtc_state, 2821 const struct drm_connector_state *conn_state) 2822 { 2823 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2824 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder); 2825 u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW | 2826 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW | 2827 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK; 2828 u32 val = intel_de_read(dev_priv, reg) & ~dip_enable; 2829 2830 /* TODO: Add DSC case (DIP_ENABLE_PPS) */ 2831 /* When PSR is enabled, this routine doesn't disable VSC DIP */ 2832 if (!crtc_state->has_psr) 2833 val &= ~VIDEO_DIP_ENABLE_VSC_HSW; 2834 2835 intel_de_write(dev_priv, reg, val); 2836 intel_de_posting_read(dev_priv, reg); 2837 2838 if (!enable) 2839 return; 2840 2841 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 2842 if (!crtc_state->has_psr) 2843 intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC); 2844 2845 intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA); 2846 } 2847 2848 static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc, 2849 const void *buffer, size_t size) 2850 { 2851 const struct dp_sdp *sdp = buffer; 2852 2853 if (size < sizeof(struct dp_sdp)) 2854 return -EINVAL; 2855 2856 memset(vsc, 0, sizeof(*vsc)); 2857 2858 if (sdp->sdp_header.HB0 != 0) 2859 return -EINVAL; 2860 2861 if (sdp->sdp_header.HB1 != DP_SDP_VSC) 2862 return -EINVAL; 2863 2864 vsc->sdp_type = sdp->sdp_header.HB1; 2865 vsc->revision = sdp->sdp_header.HB2; 2866 vsc->length = sdp->sdp_header.HB3; 2867 2868 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) || 2869 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) { 2870 /* 2871 * - HB2 = 0x2, HB3 = 0x8 2872 * VSC SDP supporting 3D stereo + PSR 2873 * - HB2 = 0x4, HB3 = 0xe 2874 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of 2875 * first scan line of the SU region (applies to eDP v1.4b 2876 * and higher). 2877 */ 2878 return 0; 2879 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) { 2880 /* 2881 * - HB2 = 0x5, HB3 = 0x13 2882 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry 2883 * Format. 2884 */ 2885 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf; 2886 vsc->colorimetry = sdp->db[16] & 0xf; 2887 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1; 2888 2889 switch (sdp->db[17] & 0x7) { 2890 case 0x0: 2891 vsc->bpc = 6; 2892 break; 2893 case 0x1: 2894 vsc->bpc = 8; 2895 break; 2896 case 0x2: 2897 vsc->bpc = 10; 2898 break; 2899 case 0x3: 2900 vsc->bpc = 12; 2901 break; 2902 case 0x4: 2903 vsc->bpc = 16; 2904 break; 2905 default: 2906 MISSING_CASE(sdp->db[17] & 0x7); 2907 return -EINVAL; 2908 } 2909 2910 vsc->content_type = sdp->db[18] & 0x7; 2911 } else { 2912 return -EINVAL; 2913 } 2914 2915 return 0; 2916 } 2917 2918 static int 2919 intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe, 2920 const void *buffer, size_t size) 2921 { 2922 int ret; 2923 2924 const struct dp_sdp *sdp = buffer; 2925 2926 if (size < sizeof(struct dp_sdp)) 2927 return -EINVAL; 2928 2929 if (sdp->sdp_header.HB0 != 0) 2930 return -EINVAL; 2931 2932 if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM) 2933 return -EINVAL; 2934 2935 /* 2936 * Least Significant Eight Bits of (Data Byte Count – 1) 2937 * 1Dh (i.e., Data Byte Count = 30 bytes). 2938 */ 2939 if (sdp->sdp_header.HB2 != 0x1D) 2940 return -EINVAL; 2941 2942 /* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */ 2943 if ((sdp->sdp_header.HB3 & 0x3) != 0) 2944 return -EINVAL; 2945 2946 /* INFOFRAME SDP Version Number */ 2947 if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13) 2948 return -EINVAL; 2949 2950 /* CTA Header Byte 2 (INFOFRAME Version Number) */ 2951 if (sdp->db[0] != 1) 2952 return -EINVAL; 2953 2954 /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ 2955 if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE) 2956 return -EINVAL; 2957 2958 ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2], 2959 HDMI_DRM_INFOFRAME_SIZE); 2960 2961 return ret; 2962 } 2963 2964 static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder, 2965 struct intel_crtc_state *crtc_state, 2966 struct drm_dp_vsc_sdp *vsc) 2967 { 2968 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2969 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2970 unsigned int type = DP_SDP_VSC; 2971 struct dp_sdp sdp = {}; 2972 int ret; 2973 2974 /* When PSR is enabled, VSC SDP is handled by PSR routine */ 2975 if (crtc_state->has_psr) 2976 return; 2977 2978 if ((crtc_state->infoframes.enable & 2979 intel_hdmi_infoframe_enable(type)) == 0) 2980 return; 2981 2982 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp)); 2983 2984 ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp)); 2985 2986 if (ret) 2987 drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n"); 2988 } 2989 2990 static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder, 2991 struct intel_crtc_state *crtc_state, 2992 struct hdmi_drm_infoframe *drm_infoframe) 2993 { 2994 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 2995 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2996 unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA; 2997 struct dp_sdp sdp = {}; 2998 int ret; 2999 3000 if ((crtc_state->infoframes.enable & 3001 intel_hdmi_infoframe_enable(type)) == 0) 3002 return; 3003 3004 dig_port->read_infoframe(encoder, crtc_state, type, &sdp, 3005 sizeof(sdp)); 3006 3007 ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp, 3008 sizeof(sdp)); 3009 3010 if (ret) 3011 drm_dbg_kms(&dev_priv->drm, 3012 "Failed to unpack DP HDR Metadata Infoframe SDP\n"); 3013 } 3014 3015 void intel_read_dp_sdp(struct intel_encoder *encoder, 3016 struct intel_crtc_state *crtc_state, 3017 unsigned int type) 3018 { 3019 switch (type) { 3020 case DP_SDP_VSC: 3021 intel_read_dp_vsc_sdp(encoder, crtc_state, 3022 &crtc_state->infoframes.vsc); 3023 break; 3024 case HDMI_PACKET_TYPE_GAMUT_METADATA: 3025 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state, 3026 &crtc_state->infoframes.drm.drm); 3027 break; 3028 default: 3029 MISSING_CASE(type); 3030 break; 3031 } 3032 } 3033 3034 static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) 3035 { 3036 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3037 int status = 0; 3038 int test_link_rate; 3039 u8 test_lane_count, test_link_bw; 3040 /* (DP CTS 1.2) 3041 * 4.3.1.11 3042 */ 3043 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */ 3044 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT, 3045 &test_lane_count); 3046 3047 if (status <= 0) { 3048 drm_dbg_kms(&i915->drm, "Lane count read failed\n"); 3049 return DP_TEST_NAK; 3050 } 3051 test_lane_count &= DP_MAX_LANE_COUNT_MASK; 3052 3053 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE, 3054 &test_link_bw); 3055 if (status <= 0) { 3056 drm_dbg_kms(&i915->drm, "Link Rate read failed\n"); 3057 return DP_TEST_NAK; 3058 } 3059 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw); 3060 3061 /* Validate the requested link rate and lane count */ 3062 if (!intel_dp_link_params_valid(intel_dp, test_link_rate, 3063 test_lane_count)) 3064 return DP_TEST_NAK; 3065 3066 intel_dp->compliance.test_lane_count = test_lane_count; 3067 intel_dp->compliance.test_link_rate = test_link_rate; 3068 3069 return DP_TEST_ACK; 3070 } 3071 3072 static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp) 3073 { 3074 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3075 u8 test_pattern; 3076 u8 test_misc; 3077 __be16 h_width, v_height; 3078 int status = 0; 3079 3080 /* Read the TEST_PATTERN (DP CTS 3.1.5) */ 3081 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN, 3082 &test_pattern); 3083 if (status <= 0) { 3084 drm_dbg_kms(&i915->drm, "Test pattern read failed\n"); 3085 return DP_TEST_NAK; 3086 } 3087 if (test_pattern != DP_COLOR_RAMP) 3088 return DP_TEST_NAK; 3089 3090 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI, 3091 &h_width, 2); 3092 if (status <= 0) { 3093 drm_dbg_kms(&i915->drm, "H Width read failed\n"); 3094 return DP_TEST_NAK; 3095 } 3096 3097 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI, 3098 &v_height, 2); 3099 if (status <= 0) { 3100 drm_dbg_kms(&i915->drm, "V Height read failed\n"); 3101 return DP_TEST_NAK; 3102 } 3103 3104 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0, 3105 &test_misc); 3106 if (status <= 0) { 3107 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n"); 3108 return DP_TEST_NAK; 3109 } 3110 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB) 3111 return DP_TEST_NAK; 3112 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA) 3113 return DP_TEST_NAK; 3114 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) { 3115 case DP_TEST_BIT_DEPTH_6: 3116 intel_dp->compliance.test_data.bpc = 6; 3117 break; 3118 case DP_TEST_BIT_DEPTH_8: 3119 intel_dp->compliance.test_data.bpc = 8; 3120 break; 3121 default: 3122 return DP_TEST_NAK; 3123 } 3124 3125 intel_dp->compliance.test_data.video_pattern = test_pattern; 3126 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width); 3127 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height); 3128 /* Set test active flag here so userspace doesn't interrupt things */ 3129 intel_dp->compliance.test_active = true; 3130 3131 return DP_TEST_ACK; 3132 } 3133 3134 static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp) 3135 { 3136 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3137 u8 test_result = DP_TEST_ACK; 3138 struct intel_connector *intel_connector = intel_dp->attached_connector; 3139 struct drm_connector *connector = &intel_connector->base; 3140 3141 if (intel_connector->detect_edid == NULL || 3142 connector->edid_corrupt || 3143 intel_dp->aux.i2c_defer_count > 6) { 3144 /* Check EDID read for NACKs, DEFERs and corruption 3145 * (DP CTS 1.2 Core r1.1) 3146 * 4.2.2.4 : Failed EDID read, I2C_NAK 3147 * 4.2.2.5 : Failed EDID read, I2C_DEFER 3148 * 4.2.2.6 : EDID corruption detected 3149 * Use failsafe mode for all cases 3150 */ 3151 if (intel_dp->aux.i2c_nack_count > 0 || 3152 intel_dp->aux.i2c_defer_count > 0) 3153 drm_dbg_kms(&i915->drm, 3154 "EDID read had %d NACKs, %d DEFERs\n", 3155 intel_dp->aux.i2c_nack_count, 3156 intel_dp->aux.i2c_defer_count); 3157 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE; 3158 } else { 3159 struct edid *block = intel_connector->detect_edid; 3160 3161 /* We have to write the checksum 3162 * of the last block read 3163 */ 3164 block += intel_connector->detect_edid->extensions; 3165 3166 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM, 3167 block->checksum) <= 0) 3168 drm_dbg_kms(&i915->drm, 3169 "Failed to write EDID checksum\n"); 3170 3171 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE; 3172 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED; 3173 } 3174 3175 /* Set test active flag here so userspace doesn't interrupt things */ 3176 intel_dp->compliance.test_active = true; 3177 3178 return test_result; 3179 } 3180 3181 static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, 3182 const struct intel_crtc_state *crtc_state) 3183 { 3184 struct drm_i915_private *dev_priv = 3185 to_i915(dp_to_dig_port(intel_dp)->base.base.dev); 3186 struct drm_dp_phy_test_params *data = 3187 &intel_dp->compliance.test_data.phytest; 3188 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); 3189 enum pipe pipe = crtc->pipe; 3190 u32 pattern_val; 3191 3192 switch (data->phy_pattern) { 3193 case DP_PHY_TEST_PATTERN_NONE: 3194 DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); 3195 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); 3196 break; 3197 case DP_PHY_TEST_PATTERN_D10_2: 3198 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); 3199 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3200 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); 3201 break; 3202 case DP_PHY_TEST_PATTERN_ERROR_COUNT: 3203 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); 3204 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3205 DDI_DP_COMP_CTL_ENABLE | 3206 DDI_DP_COMP_CTL_SCRAMBLED_0); 3207 break; 3208 case DP_PHY_TEST_PATTERN_PRBS7: 3209 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); 3210 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3211 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); 3212 break; 3213 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: 3214 /* 3215 * FIXME: Ideally pattern should come from DPCD 0x250. As 3216 * current firmware of DPR-100 could not set it, so hardcoding 3217 * now for complaince test. 3218 */ 3219 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); 3220 pattern_val = 0x3e0f83e0; 3221 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); 3222 pattern_val = 0x0f83e0f8; 3223 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val); 3224 pattern_val = 0x0000f83e; 3225 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val); 3226 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3227 DDI_DP_COMP_CTL_ENABLE | 3228 DDI_DP_COMP_CTL_CUSTOM80); 3229 break; 3230 case DP_PHY_TEST_PATTERN_CP2520: 3231 /* 3232 * FIXME: Ideally pattern should come from DPCD 0x24A. As 3233 * current firmware of DPR-100 could not set it, so hardcoding 3234 * now for complaince test. 3235 */ 3236 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); 3237 pattern_val = 0xFB; 3238 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 3239 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | 3240 pattern_val); 3241 break; 3242 default: 3243 WARN(1, "Invalid Phy Test Pattern\n"); 3244 } 3245 } 3246 3247 static void 3248 intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp, 3249 const struct intel_crtc_state *crtc_state) 3250 { 3251 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3252 struct drm_device *dev = dig_port->base.base.dev; 3253 struct drm_i915_private *dev_priv = to_i915(dev); 3254 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 3255 enum pipe pipe = crtc->pipe; 3256 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 3257 3258 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 3259 TRANS_DDI_FUNC_CTL(pipe)); 3260 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 3261 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 3262 3263 trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE | 3264 TGL_TRANS_DDI_PORT_MASK); 3265 trans_conf_value &= ~PIPECONF_ENABLE; 3266 dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE; 3267 3268 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 3269 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 3270 trans_ddi_func_ctl_value); 3271 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 3272 } 3273 3274 static void 3275 intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, 3276 const struct intel_crtc_state *crtc_state) 3277 { 3278 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3279 struct drm_device *dev = dig_port->base.base.dev; 3280 struct drm_i915_private *dev_priv = to_i915(dev); 3281 enum port port = dig_port->base.port; 3282 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 3283 enum pipe pipe = crtc->pipe; 3284 u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value; 3285 3286 trans_ddi_func_ctl_value = intel_de_read(dev_priv, 3287 TRANS_DDI_FUNC_CTL(pipe)); 3288 trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe)); 3289 dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe)); 3290 3291 trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE | 3292 TGL_TRANS_DDI_SELECT_PORT(port); 3293 trans_conf_value |= PIPECONF_ENABLE; 3294 dp_tp_ctl_value |= DP_TP_CTL_ENABLE; 3295 3296 intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value); 3297 intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value); 3298 intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), 3299 trans_ddi_func_ctl_value); 3300 } 3301 3302 static void intel_dp_process_phy_request(struct intel_dp *intel_dp, 3303 const struct intel_crtc_state *crtc_state) 3304 { 3305 struct drm_dp_phy_test_params *data = 3306 &intel_dp->compliance.test_data.phytest; 3307 u8 link_status[DP_LINK_STATUS_SIZE]; 3308 3309 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 3310 link_status) < 0) { 3311 DRM_DEBUG_KMS("failed to get link status\n"); 3312 return; 3313 } 3314 3315 /* retrieve vswing & pre-emphasis setting */ 3316 intel_dp_get_adjust_train(intel_dp, crtc_state, DP_PHY_DPRX, 3317 link_status); 3318 3319 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); 3320 3321 intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); 3322 3323 intel_dp_phy_pattern_update(intel_dp, crtc_state); 3324 3325 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state); 3326 3327 drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET, 3328 intel_dp->train_set, crtc_state->lane_count); 3329 3330 drm_dp_set_phy_test_pattern(&intel_dp->aux, data, 3331 link_status[DP_DPCD_REV]); 3332 } 3333 3334 static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) 3335 { 3336 struct drm_dp_phy_test_params *data = 3337 &intel_dp->compliance.test_data.phytest; 3338 3339 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { 3340 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); 3341 return DP_TEST_NAK; 3342 } 3343 3344 /* Set test active flag here so userspace doesn't interrupt things */ 3345 intel_dp->compliance.test_active = true; 3346 3347 return DP_TEST_ACK; 3348 } 3349 3350 static void intel_dp_handle_test_request(struct intel_dp *intel_dp) 3351 { 3352 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3353 u8 response = DP_TEST_NAK; 3354 u8 request = 0; 3355 int status; 3356 3357 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request); 3358 if (status <= 0) { 3359 drm_dbg_kms(&i915->drm, 3360 "Could not read test request from sink\n"); 3361 goto update_status; 3362 } 3363 3364 switch (request) { 3365 case DP_TEST_LINK_TRAINING: 3366 drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n"); 3367 response = intel_dp_autotest_link_training(intel_dp); 3368 break; 3369 case DP_TEST_LINK_VIDEO_PATTERN: 3370 drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n"); 3371 response = intel_dp_autotest_video_pattern(intel_dp); 3372 break; 3373 case DP_TEST_LINK_EDID_READ: 3374 drm_dbg_kms(&i915->drm, "EDID test requested\n"); 3375 response = intel_dp_autotest_edid(intel_dp); 3376 break; 3377 case DP_TEST_LINK_PHY_TEST_PATTERN: 3378 drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n"); 3379 response = intel_dp_autotest_phy_pattern(intel_dp); 3380 break; 3381 default: 3382 drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n", 3383 request); 3384 break; 3385 } 3386 3387 if (response & DP_TEST_ACK) 3388 intel_dp->compliance.test_type = request; 3389 3390 update_status: 3391 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response); 3392 if (status <= 0) 3393 drm_dbg_kms(&i915->drm, 3394 "Could not write test response to sink\n"); 3395 } 3396 3397 static void 3398 intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) 3399 { 3400 drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); 3401 3402 if (esi[1] & DP_CP_IRQ) { 3403 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 3404 *handled = true; 3405 } 3406 } 3407 3408 /** 3409 * intel_dp_check_mst_status - service any pending MST interrupts, check link status 3410 * @intel_dp: Intel DP struct 3411 * 3412 * Read any pending MST interrupts, call MST core to handle these and ack the 3413 * interrupts. Check if the main and AUX link state is ok. 3414 * 3415 * Returns: 3416 * - %true if pending interrupts were serviced (or no interrupts were 3417 * pending) w/o detecting an error condition. 3418 * - %false if an error condition - like AUX failure or a loss of link - is 3419 * detected, which needs servicing from the hotplug work. 3420 */ 3421 static bool 3422 intel_dp_check_mst_status(struct intel_dp *intel_dp) 3423 { 3424 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3425 bool link_ok = true; 3426 3427 drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); 3428 3429 for (;;) { 3430 /* 3431 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then 3432 * pass in "esi+10" to drm_dp_channel_eq_ok(), which 3433 * takes a 6-byte array. So we actually need 16 bytes 3434 * here. 3435 * 3436 * Somebody who knows what the limits actually are 3437 * should check this, but for now this is at least 3438 * harmless and avoids a valid compiler warning about 3439 * using more of the array than we have allocated. 3440 */ 3441 u8 esi[DP_DPRX_ESI_LEN+2] = {}; 3442 bool handled; 3443 int retry; 3444 3445 if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { 3446 drm_dbg_kms(&i915->drm, 3447 "failed to get ESI - device may have failed\n"); 3448 link_ok = false; 3449 3450 break; 3451 } 3452 3453 /* check link status - esi[10] = 0x200c */ 3454 if (intel_dp->active_mst_links > 0 && link_ok && 3455 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 3456 drm_dbg_kms(&i915->drm, 3457 "channel EQ not ok, retraining\n"); 3458 link_ok = false; 3459 } 3460 3461 drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); 3462 3463 intel_dp_mst_hpd_irq(intel_dp, esi, &handled); 3464 3465 if (!handled) 3466 break; 3467 3468 for (retry = 0; retry < 3; retry++) { 3469 int wret; 3470 3471 wret = drm_dp_dpcd_write(&intel_dp->aux, 3472 DP_SINK_COUNT_ESI+1, 3473 &esi[1], 3); 3474 if (wret == 3) 3475 break; 3476 } 3477 } 3478 3479 return link_ok; 3480 } 3481 3482 static void 3483 intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) 3484 { 3485 bool is_active; 3486 u8 buf = 0; 3487 3488 is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux); 3489 if (intel_dp->frl.is_trained && !is_active) { 3490 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0) 3491 return; 3492 3493 buf &= ~DP_PCON_ENABLE_HDMI_LINK; 3494 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0) 3495 return; 3496 3497 drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base); 3498 3499 /* Restart FRL training or fall back to TMDS mode */ 3500 intel_dp_check_frl_training(intel_dp); 3501 } 3502 } 3503 3504 static bool 3505 intel_dp_needs_link_retrain(struct intel_dp *intel_dp) 3506 { 3507 u8 link_status[DP_LINK_STATUS_SIZE]; 3508 3509 if (!intel_dp->link_trained) 3510 return false; 3511 3512 /* 3513 * While PSR source HW is enabled, it will control main-link sending 3514 * frames, enabling and disabling it so trying to do a retrain will fail 3515 * as the link would or not be on or it could mix training patterns 3516 * and frame data at the same time causing retrain to fail. 3517 * Also when exiting PSR, HW will retrain the link anyways fixing 3518 * any link status error. 3519 */ 3520 if (intel_psr_enabled(intel_dp)) 3521 return false; 3522 3523 if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, 3524 link_status) < 0) 3525 return false; 3526 3527 /* 3528 * Validate the cached values of intel_dp->link_rate and 3529 * intel_dp->lane_count before attempting to retrain. 3530 * 3531 * FIXME would be nice to user the crtc state here, but since 3532 * we need to call this from the short HPD handler that seems 3533 * a bit hard. 3534 */ 3535 if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate, 3536 intel_dp->lane_count)) 3537 return false; 3538 3539 /* Retrain if Channel EQ or CR not ok */ 3540 return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count); 3541 } 3542 3543 static bool intel_dp_has_connector(struct intel_dp *intel_dp, 3544 const struct drm_connector_state *conn_state) 3545 { 3546 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3547 struct intel_encoder *encoder; 3548 enum pipe pipe; 3549 3550 if (!conn_state->best_encoder) 3551 return false; 3552 3553 /* SST */ 3554 encoder = &dp_to_dig_port(intel_dp)->base; 3555 if (conn_state->best_encoder == &encoder->base) 3556 return true; 3557 3558 /* MST */ 3559 for_each_pipe(i915, pipe) { 3560 encoder = &intel_dp->mst_encoders[pipe]->base; 3561 if (conn_state->best_encoder == &encoder->base) 3562 return true; 3563 } 3564 3565 return false; 3566 } 3567 3568 static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp, 3569 struct drm_modeset_acquire_ctx *ctx, 3570 u32 *crtc_mask) 3571 { 3572 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3573 struct drm_connector_list_iter conn_iter; 3574 struct intel_connector *connector; 3575 int ret = 0; 3576 3577 *crtc_mask = 0; 3578 3579 if (!intel_dp_needs_link_retrain(intel_dp)) 3580 return 0; 3581 3582 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 3583 for_each_intel_connector_iter(connector, &conn_iter) { 3584 struct drm_connector_state *conn_state = 3585 connector->base.state; 3586 struct intel_crtc_state *crtc_state; 3587 struct intel_crtc *crtc; 3588 3589 if (!intel_dp_has_connector(intel_dp, conn_state)) 3590 continue; 3591 3592 crtc = to_intel_crtc(conn_state->crtc); 3593 if (!crtc) 3594 continue; 3595 3596 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 3597 if (ret) 3598 break; 3599 3600 crtc_state = to_intel_crtc_state(crtc->base.state); 3601 3602 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 3603 3604 if (!crtc_state->hw.active) 3605 continue; 3606 3607 if (conn_state->commit && 3608 !try_wait_for_completion(&conn_state->commit->hw_done)) 3609 continue; 3610 3611 *crtc_mask |= drm_crtc_mask(&crtc->base); 3612 } 3613 drm_connector_list_iter_end(&conn_iter); 3614 3615 if (!intel_dp_needs_link_retrain(intel_dp)) 3616 *crtc_mask = 0; 3617 3618 return ret; 3619 } 3620 3621 static bool intel_dp_is_connected(struct intel_dp *intel_dp) 3622 { 3623 struct intel_connector *connector = intel_dp->attached_connector; 3624 3625 return connector->base.status == connector_status_connected || 3626 intel_dp->is_mst; 3627 } 3628 3629 int intel_dp_retrain_link(struct intel_encoder *encoder, 3630 struct drm_modeset_acquire_ctx *ctx) 3631 { 3632 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3633 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3634 struct intel_crtc *crtc; 3635 u32 crtc_mask; 3636 int ret; 3637 3638 if (!intel_dp_is_connected(intel_dp)) 3639 return 0; 3640 3641 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 3642 ctx); 3643 if (ret) 3644 return ret; 3645 3646 ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask); 3647 if (ret) 3648 return ret; 3649 3650 if (crtc_mask == 0) 3651 return 0; 3652 3653 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n", 3654 encoder->base.base.id, encoder->base.name); 3655 3656 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 3657 const struct intel_crtc_state *crtc_state = 3658 to_intel_crtc_state(crtc->base.state); 3659 3660 /* Suppress underruns caused by re-training */ 3661 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); 3662 if (crtc_state->has_pch_encoder) 3663 intel_set_pch_fifo_underrun_reporting(dev_priv, 3664 intel_crtc_pch_transcoder(crtc), false); 3665 } 3666 3667 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 3668 const struct intel_crtc_state *crtc_state = 3669 to_intel_crtc_state(crtc->base.state); 3670 3671 /* retrain on the MST master transcoder */ 3672 if (DISPLAY_VER(dev_priv) >= 12 && 3673 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 3674 !intel_dp_mst_is_master_trans(crtc_state)) 3675 continue; 3676 3677 intel_dp_check_frl_training(intel_dp); 3678 intel_dp_pcon_dsc_configure(intel_dp, crtc_state); 3679 intel_dp_start_link_train(intel_dp, crtc_state); 3680 intel_dp_stop_link_train(intel_dp, crtc_state); 3681 break; 3682 } 3683 3684 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 3685 const struct intel_crtc_state *crtc_state = 3686 to_intel_crtc_state(crtc->base.state); 3687 3688 /* Keep underrun reporting disabled until things are stable */ 3689 intel_wait_for_vblank(dev_priv, crtc->pipe); 3690 3691 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); 3692 if (crtc_state->has_pch_encoder) 3693 intel_set_pch_fifo_underrun_reporting(dev_priv, 3694 intel_crtc_pch_transcoder(crtc), true); 3695 } 3696 3697 return 0; 3698 } 3699 3700 static int intel_dp_prep_phy_test(struct intel_dp *intel_dp, 3701 struct drm_modeset_acquire_ctx *ctx, 3702 u32 *crtc_mask) 3703 { 3704 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3705 struct drm_connector_list_iter conn_iter; 3706 struct intel_connector *connector; 3707 int ret = 0; 3708 3709 *crtc_mask = 0; 3710 3711 drm_connector_list_iter_begin(&i915->drm, &conn_iter); 3712 for_each_intel_connector_iter(connector, &conn_iter) { 3713 struct drm_connector_state *conn_state = 3714 connector->base.state; 3715 struct intel_crtc_state *crtc_state; 3716 struct intel_crtc *crtc; 3717 3718 if (!intel_dp_has_connector(intel_dp, conn_state)) 3719 continue; 3720 3721 crtc = to_intel_crtc(conn_state->crtc); 3722 if (!crtc) 3723 continue; 3724 3725 ret = drm_modeset_lock(&crtc->base.mutex, ctx); 3726 if (ret) 3727 break; 3728 3729 crtc_state = to_intel_crtc_state(crtc->base.state); 3730 3731 drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state)); 3732 3733 if (!crtc_state->hw.active) 3734 continue; 3735 3736 if (conn_state->commit && 3737 !try_wait_for_completion(&conn_state->commit->hw_done)) 3738 continue; 3739 3740 *crtc_mask |= drm_crtc_mask(&crtc->base); 3741 } 3742 drm_connector_list_iter_end(&conn_iter); 3743 3744 return ret; 3745 } 3746 3747 static int intel_dp_do_phy_test(struct intel_encoder *encoder, 3748 struct drm_modeset_acquire_ctx *ctx) 3749 { 3750 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 3751 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 3752 struct intel_crtc *crtc; 3753 u32 crtc_mask; 3754 int ret; 3755 3756 ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, 3757 ctx); 3758 if (ret) 3759 return ret; 3760 3761 ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask); 3762 if (ret) 3763 return ret; 3764 3765 if (crtc_mask == 0) 3766 return 0; 3767 3768 drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n", 3769 encoder->base.base.id, encoder->base.name); 3770 3771 for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) { 3772 const struct intel_crtc_state *crtc_state = 3773 to_intel_crtc_state(crtc->base.state); 3774 3775 /* test on the MST master transcoder */ 3776 if (DISPLAY_VER(dev_priv) >= 12 && 3777 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) && 3778 !intel_dp_mst_is_master_trans(crtc_state)) 3779 continue; 3780 3781 intel_dp_process_phy_request(intel_dp, crtc_state); 3782 break; 3783 } 3784 3785 return 0; 3786 } 3787 3788 void intel_dp_phy_test(struct intel_encoder *encoder) 3789 { 3790 struct drm_modeset_acquire_ctx ctx; 3791 int ret; 3792 3793 drm_modeset_acquire_init(&ctx, 0); 3794 3795 for (;;) { 3796 ret = intel_dp_do_phy_test(encoder, &ctx); 3797 3798 if (ret == -EDEADLK) { 3799 drm_modeset_backoff(&ctx); 3800 continue; 3801 } 3802 3803 break; 3804 } 3805 3806 drm_modeset_drop_locks(&ctx); 3807 drm_modeset_acquire_fini(&ctx); 3808 drm_WARN(encoder->base.dev, ret, 3809 "Acquiring modeset locks failed with %i\n", ret); 3810 } 3811 3812 static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) 3813 { 3814 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3815 u8 val; 3816 3817 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3818 return; 3819 3820 if (drm_dp_dpcd_readb(&intel_dp->aux, 3821 DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val) 3822 return; 3823 3824 drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val); 3825 3826 if (val & DP_AUTOMATED_TEST_REQUEST) 3827 intel_dp_handle_test_request(intel_dp); 3828 3829 if (val & DP_CP_IRQ) 3830 intel_hdcp_handle_cp_irq(intel_dp->attached_connector); 3831 3832 if (val & DP_SINK_SPECIFIC_IRQ) 3833 drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); 3834 } 3835 3836 static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp) 3837 { 3838 u8 val; 3839 3840 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) 3841 return; 3842 3843 if (drm_dp_dpcd_readb(&intel_dp->aux, 3844 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) 3845 return; 3846 3847 if (drm_dp_dpcd_writeb(&intel_dp->aux, 3848 DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) 3849 return; 3850 3851 if (val & HDMI_LINK_STATUS_CHANGED) 3852 intel_dp_handle_hdmi_link_status_change(intel_dp); 3853 } 3854 3855 /* 3856 * According to DP spec 3857 * 5.1.2: 3858 * 1. Read DPCD 3859 * 2. Configure link according to Receiver Capabilities 3860 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 3861 * 4. Check link status on receipt of hot-plug interrupt 3862 * 3863 * intel_dp_short_pulse - handles short pulse interrupts 3864 * when full detection is not required. 3865 * Returns %true if short pulse is handled and full detection 3866 * is NOT required and %false otherwise. 3867 */ 3868 static bool 3869 intel_dp_short_pulse(struct intel_dp *intel_dp) 3870 { 3871 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 3872 u8 old_sink_count = intel_dp->sink_count; 3873 bool ret; 3874 3875 /* 3876 * Clearing compliance test variables to allow capturing 3877 * of values for next automated test request. 3878 */ 3879 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 3880 3881 /* 3882 * Now read the DPCD to see if it's actually running 3883 * If the current value of sink count doesn't match with 3884 * the value that was stored earlier or dpcd read failed 3885 * we need to do full detection 3886 */ 3887 ret = intel_dp_get_dpcd(intel_dp); 3888 3889 if ((old_sink_count != intel_dp->sink_count) || !ret) { 3890 /* No need to proceed if we are going to do full detect */ 3891 return false; 3892 } 3893 3894 intel_dp_check_device_service_irq(intel_dp); 3895 intel_dp_check_link_service_irq(intel_dp); 3896 3897 /* Handle CEC interrupts, if any */ 3898 drm_dp_cec_irq(&intel_dp->aux); 3899 3900 /* defer to the hotplug work for link retraining if needed */ 3901 if (intel_dp_needs_link_retrain(intel_dp)) 3902 return false; 3903 3904 intel_psr_short_pulse(intel_dp); 3905 3906 switch (intel_dp->compliance.test_type) { 3907 case DP_TEST_LINK_TRAINING: 3908 drm_dbg_kms(&dev_priv->drm, 3909 "Link Training Compliance Test requested\n"); 3910 /* Send a Hotplug Uevent to userspace to start modeset */ 3911 drm_kms_helper_hotplug_event(&dev_priv->drm); 3912 break; 3913 case DP_TEST_LINK_PHY_TEST_PATTERN: 3914 drm_dbg_kms(&dev_priv->drm, 3915 "PHY test pattern Compliance Test requested\n"); 3916 /* 3917 * Schedule long hpd to do the test 3918 * 3919 * FIXME get rid of the ad-hoc phy test modeset code 3920 * and properly incorporate it into the normal modeset. 3921 */ 3922 return false; 3923 } 3924 3925 return true; 3926 } 3927 3928 /* XXX this is probably wrong for multiple downstream ports */ 3929 static enum drm_connector_status 3930 intel_dp_detect_dpcd(struct intel_dp *intel_dp) 3931 { 3932 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 3933 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3934 u8 *dpcd = intel_dp->dpcd; 3935 u8 type; 3936 3937 if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp))) 3938 return connector_status_connected; 3939 3940 lspcon_resume(dig_port); 3941 3942 if (!intel_dp_get_dpcd(intel_dp)) 3943 return connector_status_disconnected; 3944 3945 /* if there's no downstream port, we're done */ 3946 if (!drm_dp_is_branch(dpcd)) 3947 return connector_status_connected; 3948 3949 /* If we're HPD-aware, SINK_COUNT changes dynamically */ 3950 if (intel_dp_has_sink_count(intel_dp) && 3951 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { 3952 return intel_dp->sink_count ? 3953 connector_status_connected : connector_status_disconnected; 3954 } 3955 3956 if (intel_dp_can_mst(intel_dp)) 3957 return connector_status_connected; 3958 3959 /* If no HPD, poke DDC gently */ 3960 if (drm_probe_ddc(&intel_dp->aux.ddc)) 3961 return connector_status_connected; 3962 3963 /* Well we tried, say unknown for unreliable port types */ 3964 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 3965 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 3966 if (type == DP_DS_PORT_TYPE_VGA || 3967 type == DP_DS_PORT_TYPE_NON_EDID) 3968 return connector_status_unknown; 3969 } else { 3970 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 3971 DP_DWN_STRM_PORT_TYPE_MASK; 3972 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 3973 type == DP_DWN_STRM_PORT_TYPE_OTHER) 3974 return connector_status_unknown; 3975 } 3976 3977 /* Anything else is out of spec, warn and ignore */ 3978 drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n"); 3979 return connector_status_disconnected; 3980 } 3981 3982 static enum drm_connector_status 3983 edp_detect(struct intel_dp *intel_dp) 3984 { 3985 return connector_status_connected; 3986 } 3987 3988 /* 3989 * intel_digital_port_connected - is the specified port connected? 3990 * @encoder: intel_encoder 3991 * 3992 * In cases where there's a connector physically connected but it can't be used 3993 * by our hardware we also return false, since the rest of the driver should 3994 * pretty much treat the port as disconnected. This is relevant for type-C 3995 * (starting on ICL) where there's ownership involved. 3996 * 3997 * Return %true if port is connected, %false otherwise. 3998 */ 3999 bool intel_digital_port_connected(struct intel_encoder *encoder) 4000 { 4001 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 4002 struct intel_digital_port *dig_port = enc_to_dig_port(encoder); 4003 bool is_connected = false; 4004 intel_wakeref_t wakeref; 4005 4006 with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref) 4007 is_connected = dig_port->connected(encoder); 4008 4009 return is_connected; 4010 } 4011 4012 static struct edid * 4013 intel_dp_get_edid(struct intel_dp *intel_dp) 4014 { 4015 struct intel_connector *intel_connector = intel_dp->attached_connector; 4016 4017 /* use cached edid if we have one */ 4018 if (intel_connector->edid) { 4019 /* invalid edid */ 4020 if (IS_ERR(intel_connector->edid)) 4021 return NULL; 4022 4023 return drm_edid_duplicate(intel_connector->edid); 4024 } else 4025 return drm_get_edid(&intel_connector->base, 4026 &intel_dp->aux.ddc); 4027 } 4028 4029 static void 4030 intel_dp_update_dfp(struct intel_dp *intel_dp, 4031 const struct edid *edid) 4032 { 4033 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4034 struct intel_connector *connector = intel_dp->attached_connector; 4035 4036 intel_dp->dfp.max_bpc = 4037 drm_dp_downstream_max_bpc(intel_dp->dpcd, 4038 intel_dp->downstream_ports, edid); 4039 4040 intel_dp->dfp.max_dotclock = 4041 drm_dp_downstream_max_dotclock(intel_dp->dpcd, 4042 intel_dp->downstream_ports); 4043 4044 intel_dp->dfp.min_tmds_clock = 4045 drm_dp_downstream_min_tmds_clock(intel_dp->dpcd, 4046 intel_dp->downstream_ports, 4047 edid); 4048 intel_dp->dfp.max_tmds_clock = 4049 drm_dp_downstream_max_tmds_clock(intel_dp->dpcd, 4050 intel_dp->downstream_ports, 4051 edid); 4052 4053 intel_dp->dfp.pcon_max_frl_bw = 4054 drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd, 4055 intel_dp->downstream_ports); 4056 4057 drm_dbg_kms(&i915->drm, 4058 "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n", 4059 connector->base.base.id, connector->base.name, 4060 intel_dp->dfp.max_bpc, 4061 intel_dp->dfp.max_dotclock, 4062 intel_dp->dfp.min_tmds_clock, 4063 intel_dp->dfp.max_tmds_clock, 4064 intel_dp->dfp.pcon_max_frl_bw); 4065 4066 intel_dp_get_pcon_dsc_cap(intel_dp); 4067 } 4068 4069 static void 4070 intel_dp_update_420(struct intel_dp *intel_dp) 4071 { 4072 struct drm_i915_private *i915 = dp_to_i915(intel_dp); 4073 struct intel_connector *connector = intel_dp->attached_connector; 4074 bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr; 4075 4076 /* No YCbCr output support on gmch platforms */ 4077 if (HAS_GMCH(i915)) 4078 return; 4079 4080 /* 4081 * ILK doesn't seem capable of DP YCbCr output. The 4082 * displayed image is severly corrupted. SNB+ is fine. 4083 */ 4084 if (IS_IRONLAKE(i915)) 4085 return; 4086 4087 is_branch = drm_dp_is_branch(intel_dp->dpcd); 4088 ycbcr_420_passthrough = 4089 drm_dp_downstream_420_passthrough(intel_dp->dpcd, 4090 intel_dp->downstream_ports); 4091 /* on-board LSPCON always assumed to support 4:4:4->4:2:0 conversion */ 4092 ycbcr_444_to_420 = 4093 dp_to_dig_port(intel_dp)->lspcon.active || 4094 drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, 4095 intel_dp->downstream_ports); 4096 rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, 4097 intel_dp->downstream_ports, 4098 DP_DS_HDMI_BT601_RGB_YCBCR_CONV | 4099 DP_DS_HDMI_BT709_RGB_YCBCR_CONV | 4100 DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); 4101 4102 if (DISPLAY_VER(i915) >= 11) { 4103 /* Let PCON convert from RGB->YCbCr if possible */ 4104 if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) { 4105 intel_dp->dfp.rgb_to_ycbcr = true; 4106 intel_dp->dfp.ycbcr_444_to_420 = true; 4107 connector->base.ycbcr_420_allowed = true; 4108 } else { 4109 /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ 4110 intel_dp->dfp.ycbcr_444_to_420 = 4111 ycbcr_444_to_420 && !ycbcr_420_passthrough; 4112 4113 connector->base.ycbcr_420_allowed = 4114 !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; 4115 } 4116 } else { 4117 /* 4:4:4->4:2:0 conversion is the only way */ 4118 intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; 4119 4120 connector->base.ycbcr_420_allowed = ycbcr_444_to_420; 4121 } 4122 4123 drm_dbg_kms(&i915->drm, 4124 "[CONNECTOR:%d:%s] RGB->YcbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n", 4125 connector->base.base.id, connector->base.name, 4126 yesno(intel_dp->dfp.rgb_to_ycbcr), 4127 yesno(connector->base.ycbcr_420_allowed), 4128 yesno(intel_dp->dfp.ycbcr_444_to_420)); 4129 } 4130 4131 static void 4132 intel_dp_set_edid(struct intel_dp *intel_dp) 4133 { 4134 struct intel_connector *connector = intel_dp->attached_connector; 4135 struct edid *edid; 4136 4137 intel_dp_unset_edid(intel_dp); 4138 edid = intel_dp_get_edid(intel_dp); 4139 connector->detect_edid = edid; 4140 4141 intel_dp_update_dfp(intel_dp, edid); 4142 intel_dp_update_420(intel_dp); 4143 4144 if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) { 4145 intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 4146 intel_dp->has_audio = drm_detect_monitor_audio(edid); 4147 } 4148 4149 drm_dp_cec_set_edid(&intel_dp->aux, edid); 4150 } 4151 4152 static void 4153 intel_dp_unset_edid(struct intel_dp *intel_dp) 4154 { 4155 struct intel_connector *connector = intel_dp->attached_connector; 4156 4157 drm_dp_cec_unset_edid(&intel_dp->aux); 4158 kfree(connector->detect_edid); 4159 connector->detect_edid = NULL; 4160 4161 intel_dp->has_hdmi_sink = false; 4162 intel_dp->has_audio = false; 4163 4164 intel_dp->dfp.max_bpc = 0; 4165 intel_dp->dfp.max_dotclock = 0; 4166 intel_dp->dfp.min_tmds_clock = 0; 4167 intel_dp->dfp.max_tmds_clock = 0; 4168 4169 intel_dp->dfp.pcon_max_frl_bw = 0; 4170 4171 intel_dp->dfp.ycbcr_444_to_420 = false; 4172 connector->base.ycbcr_420_allowed = false; 4173 } 4174 4175 static int 4176 intel_dp_detect(struct drm_connector *connector, 4177 struct drm_modeset_acquire_ctx *ctx, 4178 bool force) 4179 { 4180 struct drm_i915_private *dev_priv = to_i915(connector->dev); 4181 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4182 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4183 struct intel_encoder *encoder = &dig_port->base; 4184 enum drm_connector_status status; 4185 4186 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 4187 connector->base.id, connector->name); 4188 drm_WARN_ON(&dev_priv->drm, 4189 !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); 4190 4191 if (!INTEL_DISPLAY_ENABLED(dev_priv)) 4192 return connector_status_disconnected; 4193 4194 /* Can't disconnect eDP */ 4195 if (intel_dp_is_edp(intel_dp)) 4196 status = edp_detect(intel_dp); 4197 else if (intel_digital_port_connected(encoder)) 4198 status = intel_dp_detect_dpcd(intel_dp); 4199 else 4200 status = connector_status_disconnected; 4201 4202 if (status == connector_status_disconnected) { 4203 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance)); 4204 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd)); 4205 4206 if (intel_dp->is_mst) { 4207 drm_dbg_kms(&dev_priv->drm, 4208 "MST device may have disappeared %d vs %d\n", 4209 intel_dp->is_mst, 4210 intel_dp->mst_mgr.mst_state); 4211 intel_dp->is_mst = false; 4212 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 4213 intel_dp->is_mst); 4214 } 4215 4216 goto out; 4217 } 4218 4219 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */ 4220 if (DISPLAY_VER(dev_priv) >= 11) 4221 intel_dp_get_dsc_sink_cap(intel_dp); 4222 4223 intel_dp_configure_mst(intel_dp); 4224 4225 /* 4226 * TODO: Reset link params when switching to MST mode, until MST 4227 * supports link training fallback params. 4228 */ 4229 if (intel_dp->reset_link_params || intel_dp->is_mst) { 4230 intel_dp_reset_max_link_params(intel_dp); 4231 intel_dp->reset_link_params = false; 4232 } 4233 4234 intel_dp_print_rates(intel_dp); 4235 4236 if (intel_dp->is_mst) { 4237 /* 4238 * If we are in MST mode then this connector 4239 * won't appear connected or have anything 4240 * with EDID on it 4241 */ 4242 status = connector_status_disconnected; 4243 goto out; 4244 } 4245 4246 /* 4247 * Some external monitors do not signal loss of link synchronization 4248 * with an IRQ_HPD, so force a link status check. 4249 */ 4250 if (!intel_dp_is_edp(intel_dp)) { 4251 int ret; 4252 4253 ret = intel_dp_retrain_link(encoder, ctx); 4254 if (ret) 4255 return ret; 4256 } 4257 4258 /* 4259 * Clearing NACK and defer counts to get their exact values 4260 * while reading EDID which are required by Compliance tests 4261 * 4.2.2.4 and 4.2.2.5 4262 */ 4263 intel_dp->aux.i2c_nack_count = 0; 4264 intel_dp->aux.i2c_defer_count = 0; 4265 4266 intel_dp_set_edid(intel_dp); 4267 if (intel_dp_is_edp(intel_dp) || 4268 to_intel_connector(connector)->detect_edid) 4269 status = connector_status_connected; 4270 4271 intel_dp_check_device_service_irq(intel_dp); 4272 4273 out: 4274 if (status != connector_status_connected && !intel_dp->is_mst) 4275 intel_dp_unset_edid(intel_dp); 4276 4277 /* 4278 * Make sure the refs for power wells enabled during detect are 4279 * dropped to avoid a new detect cycle triggered by HPD polling. 4280 */ 4281 intel_display_power_flush_work(dev_priv); 4282 4283 if (!intel_dp_is_edp(intel_dp)) 4284 drm_dp_set_subconnector_property(connector, 4285 status, 4286 intel_dp->dpcd, 4287 intel_dp->downstream_ports); 4288 return status; 4289 } 4290 4291 static void 4292 intel_dp_force(struct drm_connector *connector) 4293 { 4294 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4295 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4296 struct intel_encoder *intel_encoder = &dig_port->base; 4297 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); 4298 enum intel_display_power_domain aux_domain = 4299 intel_aux_power_domain(dig_port); 4300 intel_wakeref_t wakeref; 4301 4302 drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n", 4303 connector->base.id, connector->name); 4304 intel_dp_unset_edid(intel_dp); 4305 4306 if (connector->status != connector_status_connected) 4307 return; 4308 4309 wakeref = intel_display_power_get(dev_priv, aux_domain); 4310 4311 intel_dp_set_edid(intel_dp); 4312 4313 intel_display_power_put(dev_priv, aux_domain, wakeref); 4314 } 4315 4316 static int intel_dp_get_modes(struct drm_connector *connector) 4317 { 4318 struct intel_connector *intel_connector = to_intel_connector(connector); 4319 struct edid *edid; 4320 int num_modes = 0; 4321 4322 edid = intel_connector->detect_edid; 4323 if (edid) { 4324 num_modes = intel_connector_update_modes(connector, edid); 4325 4326 if (intel_vrr_is_capable(connector)) 4327 drm_connector_set_vrr_capable_property(connector, 4328 true); 4329 } 4330 4331 /* Also add fixed mode, which may or may not be present in EDID */ 4332 if (intel_dp_is_edp(intel_attached_dp(intel_connector)) && 4333 intel_connector->panel.fixed_mode) { 4334 struct drm_display_mode *mode; 4335 4336 mode = drm_mode_duplicate(connector->dev, 4337 intel_connector->panel.fixed_mode); 4338 if (mode) { 4339 drm_mode_probed_add(connector, mode); 4340 num_modes++; 4341 } 4342 } 4343 4344 if (num_modes) 4345 return num_modes; 4346 4347 if (!edid) { 4348 struct intel_dp *intel_dp = intel_attached_dp(intel_connector); 4349 struct drm_display_mode *mode; 4350 4351 mode = drm_dp_downstream_mode(connector->dev, 4352 intel_dp->dpcd, 4353 intel_dp->downstream_ports); 4354 if (mode) { 4355 drm_mode_probed_add(connector, mode); 4356 num_modes++; 4357 } 4358 } 4359 4360 return num_modes; 4361 } 4362 4363 static int 4364 intel_dp_connector_register(struct drm_connector *connector) 4365 { 4366 struct drm_i915_private *i915 = to_i915(connector->dev); 4367 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4368 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 4369 struct intel_lspcon *lspcon = &dig_port->lspcon; 4370 int ret; 4371 4372 ret = intel_connector_register(connector); 4373 if (ret) 4374 return ret; 4375 4376 #ifdef notyet 4377 drm_dbg_kms(&i915->drm, "registering %s bus for %s\n", 4378 intel_dp->aux.name, connector->kdev->kobj.name); 4379 #endif 4380 4381 intel_dp->aux.dev = connector->kdev; 4382 ret = drm_dp_aux_register(&intel_dp->aux); 4383 if (!ret) 4384 drm_dp_cec_register_connector(&intel_dp->aux, connector); 4385 4386 if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) 4387 return ret; 4388 4389 /* 4390 * ToDo: Clean this up to handle lspcon init and resume more 4391 * efficiently and streamlined. 4392 */ 4393 if (lspcon_init(dig_port)) { 4394 lspcon_detect_hdr_capability(lspcon); 4395 if (lspcon->hdr_supported) 4396 drm_object_attach_property(&connector->base, 4397 connector->dev->mode_config.hdr_output_metadata_property, 4398 0); 4399 } 4400 4401 return ret; 4402 } 4403 4404 static void 4405 intel_dp_connector_unregister(struct drm_connector *connector) 4406 { 4407 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); 4408 4409 drm_dp_cec_unregister_connector(&intel_dp->aux); 4410 drm_dp_aux_unregister(&intel_dp->aux); 4411 intel_connector_unregister(connector); 4412 } 4413 4414 void intel_dp_encoder_flush_work(struct drm_encoder *encoder) 4415 { 4416 struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder)); 4417 struct intel_dp *intel_dp = &dig_port->dp; 4418 4419 intel_dp_mst_encoder_cleanup(dig_port); 4420 4421 intel_pps_vdd_off_sync(intel_dp); 4422 4423 intel_dp_aux_fini(intel_dp); 4424 } 4425 4426 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) 4427 { 4428 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 4429 4430 intel_pps_vdd_off_sync(intel_dp); 4431 } 4432 4433 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder) 4434 { 4435 struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder); 4436 4437 intel_pps_wait_power_cycle(intel_dp); 4438 } 4439 4440 static int intel_modeset_tile_group(struct intel_atomic_state *state, 4441 int tile_group_id) 4442 { 4443 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4444 struct drm_connector_list_iter conn_iter; 4445 struct drm_connector *connector; 4446 int ret = 0; 4447 4448 drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); 4449 drm_for_each_connector_iter(connector, &conn_iter) { 4450 struct drm_connector_state *conn_state; 4451 struct intel_crtc_state *crtc_state; 4452 struct intel_crtc *crtc; 4453 4454 if (!connector->has_tile || 4455 connector->tile_group->id != tile_group_id) 4456 continue; 4457 4458 conn_state = drm_atomic_get_connector_state(&state->base, 4459 connector); 4460 if (IS_ERR(conn_state)) { 4461 ret = PTR_ERR(conn_state); 4462 break; 4463 } 4464 4465 crtc = to_intel_crtc(conn_state->crtc); 4466 4467 if (!crtc) 4468 continue; 4469 4470 crtc_state = intel_atomic_get_new_crtc_state(state, crtc); 4471 crtc_state->uapi.mode_changed = true; 4472 4473 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 4474 if (ret) 4475 break; 4476 } 4477 drm_connector_list_iter_end(&conn_iter); 4478 4479 return ret; 4480 } 4481 4482 static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders) 4483 { 4484 struct drm_i915_private *dev_priv = to_i915(state->base.dev); 4485 struct intel_crtc *crtc; 4486 4487 if (transcoders == 0) 4488 return 0; 4489 4490 for_each_intel_crtc(&dev_priv->drm, crtc) { 4491 struct intel_crtc_state *crtc_state; 4492 int ret; 4493 4494 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); 4495 if (IS_ERR(crtc_state)) 4496 return PTR_ERR(crtc_state); 4497 4498 if (!crtc_state->hw.enable) 4499 continue; 4500 4501 if (!(transcoders & BIT(crtc_state->cpu_transcoder))) 4502 continue; 4503 4504 crtc_state->uapi.mode_changed = true; 4505 4506 ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base); 4507 if (ret) 4508 return ret; 4509 4510 ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); 4511 if (ret) 4512 return ret; 4513 4514 transcoders &= ~BIT(crtc_state->cpu_transcoder); 4515 } 4516 4517 drm_WARN_ON(&dev_priv->drm, transcoders != 0); 4518 4519 return 0; 4520 } 4521 4522 static int intel_modeset_synced_crtcs(struct intel_atomic_state *state, 4523 struct drm_connector *connector) 4524 { 4525 const struct drm_connector_state *old_conn_state = 4526 drm_atomic_get_old_connector_state(&state->base, connector); 4527 const struct intel_crtc_state *old_crtc_state; 4528 struct intel_crtc *crtc; 4529 u8 transcoders; 4530 4531 crtc = to_intel_crtc(old_conn_state->crtc); 4532 if (!crtc) 4533 return 0; 4534 4535 old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); 4536 4537 if (!old_crtc_state->hw.active) 4538 return 0; 4539 4540 transcoders = old_crtc_state->sync_mode_slaves_mask; 4541 if (old_crtc_state->master_transcoder != INVALID_TRANSCODER) 4542 transcoders |= BIT(old_crtc_state->master_transcoder); 4543 4544 return intel_modeset_affected_transcoders(state, 4545 transcoders); 4546 } 4547 4548 static int intel_dp_connector_atomic_check(struct drm_connector *conn, 4549 struct drm_atomic_state *_state) 4550 { 4551 struct drm_i915_private *dev_priv = to_i915(conn->dev); 4552 struct intel_atomic_state *state = to_intel_atomic_state(_state); 4553 int ret; 4554 4555 ret = intel_digital_connector_atomic_check(conn, &state->base); 4556 if (ret) 4557 return ret; 4558 4559 /* 4560 * We don't enable port sync on BDW due to missing w/as and 4561 * due to not having adjusted the modeset sequence appropriately. 4562 */ 4563 if (DISPLAY_VER(dev_priv) < 9) 4564 return 0; 4565 4566 if (!intel_connector_needs_modeset(state, conn)) 4567 return 0; 4568 4569 if (conn->has_tile) { 4570 ret = intel_modeset_tile_group(state, conn->tile_group->id); 4571 if (ret) 4572 return ret; 4573 } 4574 4575 return intel_modeset_synced_crtcs(state, conn); 4576 } 4577 4578 static const struct drm_connector_funcs intel_dp_connector_funcs = { 4579 .force = intel_dp_force, 4580 .fill_modes = drm_helper_probe_single_connector_modes, 4581 .atomic_get_property = intel_digital_connector_atomic_get_property, 4582 .atomic_set_property = intel_digital_connector_atomic_set_property, 4583 .late_register = intel_dp_connector_register, 4584 .early_unregister = intel_dp_connector_unregister, 4585 .destroy = intel_connector_destroy, 4586 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 4587 .atomic_duplicate_state = intel_digital_connector_duplicate_state, 4588 }; 4589 4590 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 4591 .detect_ctx = intel_dp_detect, 4592 .get_modes = intel_dp_get_modes, 4593 .mode_valid = intel_dp_mode_valid, 4594 .atomic_check = intel_dp_connector_atomic_check, 4595 }; 4596 4597 enum irqreturn 4598 intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) 4599 { 4600 struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); 4601 struct intel_dp *intel_dp = &dig_port->dp; 4602 4603 if (dig_port->base.type == INTEL_OUTPUT_EDP && 4604 (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) { 4605 /* 4606 * vdd off can generate a long/short pulse on eDP which 4607 * would require vdd on to handle it, and thus we 4608 * would end up in an endless cycle of 4609 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..." 4610 */ 4611 drm_dbg_kms(&i915->drm, 4612 "ignoring %s hpd on eDP [ENCODER:%d:%s]\n", 4613 long_hpd ? "long" : "short", 4614 dig_port->base.base.base.id, 4615 dig_port->base.base.name); 4616 return IRQ_HANDLED; 4617 } 4618 4619 drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n", 4620 dig_port->base.base.base.id, 4621 dig_port->base.base.name, 4622 long_hpd ? "long" : "short"); 4623 4624 if (long_hpd) { 4625 intel_dp->reset_link_params = true; 4626 return IRQ_NONE; 4627 } 4628 4629 if (intel_dp->is_mst) { 4630 if (!intel_dp_check_mst_status(intel_dp)) 4631 return IRQ_NONE; 4632 } else if (!intel_dp_short_pulse(intel_dp)) { 4633 return IRQ_NONE; 4634 } 4635 4636 return IRQ_HANDLED; 4637 } 4638 4639 /* check the VBT to see whether the eDP is on another port */ 4640 bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port) 4641 { 4642 /* 4643 * eDP not supported on g4x. so bail out early just 4644 * for a bit extra safety in case the VBT is bonkers. 4645 */ 4646 if (DISPLAY_VER(dev_priv) < 5) 4647 return false; 4648 4649 if (DISPLAY_VER(dev_priv) < 9 && port == PORT_A) 4650 return true; 4651 4652 return intel_bios_is_port_edp(dev_priv, port); 4653 } 4654 4655 static void 4656 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 4657 { 4658 struct drm_i915_private *dev_priv = to_i915(connector->dev); 4659 enum port port = dp_to_dig_port(intel_dp)->base.port; 4660 4661 if (!intel_dp_is_edp(intel_dp)) 4662 drm_connector_attach_dp_subconnector_property(connector); 4663 4664 if (!IS_G4X(dev_priv) && port != PORT_A) 4665 intel_attach_force_audio_property(connector); 4666 4667 intel_attach_broadcast_rgb_property(connector); 4668 if (HAS_GMCH(dev_priv)) 4669 drm_connector_attach_max_bpc_property(connector, 6, 10); 4670 else if (DISPLAY_VER(dev_priv) >= 5) 4671 drm_connector_attach_max_bpc_property(connector, 6, 12); 4672 4673 /* Register HDMI colorspace for case of lspcon */ 4674 if (intel_bios_is_lspcon_present(dev_priv, port)) { 4675 drm_connector_attach_content_type_property(connector); 4676 intel_attach_hdmi_colorspace_property(connector); 4677 } else { 4678 intel_attach_dp_colorspace_property(connector); 4679 } 4680 4681 if (IS_GEMINILAKE(dev_priv) || DISPLAY_VER(dev_priv) >= 11) 4682 drm_object_attach_property(&connector->base, 4683 connector->dev->mode_config.hdr_output_metadata_property, 4684 0); 4685 4686 if (intel_dp_is_edp(intel_dp)) { 4687 u32 allowed_scalers; 4688 4689 allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); 4690 if (!HAS_GMCH(dev_priv)) 4691 allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER); 4692 4693 drm_connector_attach_scaling_mode_property(connector, allowed_scalers); 4694 4695 connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT; 4696 4697 } 4698 4699 if (HAS_VRR(dev_priv)) 4700 drm_connector_attach_vrr_capable_property(connector); 4701 } 4702 4703 static bool intel_edp_init_connector(struct intel_dp *intel_dp, 4704 struct intel_connector *intel_connector) 4705 { 4706 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); 4707 struct drm_device *dev = &dev_priv->drm; 4708 struct drm_connector *connector = &intel_connector->base; 4709 struct drm_display_mode *fixed_mode = NULL; 4710 struct drm_display_mode *downclock_mode = NULL; 4711 bool has_dpcd; 4712 enum pipe pipe = INVALID_PIPE; 4713 struct edid *edid; 4714 4715 if (!intel_dp_is_edp(intel_dp)) 4716 return true; 4717 4718 /* 4719 * On IBX/CPT we may get here with LVDS already registered. Since the 4720 * driver uses the only internal power sequencer available for both 4721 * eDP and LVDS bail out early in this case to prevent interfering 4722 * with an already powered-on LVDS power sequencer. 4723 */ 4724 if (intel_get_lvds_encoder(dev_priv)) { 4725 drm_WARN_ON(dev, 4726 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); 4727 drm_info(&dev_priv->drm, 4728 "LVDS was detected, not registering eDP\n"); 4729 4730 return false; 4731 } 4732 4733 intel_pps_init(intel_dp); 4734 4735 /* Cache DPCD and EDID for edp. */ 4736 has_dpcd = intel_edp_init_dpcd(intel_dp); 4737 4738 if (!has_dpcd) { 4739 /* if this fails, presume the device is a ghost */ 4740 drm_info(&dev_priv->drm, 4741 "failed to retrieve link info, disabling eDP\n"); 4742 goto out_vdd_off; 4743 } 4744 4745 mutex_lock(&dev->mode_config.mutex); 4746 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 4747 if (edid) { 4748 if (drm_add_edid_modes(connector, edid)) { 4749 drm_connector_update_edid_property(connector, edid); 4750 } else { 4751 kfree(edid); 4752 edid = ERR_PTR(-EINVAL); 4753 } 4754 } else { 4755 edid = ERR_PTR(-ENOENT); 4756 } 4757 intel_connector->edid = edid; 4758 4759 fixed_mode = intel_panel_edid_fixed_mode(intel_connector); 4760 if (fixed_mode) 4761 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode); 4762 4763 /* multiply the mode clock and horizontal timings for MSO */ 4764 intel_edp_mso_mode_fixup(intel_connector, fixed_mode); 4765 intel_edp_mso_mode_fixup(intel_connector, downclock_mode); 4766 4767 /* fallback to VBT if available for eDP */ 4768 if (!fixed_mode) 4769 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector); 4770 mutex_unlock(&dev->mode_config.mutex); 4771 4772 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 4773 /* 4774 * Figure out the current pipe for the initial backlight setup. 4775 * If the current pipe isn't valid, try the PPS pipe, and if that 4776 * fails just assume pipe A. 4777 */ 4778 pipe = vlv_active_pipe(intel_dp); 4779 4780 if (pipe != PIPE_A && pipe != PIPE_B) 4781 pipe = intel_dp->pps.pps_pipe; 4782 4783 if (pipe != PIPE_A && pipe != PIPE_B) 4784 pipe = PIPE_A; 4785 4786 drm_dbg_kms(&dev_priv->drm, 4787 "using pipe %c for initial backlight setup\n", 4788 pipe_name(pipe)); 4789 } 4790 4791 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 4792 if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK)) 4793 intel_connector->panel.backlight.power = intel_pps_backlight_power; 4794 intel_panel_setup_backlight(connector, pipe); 4795 4796 if (fixed_mode) { 4797 drm_connector_set_panel_orientation_with_quirk(connector, 4798 dev_priv->vbt.orientation, 4799 fixed_mode->hdisplay, fixed_mode->vdisplay); 4800 } 4801 4802 return true; 4803 4804 out_vdd_off: 4805 intel_pps_vdd_off_sync(intel_dp); 4806 4807 return false; 4808 } 4809 4810 static void intel_dp_modeset_retry_work_fn(struct work_struct *work) 4811 { 4812 struct intel_connector *intel_connector; 4813 struct drm_connector *connector; 4814 4815 intel_connector = container_of(work, typeof(*intel_connector), 4816 modeset_retry_work); 4817 connector = &intel_connector->base; 4818 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 4819 connector->name); 4820 4821 /* Grab the locks before changing connector property*/ 4822 mutex_lock(&connector->dev->mode_config.mutex); 4823 /* Set connector link status to BAD and send a Uevent to notify 4824 * userspace to do a modeset. 4825 */ 4826 drm_connector_set_link_status_property(connector, 4827 DRM_MODE_LINK_STATUS_BAD); 4828 mutex_unlock(&connector->dev->mode_config.mutex); 4829 /* Send Hotplug uevent so userspace can reprobe */ 4830 drm_kms_helper_hotplug_event(connector->dev); 4831 } 4832 4833 bool 4834 intel_dp_init_connector(struct intel_digital_port *dig_port, 4835 struct intel_connector *intel_connector) 4836 { 4837 struct drm_connector *connector = &intel_connector->base; 4838 struct intel_dp *intel_dp = &dig_port->dp; 4839 struct intel_encoder *intel_encoder = &dig_port->base; 4840 struct drm_device *dev = intel_encoder->base.dev; 4841 struct drm_i915_private *dev_priv = to_i915(dev); 4842 enum port port = intel_encoder->port; 4843 enum phy phy = intel_port_to_phy(dev_priv, port); 4844 int type; 4845 4846 /* Initialize the work for modeset in case of link train failure */ 4847 INIT_WORK(&intel_connector->modeset_retry_work, 4848 intel_dp_modeset_retry_work_fn); 4849 4850 if (drm_WARN(dev, dig_port->max_lanes < 1, 4851 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", 4852 dig_port->max_lanes, intel_encoder->base.base.id, 4853 intel_encoder->base.name)) 4854 return false; 4855 4856 intel_dp_set_source_rates(intel_dp); 4857 intel_dp_set_default_sink_rates(intel_dp); 4858 intel_dp_set_common_rates(intel_dp); 4859 intel_dp_reset_max_link_params(intel_dp); 4860 4861 intel_dp->reset_link_params = true; 4862 intel_dp->pps.pps_pipe = INVALID_PIPE; 4863 intel_dp->pps.active_pipe = INVALID_PIPE; 4864 4865 /* Preserve the current hw state. */ 4866 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg); 4867 intel_dp->attached_connector = intel_connector; 4868 4869 if (intel_dp_is_port_edp(dev_priv, port)) { 4870 /* 4871 * Currently we don't support eDP on TypeC ports, although in 4872 * theory it could work on TypeC legacy ports. 4873 */ 4874 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy)); 4875 type = DRM_MODE_CONNECTOR_eDP; 4876 } else { 4877 type = DRM_MODE_CONNECTOR_DisplayPort; 4878 } 4879 4880 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4881 intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp); 4882 4883 /* 4884 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but 4885 * for DP the encoder type can be set by the caller to 4886 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it. 4887 */ 4888 if (type == DRM_MODE_CONNECTOR_eDP) 4889 intel_encoder->type = INTEL_OUTPUT_EDP; 4890 4891 /* eDP only on port B and/or C on vlv/chv */ 4892 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) || 4893 IS_CHERRYVIEW(dev_priv)) && 4894 intel_dp_is_edp(intel_dp) && 4895 port != PORT_B && port != PORT_C)) 4896 return false; 4897 4898 drm_dbg_kms(&dev_priv->drm, 4899 "Adding %s connector on [ENCODER:%d:%s]\n", 4900 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", 4901 intel_encoder->base.base.id, intel_encoder->base.name); 4902 4903 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 4904 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 4905 4906 if (!HAS_GMCH(dev_priv)) 4907 connector->interlace_allowed = true; 4908 connector->doublescan_allowed = 0; 4909 4910 intel_connector->polled = DRM_CONNECTOR_POLL_HPD; 4911 4912 intel_dp_aux_init(intel_dp); 4913 4914 intel_connector_attach_encoder(intel_connector, intel_encoder); 4915 4916 if (HAS_DDI(dev_priv)) 4917 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 4918 else 4919 intel_connector->get_hw_state = intel_connector_get_hw_state; 4920 4921 /* init MST on ports that can support it */ 4922 intel_dp_mst_encoder_init(dig_port, 4923 intel_connector->base.base.id); 4924 4925 if (!intel_edp_init_connector(intel_dp, intel_connector)) { 4926 intel_dp_aux_fini(intel_dp); 4927 intel_dp_mst_encoder_cleanup(dig_port); 4928 goto fail; 4929 } 4930 4931 intel_dp_add_properties(intel_dp, connector); 4932 4933 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) { 4934 int ret = intel_dp_hdcp_init(dig_port, intel_connector); 4935 if (ret) 4936 drm_dbg_kms(&dev_priv->drm, 4937 "HDCP init failed, skipping.\n"); 4938 } 4939 4940 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 4941 * 0xd. Failure to do so will result in spurious interrupts being 4942 * generated on the port when a cable is not attached. 4943 */ 4944 if (IS_G45(dev_priv)) { 4945 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA); 4946 intel_de_write(dev_priv, PEG_BAND_GAP_DATA, 4947 (temp & ~0xf) | 0xd); 4948 } 4949 4950 intel_dp->frl.is_trained = false; 4951 intel_dp->frl.trained_rate_gbps = 0; 4952 4953 intel_psr_init(intel_dp); 4954 4955 return true; 4956 4957 fail: 4958 drm_connector_cleanup(connector); 4959 4960 return false; 4961 } 4962 4963 void intel_dp_mst_suspend(struct drm_i915_private *dev_priv) 4964 { 4965 struct intel_encoder *encoder; 4966 4967 if (!HAS_DISPLAY(dev_priv)) 4968 return; 4969 4970 for_each_intel_encoder(&dev_priv->drm, encoder) { 4971 struct intel_dp *intel_dp; 4972 4973 if (encoder->type != INTEL_OUTPUT_DDI) 4974 continue; 4975 4976 intel_dp = enc_to_intel_dp(encoder); 4977 4978 if (!intel_dp->can_mst) 4979 continue; 4980 4981 if (intel_dp->is_mst) 4982 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr); 4983 } 4984 } 4985 4986 void intel_dp_mst_resume(struct drm_i915_private *dev_priv) 4987 { 4988 struct intel_encoder *encoder; 4989 4990 if (!HAS_DISPLAY(dev_priv)) 4991 return; 4992 4993 for_each_intel_encoder(&dev_priv->drm, encoder) { 4994 struct intel_dp *intel_dp; 4995 int ret; 4996 4997 if (encoder->type != INTEL_OUTPUT_DDI) 4998 continue; 4999 5000 intel_dp = enc_to_intel_dp(encoder); 5001 5002 if (!intel_dp->can_mst) 5003 continue; 5004 5005 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, 5006 true); 5007 if (ret) { 5008 intel_dp->is_mst = false; 5009 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, 5010 false); 5011 } 5012 } 5013 } 5014