/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <linux/slab.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows additional link rates.
 * Only the fixed rates are provided below; variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed-point format using the formula
	 * (m2_int << 22) | m2_fraction.
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
	struct drm_device *dev = intel_dp->attached_connector->base.dev;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
		     INTEL_INFO(dev)->gen >= 8) &&
		    intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
			max_link_bw = DP_LINK_BW_5_4;
		else
			max_link_bw = DP_LINK_BW_2_7;
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 * 270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
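 *
 * Carrying the example through: for the 1680x1050R mode at 18bpp,
 * intel_dp_link_required(119000, 18) == (119000 * 18 + 9) / 10 == 214200,
 * which fits in the 216000 decakilobits a single 2.7GHz lane provides.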
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
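
/*
 * A quick sketch of the byte order used by the two helpers above: the
 * first source byte lands in the most significant byte of the register
 * word, so packing the two bytes 0x12, 0x34 yields 0x12340000, and
 * unpack_aux(0x12340000, dst, 2) writes the same two bytes back out.
 */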

/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *out);

static enum i915_pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	enum i915_pipe pipe;

	/* modeset should have pipe */
	if (crtc)
		return to_intel_crtc(crtc)->pipe;

	/* init time, try to find a pipe with this port selected */
	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;
		if (port_sel == PANEL_PORT_SELECT_DPB_VLV && port == PORT_B)
			return pipe;
		if (port_sel == PANEL_PORT_SELECT_DPC_VLV && port == PORT_C)
			return pipe;
	}

	/* shrug */
	return PIPE_A;
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
   Only applicable when the panel PM state is not being tracked. */
#if 0
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;
	enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	if (IS_VALLEYVIEW(dev)) {
		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	return 0;
}
#endif
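
/*
 * A short summary of the panel power helpers above: on PCH-split
 * platforms _pp_ctrl_reg() and _pp_stat_reg() resolve to the fixed
 * PCH_PP_CONTROL/PCH_PP_STATUS registers, while on VLV the power
 * sequencer is per-pipe and the pipe is picked by
 * vlv_power_sequencer_pipe().
 */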

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;

	power_domain = intel_display_port_power_domain(intel_encoder);
	return intel_display_power_enabled(dev_priv, power_domain) &&
	       (I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
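
/*
 * Worked example for the divider computed below, assuming a 200MHz
 * hrawclk (the CLKCFG_FSB_800 case above): 200 / 2 == 100, and
 * 200MHz / 100 gives the desired 2MHz AUX bit clock.
 */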
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and we would like it
	 * to run at 2MHz. So take the hrawclk value, divide by 2, and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
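	 * The request is relaxed back to PM_QOS_DEFAULT_VALUE on the out:
	 * path below, once the transfer has finished.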
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   pack_aux(send + i, send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
				      DP_AUX_CH_CTL_RECEIVE_ERROR))
				continue;
			if (status & DP_AUX_CH_CTL_DONE)
				break;
		}
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	return ret;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;
	int retry;

	if (WARN_ON(send_bytes > 16))
		return -E2BIG;

	intel_dp_check_edp(intel_dp);
	msg[0] = DP_AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		ack >>= 4;
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
			return send_bytes;
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
			usleep_range(400, 500);
		else
			return -EIO;
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EIO;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;
	int retry;

	if (WARN_ON(recv_bytes > 19))
		return -E2BIG;

	intel_dp_check_edp(intel_dp);
	msg[0] = DP_AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0] >> 4;
		if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
			usleep_range(400, 500);
		else
			return -EIO;
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EIO;
}
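
/*
 * A sketch of the request built by intel_dp_aux_native_read() above:
 * reading one byte from DPCD address 0x000 (DP_DPCD_REV) sends the
 * header bytes { 0x90, 0x00, 0x00, 0x00 } - command, address high,
 * address low, length - 1 - and expects one ack byte followed by the
 * data in the reply.
 */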

static int
intel_dp_i2c_aux_ch(struct device *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *data = device_get_softc(adapter);
	struct intel_dp *intel_dp = data->priv;
	uint16_t address = data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_edp_panel_vdd_on(intel_dp);
	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = DP_AUX_I2C_READ << 4;
	else
		msg[0] = DP_AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= DP_AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	/*
	 * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device is
	 * required to retry at least seven times upon receiving AUX_DEFER
	 * before giving up the AUX transaction.
	 */
	for (retry = 0; retry < 7; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			goto out;
		}

		switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) {
		case DP_AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case DP_AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_NATIVE_REPLY_DEFER:
			/*
			 * For now, just give more slack to branch devices. We
			 * could check the DPCD for I2C bit rate capabilities,
			 * and if available, adjust the interval. We could also
			 * be more careful with DP-to-Legacy adapters where a
			 * long legacy cable may force very low I2C bit rates.
			 */
			if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			    DP_DWN_STRM_PORT_PRESENT)
				usleep_range(500, 600);
			else
				usleep_range(300, 400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}

		switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) {
		case DP_AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			ret = 0;	/* reply_bytes - 1 */
			goto out;
		case DP_AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			ret = -EREMOTEIO;
			goto out;
		case DP_AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			ret = -EREMOTEIO;
			goto out;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	ret = -EREMOTEIO;

out:
	return ret;
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	intel_connector_unregister(intel_connector);
}

static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
#if 0
	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = intel_connector->base.dev->dev;

	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	if (ret < 0)
		return ret;

	ret = sysfs_create_link(&intel_connector->base.kdev->kobj,
				&intel_dp->adapter.dev.kobj,
				intel_dp->adapter.dev.kobj.name);
#endif
	ret = iic_dp_aux_add_bus(intel_connector->base.dev->dev, name,
	    intel_dp_i2c_aux_ch, intel_dp, &intel_dp->dp_iic_bus,
	    &intel_dp->adapter);

	return ret;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_config *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (IS_HASWELL(dev)) {
		/* Haswell has special-purpose DP DDI clocks. */
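		/* (Nothing to pick here: divisor stays NULL, so
		 * pipe_config->clock_set is left false and the DDI clock
		 * setup is used instead.) */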
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void
intel_dp_set_m2_n2(struct intel_crtc *crtc, struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder transcoder = crtc->config.cpu_transcoder;

	I915_WRITE(PIPE_DATA_M2(transcoder),
		   TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PIPE_DATA_N2(transcoder), m_n->gmch_n);
	I915_WRITE(PIPE_LINK_M2(transcoder), m_n->link_m);
	I915_WRITE(PIPE_LINK_N2(transcoder), m_n->link_n);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock = intel_dp_max_link_bw(intel_dp) >> 3;
	int bpp, mode_rate;
	static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		if (IS_BROADWELL(dev)) {
			/* Yes, it's an ugly hack. */
			min_lane_count = max_lane_count;
			DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n",
				      min_lane_count);
		} else if (dev_priv->vbt.edp_lanes) {
			min_lane_count = min(dev_priv->vbt.edp_lanes,
					     max_lane_count);
			DRM_DEBUG_KMS("using min %u lanes per VBT\n",
				      min_lane_count);
		}

		if (dev_priv->vbt.edp_rate) {
			min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock);
			DRM_DEBUG_KMS("using min %02x link bw per VBT\n",
				      bws[min_clock]);
		}
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    intel_dp->drrs_state.type == SEAMLESS_DRRS_SUPPORT) {
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config.has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(&encoder->base, adjusted_mode);
	}

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (!IS_CHERRYVIEW(dev)) {
			if (crtc->pipe == 1)
				intel_dp->DP |= DP_PIPEB_SELECT;
		} else {
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
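
/*
 * Each IDLE_*_MASK/IDLE_*_VALUE pair above is consumed by
 * wait_panel_status() below, which polls the panel power status
 * register until (status & mask) == value, e.g.
 *
 *	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 */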

static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}

static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}

static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}

static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}

/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP VDD on\n");

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 1314 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1315 /* 1316 * If the panel wasn't on, delay before accessing aux channel 1317 */ 1318 if (!edp_have_panel_power(intel_dp)) { 1319 DRM_DEBUG_KMS("eDP was not running\n"); 1320 msleep(intel_dp->panel_power_up_delay); 1321 } 1322 } 1323 1324 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 1325 { 1326 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1327 struct drm_i915_private *dev_priv = dev->dev_private; 1328 u32 pp; 1329 u32 pp_stat_reg, pp_ctrl_reg; 1330 1331 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 1332 1333 if (!intel_dp->want_panel_vdd && edp_have_panel_vdd(intel_dp)) { 1334 struct intel_digital_port *intel_dig_port = 1335 dp_to_dig_port(intel_dp); 1336 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1337 enum intel_display_power_domain power_domain; 1338 1339 DRM_DEBUG_KMS("Turning eDP VDD off\n"); 1340 1341 pp = ironlake_get_pp_control(intel_dp); 1342 pp &= ~EDP_FORCE_VDD; 1343 1344 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1345 pp_stat_reg = _pp_stat_reg(intel_dp); 1346 1347 I915_WRITE(pp_ctrl_reg, pp); 1348 POSTING_READ(pp_ctrl_reg); 1349 1350 /* Make sure sequencer is idle before allowing subsequent activity */ 1351 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", 1352 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); 1353 1354 if ((pp & POWER_TARGET_ON) == 0) 1355 intel_dp->last_power_cycle = jiffies; 1356 1357 power_domain = intel_display_port_power_domain(intel_encoder); 1358 intel_display_power_put(dev_priv, power_domain); 1359 } 1360 } 1361 1362 static void edp_panel_vdd_work(struct work_struct *__work) 1363 { 1364 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1365 struct intel_dp, panel_vdd_work); 1366 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1367 1368 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 1369 edp_panel_vdd_off_sync(intel_dp); 1370 drm_modeset_unlock(&dev->mode_config.connection_mutex); 1371 } 1372 1373 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1374 { 1375 if (!is_edp(intel_dp)) 1376 return; 1377 1378 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1379 1380 intel_dp->want_panel_vdd = false; 1381 1382 if (sync) { 1383 edp_panel_vdd_off_sync(intel_dp); 1384 } else { 1385 /* 1386 * Queue the timer to fire a long 1387 * time from now (relative to the power down delay) 1388 * to keep the panel power up across a sequence of operations 1389 */ 1390 schedule_delayed_work(&intel_dp->panel_vdd_work, 1391 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); 1392 } 1393 } 1394 1395 void intel_edp_panel_on(struct intel_dp *intel_dp) 1396 { 1397 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1398 struct drm_i915_private *dev_priv = dev->dev_private; 1399 u32 pp; 1400 u32 pp_ctrl_reg; 1401 1402 if (!is_edp(intel_dp)) 1403 return; 1404 1405 DRM_DEBUG_KMS("Turn eDP power on\n"); 1406 1407 if (edp_have_panel_power(intel_dp)) { 1408 DRM_DEBUG_KMS("eDP power already on\n"); 1409 return; 1410 } 1411 1412 wait_panel_power_cycle(intel_dp); 1413 1414 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1415 pp = ironlake_get_pp_control(intel_dp); 1416 if (IS_GEN5(dev)) { 1417 /* ILK workaround: disable reset around power sequence */ 1418 pp &= ~PANEL_POWER_RESET; 1419 I915_WRITE(pp_ctrl_reg, pp); 1420 POSTING_READ(pp_ctrl_reg); 1421 } 1422 1423 pp |= POWER_TARGET_ON; 1424 if (!IS_GEN5(dev)) 1425 pp |= PANEL_POWER_RESET; 1426 1427 

void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}

void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	edp_wait_backlight_off(intel_dp);

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}

void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(intel_dp->attached_connector);
}

void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(intel_dp->attached_connector);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	intel_dp->last_backlight_off = jiffies;
}

static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail). */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum i915_pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);
	if (tmp & DP_AUDIO_OUTPUT_ENABLE)
		pipe_config->has_audio = true;

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	pipe_config->has_dp_encoder = true;

	intel_dp_get_m_n(crtc, pipe_config);

	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}

static bool is_edp_psr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->psr.sink_support;
}

static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_PSR(dev))
		return false;

	return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
}

static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP before programming the video DIP data buffer
	   registers for the DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}

static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);

	intel_dp->psr_setup_done = true;
}

static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider;
	int precharge = 0x3;
	int msg_size = 5;       /* Header(4) + Message(1) */

	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE &
					    ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE |
					    DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
	I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
	I915_WRITE(EDP_PSR_AUX_CTL(dev),
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}

static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	uint32_t idle_frames = 1;
	uint32_t val = 0x0;
	const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
	} else
		val |= EDP_PSR_LINK_DISABLE;
0 : link_entry_time | 1872 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 1873 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 1874 EDP_PSR_ENABLE); 1875 } 1876 1877 static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp) 1878 { 1879 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 1880 struct drm_device *dev = dig_port->base.base.dev; 1881 struct drm_i915_private *dev_priv = dev->dev_private; 1882 struct drm_crtc *crtc = dig_port->base.base.crtc; 1883 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1884 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->primary->fb)->obj; 1885 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; 1886 1887 dev_priv->psr.source_ok = false; 1888 1889 if (!HAS_PSR(dev)) { 1890 DRM_DEBUG_KMS("PSR not supported on this platform\n"); 1891 return false; 1892 } 1893 1894 if ((intel_encoder->type != INTEL_OUTPUT_EDP) || 1895 (dig_port->port != PORT_A)) { 1896 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n"); 1897 return false; 1898 } 1899 1900 if (!i915.enable_psr) { 1901 DRM_DEBUG_KMS("PSR disable by flag\n"); 1902 return false; 1903 } 1904 1905 crtc = dig_port->base.base.crtc; 1906 if (crtc == NULL) { 1907 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1908 return false; 1909 } 1910 1911 intel_crtc = to_intel_crtc(crtc); 1912 if (!intel_crtc_active(crtc)) { 1913 DRM_DEBUG_KMS("crtc not active for PSR\n"); 1914 return false; 1915 } 1916 1917 obj = to_intel_framebuffer(crtc->primary->fb)->obj; 1918 if (obj->tiling_mode != I915_TILING_X || 1919 obj->fence_reg == I915_FENCE_REG_NONE) { 1920 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n"); 1921 return false; 1922 } 1923 1924 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) { 1925 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n"); 1926 return false; 1927 } 1928 1929 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) & 1930 S3D_ENABLE) { 1931 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n"); 1932 return false; 1933 } 1934 1935 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 1936 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); 1937 return false; 1938 } 1939 1940 dev_priv->psr.source_ok = true; 1941 return true; 1942 } 1943 1944 static void intel_edp_psr_do_enable(struct intel_dp *intel_dp) 1945 { 1946 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1947 1948 if (!intel_edp_psr_match_conditions(intel_dp) || 1949 intel_edp_is_psr_enabled(dev)) 1950 return; 1951 1952 /* Setup PSR once */ 1953 intel_edp_psr_setup(intel_dp); 1954 1955 /* Enable PSR on the panel */ 1956 intel_edp_psr_enable_sink(intel_dp); 1957 1958 /* Enable PSR on the host */ 1959 intel_edp_psr_enable_source(intel_dp); 1960 } 1961 1962 void intel_edp_psr_enable(struct intel_dp *intel_dp) 1963 { 1964 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1965 1966 if (intel_edp_psr_match_conditions(intel_dp) && 1967 !intel_edp_is_psr_enabled(dev)) 1968 intel_edp_psr_do_enable(intel_dp); 1969 } 1970 1971 void intel_edp_psr_disable(struct intel_dp *intel_dp) 1972 { 1973 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1974 struct drm_i915_private *dev_priv = dev->dev_private; 1975 1976 if (!intel_edp_is_psr_enabled(dev)) 1977 return; 1978 1979 I915_WRITE(EDP_PSR_CTL(dev), 1980 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE); 1981 1982 /* Wait till PSR is idle */ 1983 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) & 1984 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) 1985 DRM_ERROR("Timed out waiting for 
void intel_edp_psr_update(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
		if (encoder->type == INTEL_OUTPUT_EDP) {
			intel_dp = enc_to_intel_dp(&encoder->base);

			/* sink_support is tracked device-wide, so if the
			 * sink lacks PSR there is nothing left to do. */
			if (!is_edp_psr(dev))
				return;

			if (!intel_edp_psr_match_conditions(intel_dp))
				intel_edp_psr_disable(intel_dp);
			else if (!intel_edp_is_psr_enabled(dev))
				intel_edp_psr_do_enable(intel_dp);
		}
}

static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}

static void g4x_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (port != PORT_A)
		return;

	intel_dp_link_down(intel_dp);
	ironlake_edp_pll_off(intel_dp);
}

static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}

static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum i915_pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->dpio_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->dpio_lock);
}

static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg =
I915_READ(intel_dp->output_reg); 2087 2088 if (WARN_ON(dp_reg & DP_PORT_EN)) 2089 return; 2090 2091 intel_edp_panel_vdd_on(intel_dp); 2092 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2093 intel_dp_start_link_train(intel_dp); 2094 intel_edp_panel_on(intel_dp); 2095 edp_panel_vdd_off(intel_dp, true); 2096 intel_dp_complete_link_train(intel_dp); 2097 intel_dp_stop_link_train(intel_dp); 2098 } 2099 2100 static void g4x_enable_dp(struct intel_encoder *encoder) 2101 { 2102 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2103 2104 intel_enable_dp(encoder); 2105 intel_edp_backlight_on(intel_dp); 2106 } 2107 2108 static void vlv_enable_dp(struct intel_encoder *encoder) 2109 { 2110 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2111 2112 intel_edp_backlight_on(intel_dp); 2113 } 2114 2115 static void g4x_pre_enable_dp(struct intel_encoder *encoder) 2116 { 2117 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2118 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2119 2120 intel_dp_prepare(encoder); 2121 2122 /* Only ilk+ has port A */ 2123 if (dport->port == PORT_A) { 2124 ironlake_set_pll_cpu_edp(intel_dp); 2125 ironlake_edp_pll_on(intel_dp); 2126 } 2127 } 2128 2129 static void vlv_pre_enable_dp(struct intel_encoder *encoder) 2130 { 2131 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2132 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2133 struct drm_device *dev = encoder->base.dev; 2134 struct drm_i915_private *dev_priv = dev->dev_private; 2135 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 2136 enum dpio_channel port = vlv_dport_to_channel(dport); 2137 int pipe = intel_crtc->pipe; 2138 struct edp_power_seq power_seq; 2139 u32 val; 2140 2141 mutex_lock(&dev_priv->dpio_lock); 2142 2143 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port)); 2144 val = 0; 2145 if (pipe) 2146 val |= (1<<21); 2147 else 2148 val &= ~(1<<21); 2149 val |= 0x001000c4; 2150 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val); 2151 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018); 2152 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888); 2153 2154 mutex_unlock(&dev_priv->dpio_lock); 2155 2156 if (is_edp(intel_dp)) { 2157 /* init power sequencer on this pipe and port */ 2158 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2159 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2160 &power_seq); 2161 } 2162 2163 intel_enable_dp(encoder); 2164 2165 vlv_wait_port_ready(dev_priv, dport); 2166 } 2167 2168 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) 2169 { 2170 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 2171 struct drm_device *dev = encoder->base.dev; 2172 struct drm_i915_private *dev_priv = dev->dev_private; 2173 struct intel_crtc *intel_crtc = 2174 to_intel_crtc(encoder->base.crtc); 2175 enum dpio_channel port = vlv_dport_to_channel(dport); 2176 int pipe = intel_crtc->pipe; 2177 2178 intel_dp_prepare(encoder); 2179 2180 /* Program Tx lane resets to default */ 2181 mutex_lock(&dev_priv->dpio_lock); 2182 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 2183 DPIO_PCS_TX_LANE2_RESET | 2184 DPIO_PCS_TX_LANE1_RESET); 2185 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 2186 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN | 2187 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN | 2188 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) | 2189 DPIO_PCS_CLK_SOFT_RESET); 2190 2191 /* Fix up inter-pair skew failure */ 2192 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00); 2193 
vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500); 2194 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000); 2195 mutex_unlock(&dev_priv->dpio_lock); 2196 } 2197 2198 static void chv_pre_enable_dp(struct intel_encoder *encoder) 2199 { 2200 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2201 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2202 struct drm_device *dev = encoder->base.dev; 2203 struct drm_i915_private *dev_priv = dev->dev_private; 2204 struct edp_power_seq power_seq; 2205 struct intel_crtc *intel_crtc = 2206 to_intel_crtc(encoder->base.crtc); 2207 enum dpio_channel ch = vlv_dport_to_channel(dport); 2208 int pipe = intel_crtc->pipe; 2209 int data, i; 2210 u32 val; 2211 2212 mutex_lock(&dev_priv->dpio_lock); 2213 2214 /* Deassert soft data lane reset*/ 2215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); 2216 val |= CHV_PCS_REQ_SOFTRESET_EN; 2217 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); 2218 2219 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); 2220 val |= CHV_PCS_REQ_SOFTRESET_EN; 2221 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); 2222 2223 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); 2224 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); 2225 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); 2226 2227 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); 2228 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); 2229 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); 2230 2231 /* Program Tx lane latency optimal setting*/ 2232 for (i = 0; i < 4; i++) { 2233 /* Set the latency optimal bit */ 2234 data = (i == 1) ? 0x0 : 0x6; 2235 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i), 2236 data << DPIO_FRC_LATENCY_SHFIT); 2237 2238 /* Set the upar bit */ 2239 data = (i == 1) ? 0x0 : 0x1; 2240 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), 2241 data << DPIO_UPAR_SHIFT); 2242 } 2243 2244 /* Data lane stagger programming */ 2245 /* FIXME: Fix up value only after power analysis */ 2246 2247 mutex_unlock(&dev_priv->dpio_lock); 2248 2249 if (is_edp(intel_dp)) { 2250 /* init power sequencer on this pipe and port */ 2251 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2252 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2253 &power_seq); 2254 } 2255 2256 intel_enable_dp(encoder); 2257 2258 vlv_wait_port_ready(dev_priv, dport); 2259 } 2260 2261 /* 2262 * Native read with retry for link status and receiver capability reads for 2263 * cases where the sink may still be asleep. 2264 */ 2265 static bool 2266 intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address, 2267 uint8_t *recv, int recv_bytes) 2268 { 2269 int ret, i; 2270 2271 /* 2272 * Sinks are *supposed* to come up within 1ms from an off state, 2273 * but we're also supposed to retry 3 times per the spec. 
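	 * The loop below therefore issues the AUX read up to three times,
	 * sleeping 1 ms between attempts, and treats only a reply of the
	 * full requested length as success.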
2274 */ 2275 for (i = 0; i < 3; i++) { 2276 ret = intel_dp_aux_native_read(intel_dp, address, recv, 2277 recv_bytes); 2278 if (ret == recv_bytes) 2279 return true; 2280 msleep(1); 2281 } 2282 2283 return false; 2284 } 2285 2286 /* 2287 * Fetch AUX CH registers 0x202 - 0x207 which contain 2288 * link status information 2289 */ 2290 static bool 2291 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 2292 { 2293 return intel_dp_aux_native_read_retry(intel_dp, 2294 DP_LANE0_1_STATUS, 2295 link_status, 2296 DP_LINK_STATUS_SIZE); 2297 } 2298 2299 /* 2300 * These are source-specific values; current Intel hardware supports 2301 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 2302 */ 2303 2304 static uint8_t 2305 intel_dp_voltage_max(struct intel_dp *intel_dp) 2306 { 2307 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2308 enum port port = dp_to_dig_port(intel_dp)->port; 2309 2310 if (IS_VALLEYVIEW(dev) || IS_BROADWELL(dev)) 2311 return DP_TRAIN_VOLTAGE_SWING_1200; 2312 else if (IS_GEN7(dev) && port == PORT_A) 2313 return DP_TRAIN_VOLTAGE_SWING_800; 2314 else if (HAS_PCH_CPT(dev) && port != PORT_A) 2315 return DP_TRAIN_VOLTAGE_SWING_1200; 2316 else 2317 return DP_TRAIN_VOLTAGE_SWING_800; 2318 } 2319 2320 static uint8_t 2321 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 2322 { 2323 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2324 enum port port = dp_to_dig_port(intel_dp)->port; 2325 2326 if (IS_BROADWELL(dev)) { 2327 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2328 case DP_TRAIN_VOLTAGE_SWING_400: 2329 case DP_TRAIN_VOLTAGE_SWING_600: 2330 return DP_TRAIN_PRE_EMPHASIS_6; 2331 case DP_TRAIN_VOLTAGE_SWING_800: 2332 return DP_TRAIN_PRE_EMPHASIS_3_5; 2333 case DP_TRAIN_VOLTAGE_SWING_1200: 2334 default: 2335 return DP_TRAIN_PRE_EMPHASIS_0; 2336 } 2337 } else if (IS_HASWELL(dev)) { 2338 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2339 case DP_TRAIN_VOLTAGE_SWING_400: 2340 return DP_TRAIN_PRE_EMPHASIS_9_5; 2341 case DP_TRAIN_VOLTAGE_SWING_600: 2342 return DP_TRAIN_PRE_EMPHASIS_6; 2343 case DP_TRAIN_VOLTAGE_SWING_800: 2344 return DP_TRAIN_PRE_EMPHASIS_3_5; 2345 case DP_TRAIN_VOLTAGE_SWING_1200: 2346 default: 2347 return DP_TRAIN_PRE_EMPHASIS_0; 2348 } 2349 } else if (IS_VALLEYVIEW(dev)) { 2350 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2351 case DP_TRAIN_VOLTAGE_SWING_400: 2352 return DP_TRAIN_PRE_EMPHASIS_9_5; 2353 case DP_TRAIN_VOLTAGE_SWING_600: 2354 return DP_TRAIN_PRE_EMPHASIS_6; 2355 case DP_TRAIN_VOLTAGE_SWING_800: 2356 return DP_TRAIN_PRE_EMPHASIS_3_5; 2357 case DP_TRAIN_VOLTAGE_SWING_1200: 2358 default: 2359 return DP_TRAIN_PRE_EMPHASIS_0; 2360 } 2361 } else if (IS_GEN7(dev) && port == PORT_A) { 2362 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2363 case DP_TRAIN_VOLTAGE_SWING_400: 2364 return DP_TRAIN_PRE_EMPHASIS_6; 2365 case DP_TRAIN_VOLTAGE_SWING_600: 2366 case DP_TRAIN_VOLTAGE_SWING_800: 2367 return DP_TRAIN_PRE_EMPHASIS_3_5; 2368 default: 2369 return DP_TRAIN_PRE_EMPHASIS_0; 2370 } 2371 } else { 2372 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2373 case DP_TRAIN_VOLTAGE_SWING_400: 2374 return DP_TRAIN_PRE_EMPHASIS_6; 2375 case DP_TRAIN_VOLTAGE_SWING_600: 2376 return DP_TRAIN_PRE_EMPHASIS_6; 2377 case DP_TRAIN_VOLTAGE_SWING_800: 2378 return DP_TRAIN_PRE_EMPHASIS_3_5; 2379 case DP_TRAIN_VOLTAGE_SWING_1200: 2380 default: 2381 return DP_TRAIN_PRE_EMPHASIS_0; 2382 } 2383 } 2384 } 2385 2386 static uint32_t intel_vlv_signal_levels(struct intel_dp 
*intel_dp) 2387 { 2388 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2389 struct drm_i915_private *dev_priv = dev->dev_private; 2390 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2391 struct intel_crtc *intel_crtc = 2392 to_intel_crtc(dport->base.base.crtc); 2393 unsigned long demph_reg_value, preemph_reg_value, 2394 uniqtranscale_reg_value; 2395 uint8_t train_set = intel_dp->train_set[0]; 2396 enum dpio_channel port = vlv_dport_to_channel(dport); 2397 int pipe = intel_crtc->pipe; 2398 2399 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2400 case DP_TRAIN_PRE_EMPHASIS_0: 2401 preemph_reg_value = 0x0004000; 2402 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2403 case DP_TRAIN_VOLTAGE_SWING_400: 2404 demph_reg_value = 0x2B405555; 2405 uniqtranscale_reg_value = 0x552AB83A; 2406 break; 2407 case DP_TRAIN_VOLTAGE_SWING_600: 2408 demph_reg_value = 0x2B404040; 2409 uniqtranscale_reg_value = 0x5548B83A; 2410 break; 2411 case DP_TRAIN_VOLTAGE_SWING_800: 2412 demph_reg_value = 0x2B245555; 2413 uniqtranscale_reg_value = 0x5560B83A; 2414 break; 2415 case DP_TRAIN_VOLTAGE_SWING_1200: 2416 demph_reg_value = 0x2B405555; 2417 uniqtranscale_reg_value = 0x5598DA3A; 2418 break; 2419 default: 2420 return 0; 2421 } 2422 break; 2423 case DP_TRAIN_PRE_EMPHASIS_3_5: 2424 preemph_reg_value = 0x0002000; 2425 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2426 case DP_TRAIN_VOLTAGE_SWING_400: 2427 demph_reg_value = 0x2B404040; 2428 uniqtranscale_reg_value = 0x5552B83A; 2429 break; 2430 case DP_TRAIN_VOLTAGE_SWING_600: 2431 demph_reg_value = 0x2B404848; 2432 uniqtranscale_reg_value = 0x5580B83A; 2433 break; 2434 case DP_TRAIN_VOLTAGE_SWING_800: 2435 demph_reg_value = 0x2B404040; 2436 uniqtranscale_reg_value = 0x55ADDA3A; 2437 break; 2438 default: 2439 return 0; 2440 } 2441 break; 2442 case DP_TRAIN_PRE_EMPHASIS_6: 2443 preemph_reg_value = 0x0000000; 2444 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2445 case DP_TRAIN_VOLTAGE_SWING_400: 2446 demph_reg_value = 0x2B305555; 2447 uniqtranscale_reg_value = 0x5570B83A; 2448 break; 2449 case DP_TRAIN_VOLTAGE_SWING_600: 2450 demph_reg_value = 0x2B2B4040; 2451 uniqtranscale_reg_value = 0x55ADDA3A; 2452 break; 2453 default: 2454 return 0; 2455 } 2456 break; 2457 case DP_TRAIN_PRE_EMPHASIS_9_5: 2458 preemph_reg_value = 0x0006000; 2459 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2460 case DP_TRAIN_VOLTAGE_SWING_400: 2461 demph_reg_value = 0x1B405555; 2462 uniqtranscale_reg_value = 0x55ADDA3A; 2463 break; 2464 default: 2465 return 0; 2466 } 2467 break; 2468 default: 2469 return 0; 2470 } 2471 2472 mutex_lock(&dev_priv->dpio_lock); 2473 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); 2474 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value); 2475 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 2476 uniqtranscale_reg_value); 2477 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040); 2478 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000); 2479 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value); 2480 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000); 2481 mutex_unlock(&dev_priv->dpio_lock); 2482 2483 return 0; 2484 } 2485 2486 static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp) 2487 { 2488 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2489 struct drm_i915_private *dev_priv = dev->dev_private; 2490 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 2491 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc); 2492 u32 
deemph_reg_value, margin_reg_value, val; 2493 uint8_t train_set = intel_dp->train_set[0]; 2494 enum dpio_channel ch = vlv_dport_to_channel(dport); 2495 enum i915_pipe pipe = intel_crtc->pipe; 2496 int i; 2497 2498 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2499 case DP_TRAIN_PRE_EMPHASIS_0: 2500 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2501 case DP_TRAIN_VOLTAGE_SWING_400: 2502 deemph_reg_value = 128; 2503 margin_reg_value = 52; 2504 break; 2505 case DP_TRAIN_VOLTAGE_SWING_600: 2506 deemph_reg_value = 128; 2507 margin_reg_value = 77; 2508 break; 2509 case DP_TRAIN_VOLTAGE_SWING_800: 2510 deemph_reg_value = 128; 2511 margin_reg_value = 102; 2512 break; 2513 case DP_TRAIN_VOLTAGE_SWING_1200: 2514 deemph_reg_value = 128; 2515 margin_reg_value = 154; 2516 /* FIXME extra to set for 1200 */ 2517 break; 2518 default: 2519 return 0; 2520 } 2521 break; 2522 case DP_TRAIN_PRE_EMPHASIS_3_5: 2523 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2524 case DP_TRAIN_VOLTAGE_SWING_400: 2525 deemph_reg_value = 85; 2526 margin_reg_value = 78; 2527 break; 2528 case DP_TRAIN_VOLTAGE_SWING_600: 2529 deemph_reg_value = 85; 2530 margin_reg_value = 116; 2531 break; 2532 case DP_TRAIN_VOLTAGE_SWING_800: 2533 deemph_reg_value = 85; 2534 margin_reg_value = 154; 2535 break; 2536 default: 2537 return 0; 2538 } 2539 break; 2540 case DP_TRAIN_PRE_EMPHASIS_6: 2541 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2542 case DP_TRAIN_VOLTAGE_SWING_400: 2543 deemph_reg_value = 64; 2544 margin_reg_value = 104; 2545 break; 2546 case DP_TRAIN_VOLTAGE_SWING_600: 2547 deemph_reg_value = 64; 2548 margin_reg_value = 154; 2549 break; 2550 default: 2551 return 0; 2552 } 2553 break; 2554 case DP_TRAIN_PRE_EMPHASIS_9_5: 2555 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2556 case DP_TRAIN_VOLTAGE_SWING_400: 2557 deemph_reg_value = 43; 2558 margin_reg_value = 154; 2559 break; 2560 default: 2561 return 0; 2562 } 2563 break; 2564 default: 2565 return 0; 2566 } 2567 2568 mutex_lock(&dev_priv->dpio_lock); 2569 2570 /* Clear calc init */ 2571 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); 2572 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 2573 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); 2574 2575 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); 2576 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 2577 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 2578 2579 /* Program swing deemph */ 2580 for (i = 0; i < 4; i++) { 2581 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); 2582 val &= ~DPIO_SWING_DEEMPH9P5_MASK; 2583 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT; 2584 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val); 2585 } 2586 2587 /* Program swing margin */ 2588 for (i = 0; i < 4; i++) { 2589 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); 2590 val &= ~DPIO_SWING_MARGIN_MASK; 2591 val |= margin_reg_value << DPIO_SWING_MARGIN_SHIFT; 2592 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); 2593 } 2594 2595 /* Disable unique transition scale */ 2596 for (i = 0; i < 4; i++) { 2597 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); 2598 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN; 2599 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); 2600 } 2601 2602 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK) 2603 == DP_TRAIN_PRE_EMPHASIS_0) && 2604 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK) 2605 == DP_TRAIN_VOLTAGE_SWING_1200)) { 2606 2607 /* 2608 * The document said it needs to set bit 27 for ch0 and bit 26 2609 * 
for ch1. Might be a typo in the doc. 2610 * For now, for this unique transition scale selection, set bit 2611 * 27 for ch0 and ch1. 2612 */ 2613 for (i = 0; i < 4; i++) { 2614 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i)); 2615 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN; 2616 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val); 2617 } 2618 2619 for (i = 0; i < 4; i++) { 2620 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i)); 2621 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT); 2622 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT); 2623 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val); 2624 } 2625 } 2626 2627 /* Start swing calculation */ 2628 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); 2629 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 2630 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); 2631 2632 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); 2633 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 2634 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 2635 2636 /* LRC Bypass */ 2637 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 2638 val |= DPIO_LRC_BYPASS; 2639 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val); 2640 2641 mutex_unlock(&dev_priv->dpio_lock); 2642 2643 return 0; 2644 } 2645 2646 static void 2647 intel_get_adjust_train(struct intel_dp *intel_dp, 2648 const uint8_t link_status[DP_LINK_STATUS_SIZE]) 2649 { 2650 uint8_t v = 0; 2651 uint8_t p = 0; 2652 int lane; 2653 uint8_t voltage_max; 2654 uint8_t preemph_max; 2655 2656 for (lane = 0; lane < intel_dp->lane_count; lane++) { 2657 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); 2658 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); 2659 2660 if (this_v > v) 2661 v = this_v; 2662 if (this_p > p) 2663 p = this_p; 2664 } 2665 2666 voltage_max = intel_dp_voltage_max(intel_dp); 2667 if (v >= voltage_max) 2668 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 2669 2670 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v); 2671 if (p >= preemph_max) 2672 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 2673 2674 for (lane = 0; lane < 4; lane++) 2675 intel_dp->train_set[lane] = v | p; 2676 } 2677 2678 static uint32_t 2679 intel_gen4_signal_levels(uint8_t train_set) 2680 { 2681 uint32_t signal_levels = 0; 2682 2683 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { 2684 case DP_TRAIN_VOLTAGE_SWING_400: 2685 default: 2686 signal_levels |= DP_VOLTAGE_0_4; 2687 break; 2688 case DP_TRAIN_VOLTAGE_SWING_600: 2689 signal_levels |= DP_VOLTAGE_0_6; 2690 break; 2691 case DP_TRAIN_VOLTAGE_SWING_800: 2692 signal_levels |= DP_VOLTAGE_0_8; 2693 break; 2694 case DP_TRAIN_VOLTAGE_SWING_1200: 2695 signal_levels |= DP_VOLTAGE_1_2; 2696 break; 2697 } 2698 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2699 case DP_TRAIN_PRE_EMPHASIS_0: 2700 default: 2701 signal_levels |= DP_PRE_EMPHASIS_0; 2702 break; 2703 case DP_TRAIN_PRE_EMPHASIS_3_5: 2704 signal_levels |= DP_PRE_EMPHASIS_3_5; 2705 break; 2706 case DP_TRAIN_PRE_EMPHASIS_6: 2707 signal_levels |= DP_PRE_EMPHASIS_6; 2708 break; 2709 case DP_TRAIN_PRE_EMPHASIS_9_5: 2710 signal_levels |= DP_PRE_EMPHASIS_9_5; 2711 break; 2712 } 2713 return signal_levels; 2714 } 2715 2716 /* Gen6's DP voltage swing and pre-emphasis control */ 2717 static uint32_t 2718 intel_gen6_edp_signal_levels(uint8_t train_set) 2719 { 2720 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 2721 DP_TRAIN_PRE_EMPHASIS_MASK); 2722 switch (signal_levels) { 2723 case DP_TRAIN_VOLTAGE_SWING_400 | 
DP_TRAIN_PRE_EMPHASIS_0: 2724 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 2725 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 2726 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 2727 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B; 2728 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 2729 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 2730 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B; 2731 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 2732 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 2733 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B; 2734 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 2735 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 2736 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B; 2737 default: 2738 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 2739 "0x%x\n", signal_levels); 2740 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B; 2741 } 2742 } 2743 2744 /* Gen7's DP voltage swing and pre-emphasis control */ 2745 static uint32_t 2746 intel_gen7_edp_signal_levels(uint8_t train_set) 2747 { 2748 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 2749 DP_TRAIN_PRE_EMPHASIS_MASK); 2750 switch (signal_levels) { 2751 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 2752 return EDP_LINK_TRAIN_400MV_0DB_IVB; 2753 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 2754 return EDP_LINK_TRAIN_400MV_3_5DB_IVB; 2755 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 2756 return EDP_LINK_TRAIN_400MV_6DB_IVB; 2757 2758 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 2759 return EDP_LINK_TRAIN_600MV_0DB_IVB; 2760 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 2761 return EDP_LINK_TRAIN_600MV_3_5DB_IVB; 2762 2763 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 2764 return EDP_LINK_TRAIN_800MV_0DB_IVB; 2765 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 2766 return EDP_LINK_TRAIN_800MV_3_5DB_IVB; 2767 2768 default: 2769 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 2770 "0x%x\n", signal_levels); 2771 return EDP_LINK_TRAIN_500MV_0DB_IVB; 2772 } 2773 } 2774 2775 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 2776 static uint32_t 2777 intel_hsw_signal_levels(uint8_t train_set) 2778 { 2779 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 2780 DP_TRAIN_PRE_EMPHASIS_MASK); 2781 switch (signal_levels) { 2782 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 2783 return DDI_BUF_EMP_400MV_0DB_HSW; 2784 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 2785 return DDI_BUF_EMP_400MV_3_5DB_HSW; 2786 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 2787 return DDI_BUF_EMP_400MV_6DB_HSW; 2788 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5: 2789 return DDI_BUF_EMP_400MV_9_5DB_HSW; 2790 2791 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 2792 return DDI_BUF_EMP_600MV_0DB_HSW; 2793 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 2794 return DDI_BUF_EMP_600MV_3_5DB_HSW; 2795 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 2796 return DDI_BUF_EMP_600MV_6DB_HSW; 2797 2798 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 2799 return DDI_BUF_EMP_800MV_0DB_HSW; 2800 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 2801 return DDI_BUF_EMP_800MV_3_5DB_HSW; 2802 default: 2803 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 2804 "0x%x\n", signal_levels); 2805 return 
DDI_BUF_EMP_400MV_0DB_HSW; 2806 } 2807 } 2808 2809 static uint32_t 2810 intel_bdw_signal_levels(uint8_t train_set) 2811 { 2812 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 2813 DP_TRAIN_PRE_EMPHASIS_MASK); 2814 switch (signal_levels) { 2815 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0: 2816 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ 2817 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5: 2818 return DDI_BUF_EMP_400MV_3_5DB_BDW; /* Sel1 */ 2819 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6: 2820 return DDI_BUF_EMP_400MV_6DB_BDW; /* Sel2 */ 2821 2822 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0: 2823 return DDI_BUF_EMP_600MV_0DB_BDW; /* Sel3 */ 2824 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5: 2825 return DDI_BUF_EMP_600MV_3_5DB_BDW; /* Sel4 */ 2826 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6: 2827 return DDI_BUF_EMP_600MV_6DB_BDW; /* Sel5 */ 2828 2829 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0: 2830 return DDI_BUF_EMP_800MV_0DB_BDW; /* Sel6 */ 2831 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5: 2832 return DDI_BUF_EMP_800MV_3_5DB_BDW; /* Sel7 */ 2833 2834 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0: 2835 return DDI_BUF_EMP_1200MV_0DB_BDW; /* Sel8 */ 2836 2837 default: 2838 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" 2839 "0x%x\n", signal_levels); 2840 return DDI_BUF_EMP_400MV_0DB_BDW; /* Sel0 */ 2841 } 2842 } 2843 2844 /* Properly updates "DP" with the correct signal levels. */ 2845 static void 2846 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) 2847 { 2848 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2849 enum port port = intel_dig_port->port; 2850 struct drm_device *dev = intel_dig_port->base.base.dev; 2851 uint32_t signal_levels, mask; 2852 uint8_t train_set = intel_dp->train_set[0]; 2853 2854 if (IS_BROADWELL(dev)) { 2855 signal_levels = intel_bdw_signal_levels(train_set); 2856 mask = DDI_BUF_EMP_MASK; 2857 } else if (IS_HASWELL(dev)) { 2858 signal_levels = intel_hsw_signal_levels(train_set); 2859 mask = DDI_BUF_EMP_MASK; 2860 } else if (IS_CHERRYVIEW(dev)) { 2861 signal_levels = intel_chv_signal_levels(intel_dp); 2862 mask = 0; 2863 } else if (IS_VALLEYVIEW(dev)) { 2864 signal_levels = intel_vlv_signal_levels(intel_dp); 2865 mask = 0; 2866 } else if (IS_GEN7(dev) && port == PORT_A) { 2867 signal_levels = intel_gen7_edp_signal_levels(train_set); 2868 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; 2869 } else if (IS_GEN6(dev) && port == PORT_A) { 2870 signal_levels = intel_gen6_edp_signal_levels(train_set); 2871 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; 2872 } else { 2873 signal_levels = intel_gen4_signal_levels(train_set); 2874 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; 2875 } 2876 2877 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); 2878 2879 *DP = (*DP & ~mask) | signal_levels; 2880 } 2881 2882 static bool 2883 intel_dp_set_link_train(struct intel_dp *intel_dp, 2884 uint32_t *DP, 2885 uint8_t dp_train_pat) 2886 { 2887 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2888 struct drm_device *dev = intel_dig_port->base.base.dev; 2889 struct drm_i915_private *dev_priv = dev->dev_private; 2890 enum port port = intel_dig_port->port; 2891 uint8_t buf[sizeof(intel_dp->train_set) + 1]; 2892 int ret, len; 2893 2894 if (HAS_DDI(dev)) { 2895 uint32_t temp = I915_READ(DP_TP_CTL(port)); 2896 2897 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 2898 temp |= 
DP_TP_CTL_SCRAMBLE_DISABLE; 2899 else 2900 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE; 2901 2902 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 2903 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2904 case DP_TRAINING_PATTERN_DISABLE: 2905 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 2906 2907 break; 2908 case DP_TRAINING_PATTERN_1: 2909 temp |= DP_TP_CTL_LINK_TRAIN_PAT1; 2910 break; 2911 case DP_TRAINING_PATTERN_2: 2912 temp |= DP_TP_CTL_LINK_TRAIN_PAT2; 2913 break; 2914 case DP_TRAINING_PATTERN_3: 2915 temp |= DP_TP_CTL_LINK_TRAIN_PAT3; 2916 break; 2917 } 2918 I915_WRITE(DP_TP_CTL(port), temp); 2919 2920 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) { 2921 *DP &= ~DP_LINK_TRAIN_MASK_CPT; 2922 2923 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2924 case DP_TRAINING_PATTERN_DISABLE: 2925 *DP |= DP_LINK_TRAIN_OFF_CPT; 2926 break; 2927 case DP_TRAINING_PATTERN_1: 2928 *DP |= DP_LINK_TRAIN_PAT_1_CPT; 2929 break; 2930 case DP_TRAINING_PATTERN_2: 2931 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 2932 break; 2933 case DP_TRAINING_PATTERN_3: 2934 DRM_ERROR("DP training pattern 3 not supported\n"); 2935 *DP |= DP_LINK_TRAIN_PAT_2_CPT; 2936 break; 2937 } 2938 2939 } else { 2940 *DP &= ~DP_LINK_TRAIN_MASK; 2941 2942 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 2943 case DP_TRAINING_PATTERN_DISABLE: 2944 *DP |= DP_LINK_TRAIN_OFF; 2945 break; 2946 case DP_TRAINING_PATTERN_1: 2947 *DP |= DP_LINK_TRAIN_PAT_1; 2948 break; 2949 case DP_TRAINING_PATTERN_2: 2950 *DP |= DP_LINK_TRAIN_PAT_2; 2951 break; 2952 case DP_TRAINING_PATTERN_3: 2953 DRM_ERROR("DP training pattern 3 not supported\n"); 2954 *DP |= DP_LINK_TRAIN_PAT_2; 2955 break; 2956 } 2957 } 2958 2959 I915_WRITE(intel_dp->output_reg, *DP); 2960 POSTING_READ(intel_dp->output_reg); 2961 2962 buf[0] = dp_train_pat; 2963 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) == 2964 DP_TRAINING_PATTERN_DISABLE) { 2965 /* don't write DP_TRAINING_LANEx_SET on disable */ 2966 len = 1; 2967 } else { 2968 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */ 2969 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count); 2970 len = intel_dp->lane_count + 1; 2971 } 2972 2973 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_PATTERN_SET, 2974 buf, len); 2975 2976 return ret == len; 2977 } 2978 2979 static bool 2980 intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP, 2981 uint8_t dp_train_pat) 2982 { 2983 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); 2984 intel_dp_set_signal_levels(intel_dp, DP); 2985 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat); 2986 } 2987 2988 static bool 2989 intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP, 2990 const uint8_t link_status[DP_LINK_STATUS_SIZE]) 2991 { 2992 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2993 struct drm_device *dev = intel_dig_port->base.base.dev; 2994 struct drm_i915_private *dev_priv = dev->dev_private; 2995 int ret; 2996 2997 intel_get_adjust_train(intel_dp, link_status); 2998 intel_dp_set_signal_levels(intel_dp, DP); 2999 3000 I915_WRITE(intel_dp->output_reg, *DP); 3001 POSTING_READ(intel_dp->output_reg); 3002 3003 ret = intel_dp_aux_native_write(intel_dp, DP_TRAINING_LANE0_SET, 3004 intel_dp->train_set, 3005 intel_dp->lane_count); 3006 3007 return ret == intel_dp->lane_count; 3008 } 3009 3010 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) 3011 { 3012 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3013 struct drm_device *dev = intel_dig_port->base.base.dev; 3014 struct drm_i915_private 
*dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is a requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}

/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Write the link configuration data */
	link_config[0] = intel_dp->link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, link_config, 2);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	intel_dp_aux_native_write(intel_dp, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}

void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
	if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq)
		DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); 3210 3211 } 3212 3213 void intel_dp_stop_link_train(struct intel_dp *intel_dp) 3214 { 3215 intel_dp_set_link_train(intel_dp, &intel_dp->DP, 3216 DP_TRAINING_PATTERN_DISABLE); 3217 } 3218 3219 static void 3220 intel_dp_link_down(struct intel_dp *intel_dp) 3221 { 3222 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3223 enum port port = intel_dig_port->port; 3224 struct drm_device *dev = intel_dig_port->base.base.dev; 3225 struct drm_i915_private *dev_priv = dev->dev_private; 3226 struct intel_crtc *intel_crtc = 3227 to_intel_crtc(intel_dig_port->base.base.crtc); 3228 uint32_t DP = intel_dp->DP; 3229 3230 if (WARN_ON(HAS_DDI(dev))) 3231 return; 3232 3233 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 3234 return; 3235 3236 DRM_DEBUG_KMS("\n"); 3237 3238 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) { 3239 DP &= ~DP_LINK_TRAIN_MASK_CPT; 3240 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); 3241 } else { 3242 DP &= ~DP_LINK_TRAIN_MASK; 3243 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE); 3244 } 3245 POSTING_READ(intel_dp->output_reg); 3246 3247 if (HAS_PCH_IBX(dev) && 3248 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 3249 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 3250 3251 /* Hardware workaround: leaving our transcoder select 3252 * set to transcoder B while it's off will prevent the 3253 * corresponding HDMI output on transcoder A. 3254 * 3255 * Combine this with another hardware workaround: 3256 * transcoder select bit can only be cleared while the 3257 * port is enabled. 3258 */ 3259 DP &= ~DP_PIPEB_SELECT; 3260 I915_WRITE(intel_dp->output_reg, DP); 3261 3262 /* Changes to enable or select take place the vblank 3263 * after being written. 3264 */ 3265 if (WARN_ON(crtc == NULL)) { 3266 /* We should never try to disable a port without a crtc 3267 * attached. For paranoia keep the code around for a 3268 * bit. 
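			 * The posting read plus 50 ms sleep below stands in
			 * for the vblank wait we would otherwise do on the
			 * attached pipe.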
*/ 3269 POSTING_READ(intel_dp->output_reg); 3270 msleep(50); 3271 } else 3272 intel_wait_for_vblank(dev, intel_crtc->pipe); 3273 } 3274 3275 DP &= ~DP_AUDIO_OUTPUT_ENABLE; 3276 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 3277 POSTING_READ(intel_dp->output_reg); 3278 msleep(intel_dp->panel_power_down_delay); 3279 } 3280 3281 static bool 3282 intel_dp_get_dpcd(struct intel_dp *intel_dp) 3283 { 3284 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3285 struct drm_device *dev = dig_port->base.base.dev; 3286 struct drm_i915_private *dev_priv = dev->dev_private; 3287 3288 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; 3289 3290 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 3291 sizeof(intel_dp->dpcd)) == 0) 3292 return false; /* aux transfer failed */ 3293 3294 ksnprintf(dpcd_hex_dump, 3295 sizeof(dpcd_hex_dump), 3296 "%02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", 3297 intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], 3298 intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], 3299 intel_dp->dpcd[6], intel_dp->dpcd[7]); 3300 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); 3301 3302 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 3303 return false; /* DPCD not present */ 3304 3305 /* Check if the panel supports PSR */ 3306 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); 3307 if (is_edp(intel_dp)) { 3308 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT, 3309 intel_dp->psr_dpcd, 3310 sizeof(intel_dp->psr_dpcd)); 3311 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) { 3312 dev_priv->psr.sink_support = true; 3313 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n"); 3314 } 3315 } 3316 3317 /* Training Pattern 3 support */ 3318 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && 3319 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { 3320 intel_dp->use_tps3 = true; 3321 DRM_DEBUG_KMS("Displayport TPS3 supported"); 3322 } else 3323 intel_dp->use_tps3 = false; 3324 3325 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 3326 DP_DWN_STRM_PORT_PRESENT)) 3327 return true; /* native DP sink */ 3328 3329 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10) 3330 return true; /* no per-port downstream info */ 3331 3332 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0, 3333 intel_dp->downstream_ports, 3334 DP_MAX_DOWNSTREAM_PORTS) == 0) 3335 return false; /* downstream port status fetch failed */ 3336 3337 return true; 3338 } 3339 3340 static void 3341 intel_dp_probe_oui(struct intel_dp *intel_dp) 3342 { 3343 u8 buf[3]; 3344 3345 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 3346 return; 3347 3348 intel_edp_panel_vdd_on(intel_dp); 3349 3350 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 3351 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 3352 buf[0], buf[1], buf[2]); 3353 3354 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) 3355 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 3356 buf[0], buf[1], buf[2]); 3357 3358 edp_panel_vdd_off(intel_dp, false); 3359 } 3360 3361 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc) 3362 { 3363 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3364 struct drm_device *dev = intel_dig_port->base.base.dev; 3365 struct intel_crtc *intel_crtc = 3366 to_intel_crtc(intel_dig_port->base.base.crtc); 3367 u8 buf[1]; 3368 3369 if (!intel_dp_aux_native_read(intel_dp, DP_TEST_SINK_MISC, buf, 1)) 3370 return -EAGAIN; 3371 3372 if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) 3373 return -ENOTTY; 3374 3375 if (!intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 3376 
					 DP_TEST_SINK_START))
		return -EAGAIN;

	/* Wait 2 vblanks to be sure we will have the correct CRC value */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	if (!intel_dp_aux_native_read(intel_dp, DP_TEST_CRC_R_CR, crc, 6))
		return -EAGAIN;

	intel_dp_aux_native_write_1(intel_dp, DP_TEST_SINK, 0);
	return 0;
}

static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 */

void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* FIXME: This access isn't protected by any locks. */
	if (!intel_encoder->connectors_active)
		return;

	if (WARN_ON(!intel_encoder->base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}

/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;
		if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
						    &reg, 1))
			return connector_status_unknown;
		return DP_GET_SINK_COUNT(reg) ?
connector_status_connected 3489 : connector_status_disconnected; 3490 } 3491 3492 /* If no HPD, poke DDC gently */ 3493 if (drm_probe_ddc(intel_dp->adapter)) 3494 return connector_status_connected; 3495 3496 /* Well we tried, say unknown for unreliable port types */ 3497 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 3498 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK; 3499 if (type == DP_DS_PORT_TYPE_VGA || 3500 type == DP_DS_PORT_TYPE_NON_EDID) 3501 return connector_status_unknown; 3502 } else { 3503 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & 3504 DP_DWN_STRM_PORT_TYPE_MASK; 3505 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG || 3506 type == DP_DWN_STRM_PORT_TYPE_OTHER) 3507 return connector_status_unknown; 3508 } 3509 3510 /* Anything else is out of spec, warn and ignore */ 3511 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n"); 3512 return connector_status_disconnected; 3513 } 3514 3515 static enum drm_connector_status 3516 ironlake_dp_detect(struct intel_dp *intel_dp) 3517 { 3518 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3519 struct drm_i915_private *dev_priv = dev->dev_private; 3520 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3521 enum drm_connector_status status; 3522 3523 /* Can't disconnect eDP, but you can close the lid... */ 3524 if (is_edp(intel_dp)) { 3525 status = intel_panel_detect(dev); 3526 if (status == connector_status_unknown) 3527 status = connector_status_connected; 3528 return status; 3529 } 3530 3531 if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) 3532 return connector_status_disconnected; 3533 3534 return intel_dp_detect_dpcd(intel_dp); 3535 } 3536 3537 static enum drm_connector_status 3538 g4x_dp_detect(struct intel_dp *intel_dp) 3539 { 3540 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3541 struct drm_i915_private *dev_priv = dev->dev_private; 3542 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3543 uint32_t bit; 3544 3545 /* Can't disconnect eDP, but you can close the lid... 
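	 * If the lid state comes back unknown, assume the panel is
	 * connected rather than report a bogus disconnect.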
*/ 3546 if (is_edp(intel_dp)) { 3547 enum drm_connector_status status; 3548 3549 status = intel_panel_detect(dev); 3550 if (status == connector_status_unknown) 3551 status = connector_status_connected; 3552 return status; 3553 } 3554 3555 if (IS_VALLEYVIEW(dev)) { 3556 switch (intel_dig_port->port) { 3557 case PORT_B: 3558 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; 3559 break; 3560 case PORT_C: 3561 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; 3562 break; 3563 case PORT_D: 3564 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; 3565 break; 3566 default: 3567 return connector_status_unknown; 3568 } 3569 } else { 3570 switch (intel_dig_port->port) { 3571 case PORT_B: 3572 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; 3573 break; 3574 case PORT_C: 3575 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; 3576 break; 3577 case PORT_D: 3578 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; 3579 break; 3580 default: 3581 return connector_status_unknown; 3582 } 3583 } 3584 3585 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) 3586 return connector_status_disconnected; 3587 3588 return intel_dp_detect_dpcd(intel_dp); 3589 } 3590 3591 static struct edid * 3592 intel_dp_get_edid(struct drm_connector *connector, struct device *adapter) 3593 { 3594 struct intel_connector *intel_connector = to_intel_connector(connector); 3595 3596 /* use cached edid if we have one */ 3597 if (intel_connector->edid) { 3598 /* invalid edid */ 3599 if (IS_ERR(intel_connector->edid)) 3600 return NULL; 3601 3602 return drm_edid_duplicate(intel_connector->edid); 3603 } 3604 3605 return drm_get_edid(connector, adapter); 3606 } 3607 3608 static int 3609 intel_dp_get_edid_modes(struct drm_connector *connector, struct device *adapter) 3610 { 3611 struct intel_connector *intel_connector = to_intel_connector(connector); 3612 3613 /* use cached edid if we have one */ 3614 if (intel_connector->edid) { 3615 /* invalid edid */ 3616 if (IS_ERR(intel_connector->edid)) 3617 return 0; 3618 3619 return intel_connector_update_modes(connector, 3620 intel_connector->edid); 3621 } 3622 3623 return intel_ddc_get_modes(connector, adapter); 3624 } 3625 3626 static enum drm_connector_status 3627 intel_dp_detect(struct drm_connector *connector, bool force) 3628 { 3629 struct intel_dp *intel_dp = intel_attached_dp(connector); 3630 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3631 struct intel_encoder *intel_encoder = &intel_dig_port->base; 3632 struct drm_device *dev = connector->dev; 3633 struct drm_i915_private *dev_priv = dev->dev_private; 3634 enum drm_connector_status status; 3635 enum intel_display_power_domain power_domain; 3636 struct edid *edid = NULL; 3637 3638 intel_runtime_pm_get(dev_priv); 3639 3640 power_domain = intel_display_port_power_domain(intel_encoder); 3641 intel_display_power_get(dev_priv, power_domain); 3642 3643 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 3644 connector->base.id, connector->name); 3645 3646 intel_dp->has_audio = false; 3647 3648 if (HAS_PCH_SPLIT(dev)) 3649 status = ironlake_dp_detect(intel_dp); 3650 else 3651 status = g4x_dp_detect(intel_dp); 3652 3653 if (status != connector_status_connected) 3654 goto out; 3655 3656 intel_dp_probe_oui(intel_dp); 3657 3658 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) { 3659 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON); 3660 } else { 3661 edid = intel_dp_get_edid(connector, intel_dp->adapter); 3662 if (edid) { 3663 intel_dp->has_audio = drm_detect_monitor_audio(edid); 3664 kfree(edid); 3665 } 3666 } 3667 3668 if (intel_encoder->type != INTEL_OUTPUT_EDP) 3669 intel_encoder->type = 
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	struct edid *edid = NULL;

	intel_runtime_pm_get(dev_priv);

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			kfree(edid);
		}
	}

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

out:
	intel_display_power_put(dev_priv, power_domain);

	intel_runtime_pm_put(dev_priv);

	return status;
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink */

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	ret = intel_dp_get_edid_modes(connector, intel_dp->adapter);
	intel_display_power_put(dev_priv, power_domain);
	if (ret)
		return ret;

	/* if eDP has no EDID, fall back to fixed mode */
	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;
		mode = drm_mode_duplicate(dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	struct edid *edid;
	bool has_audio = false;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	edid = intel_dp_get_edid(connector, intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);
		kfree(edid);
	}

	intel_display_power_put(dev_priv, power_domain);

	return has_audio;
}

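/*
 * All property writes below funnel through the done: label, which forces
 * a modeset via intel_crtc_restore_mode() when the connector is on an
 * active crtc, so audio, color-range and panel-fitter changes take
 * effect immediately.
 */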
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		uint32_t old_range = intel_dp->color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = 0;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
			break;
		default:
			return -EINVAL;
		}

		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("\"no scaling\" not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}

static void
intel_dp_connector_destroy(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (!IS_ERR_OR_NULL(intel_connector->edid))
		kfree(intel_connector->edid);

	/* Can't call is_edp() since the encoder may have been destroyed
	 * already. */
	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_fini(&intel_connector->panel);

	drm_connector_cleanup(connector);
	kfree(connector);
}

void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (intel_dp->dp_iic_bus != NULL) {
		if (intel_dp->adapter != NULL) {
			device_delete_child(intel_dp->dp_iic_bus,
					    intel_dp->adapter);
		}
		device_delete_child(dev->dev, intel_dp->dp_iic_bus);
	}

	drm_encoder_cleanup(encoder);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
		edp_panel_vdd_off_sync(intel_dp);
		drm_modeset_unlock(&dev->mode_config.connection_mutex);
#if 0
		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
#endif
	}
	kfree(intel_dig_port);
}

static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = intel_connector_dpms,
	.detect = intel_dp_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.destroy = intel_dp_connector_destroy,
};

static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};

static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.destroy = intel_dp_encoder_destroy,
};

static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	intel_dp_check_link_status(intel_dp);
}

/* Return which DP Port should be selected for Transcoder DP control */
int
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;

	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
			return intel_dp->output_reg;
	}

	return -1;
}

/* check the VBT to see whether the eDP is on one of the DP-B/C/D ports */
bool intel_dp_is_edp(struct drm_device *dev, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	union child_device_config *p_child;
	int i;
	static const short port_mapping[] = {
		[PORT_B] = PORT_IDPB,
		[PORT_C] = PORT_IDPC,
		[PORT_D] = PORT_IDPD,
	};

	if (port == PORT_A)
		return true;

	if (!dev_priv->vbt.child_dev_num)
		return false;

	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
		p_child = dev_priv->vbt.child_dev + i;

		if (p_child->common.dvo_port == port_mapping[port] &&
		    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
		    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
			return true;
	}
	return false;
}

static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_attach_force_audio_property(connector);
	intel_attach_broadcast_rgb_property(connector);
	intel_dp->color_range_auto = true;

	if (is_edp(intel_dp)) {
		drm_mode_create_scaling_mode_property(connector->dev);
		drm_object_attach_property(
			&connector->base,
			connector->dev->mode_config.scaling_mode_property,
			DRM_MODE_SCALE_ASPECT);
		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
	}
}

static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}

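/*
 * The panel power sequencer registers store the eDP T1_T3/T8/T9/T10/
 * T11_T12 delays in units of 100 usec (the power cycle delay is special,
 * see below), while the intel_dp delays are kept in ms. Worked example:
 * the spec-limit T1+T3 of 210 ms is stored as 210 * 10 = 2100 hw units,
 * and get_delay() converts it back with DIV_ROUND_UP(2100, 10) = 210 ms.
 */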
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp,
				    struct edp_power_seq *out)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec, final;
	u32 pp_on, pp_off, pp_div, pp;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp = ironlake_get_pp_control(intel_dp);
	I915_WRITE(pp_ctrl_reg, pp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	pp_div = I915_READ(pp_div_reg);

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	if (out)
		*out = final;
}

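/*
 * The divisor programmed below is derived from the raw clock per the
 * Bspec formula. As a rough worked example (assuming intel_pch_rawclk()
 * reports 125, i.e. the usual 125 MHz PCH raw clock):
 * (100 * 125) / 2 - 1 = 6249 goes into the PP_REFERENCE_DIVIDER field,
 * and t11_t12 is rounded up from 100 us units to the register's 100 ms
 * units via DIV_ROUND_UP(seq->t11_t12, 1000).
 */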
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg;

	if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum i915_pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
		   << PANEL_POWER_CYCLE_DELAY_SHIFT);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_B)
			port_sel = PANEL_PORT_SELECT_DPB_VLV;
		else
			port_sel = PANEL_PORT_SELECT_DPC_VLV;
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (dp_to_dig_port(intel_dp)->port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      I915_READ(pp_div_reg));
}

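/*
 * DRRS: seamlessly switch the eDP panel between the fixed mode's refresh
 * rate (DRRS_HIGH_RR) and the downclock mode's rate (DRRS_LOW_RR). On
 * gen7 this is done by toggling PIPECONF_EDP_RR_MODE_SWITCH and, for the
 * low rate, loading the alternate M2/N2 link values.
 */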
void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_dp *intel_dp = NULL;
	struct intel_crtc_config *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	struct intel_connector *intel_connector = dev_priv->drrs.connector;
	u32 reg, val;
	enum edp_drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_connector == NULL) {
		DRM_DEBUG_KMS("DRRS supported for eDP only.\n");
		return;
	}

	if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) {
		DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
		return;
	}

	encoder = intel_attached_encoder(&intel_connector->base);
	intel_dp = enc_to_intel_dp(&encoder->base);
	intel_crtc = encoder->new_crtc;

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = &intel_crtc->config;

	if (intel_dp->drrs_state.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	if (intel_connector->panel.downclock_mode->vrefresh == refresh_rate)
		index = DRRS_LOW_RR;

	if (index == intel_dp->drrs_state.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) {
		reg = PIPECONF(intel_crtc->config.cpu_transcoder);
		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			val |= PIPECONF_EDP_RR_MODE_SWITCH;
			intel_dp_set_m2_n2(intel_crtc, &config->dp_m2_n2);
		} else {
			val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	/*
	 * Mutex taken to ensure that there is no race between different
	 * DRRS calls trying to update the refresh rate. This may happen
	 * in the future, once kernel idleness-based DRRS and user-space
	 * requests to set a different RR can race with each other.
	 */

	mutex_lock(&intel_dp->drrs_state.mutex);

	intel_dp->drrs_state.refresh_rate_type = index;

	mutex_unlock(&intel_dp->drrs_state.mutex);

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}

static struct drm_display_mode *
intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
		   struct intel_connector *intel_connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_INFO("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_find_panel_downclock(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_INFO("DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.connector = intel_connector;

	lockinit(&intel_dp->drrs_state.mutex, "i915dsm", 0, LK_CANRECURSE);

	intel_dp->drrs_state.type = dev_priv->vbt.drrs_type;

	intel_dp->drrs_state.refresh_rate_type = DRRS_HIGH_RR;
	DRM_INFO("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}

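/*
 * eDP-only connector setup: cache the DPCD and EDID while VDD is up,
 * then pick the panel's fixed mode from the EDID preferred mode if there
 * is one, falling back to the VBT LFP mode otherwise.
 */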
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector,
				     struct edp_power_seq *power_seq)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;

	intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;

	if (!is_edp(intel_dp))
		return true;

	/* The VDD bit needs a power domain reference, so if the bit is already
	 * enabled when we boot, grab this reference. */
	if (edp_have_panel_vdd(intel_dp)) {
		enum intel_display_power_domain power_domain;
		power_domain = intel_display_port_power_domain(intel_encoder);
		intel_display_power_get(dev_priv, power_domain);
	}

	/* Cache DPCD and EDID for edp. */
	intel_edp_panel_vdd_on(intel_dp);
	has_dpcd = intel_dp_get_dpcd(intel_dp);
	edp_panel_vdd_off(intel_dp, false);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, intel_dp->adapter);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_dig_port,
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

#if 0
	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);
	}
#endif

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_panel_setup_backlight(connector);

	return true;
}

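/*
 * Common DP/eDP connector initialization: select the per-platform AUX
 * vfuncs, register the connector, wire up the DDC bus and, for eDP, run
 * the panel power sequencer setup before probing the panel.
 */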
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	struct edp_power_seq power_seq = { 0 };
	const char *name = NULL;
	int type, error;

	/* intel_dp vfuncs */
	if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_sysfs_connector_add(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
	if (HAS_DDI(dev)) {
		switch (intel_dig_port->port) {
		case PORT_A:
			intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
			break;
		case PORT_B:
			intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
			break;
		case PORT_C:
			intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
			break;
		case PORT_D:
			intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
			break;
		default:
			BUG();
		}
	}

	/* Set up the DDC bus. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		name = "DPDDC-D";
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
	}

	error = intel_dp_i2c_init(intel_dp, intel_connector, name);
	WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
	     error, port_name(port));

	intel_dp->psr_setup_done = false;

	if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) {
#if 0
		i2c_del_adapter(&intel_dp->adapter);
#endif
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			edp_panel_vdd_off_sync(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
		drm_sysfs_connector_remove(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;
}

void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;
	intel_encoder->hot_plug = intel_dp_hot_plug;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}