1 /* SPDX-License-Identifier: MIT */ 2 /* 3 * Copyright © 2019 Intel Corporation 4 */ 5 6 #include "display/intel_crt.h" 7 #include "display/intel_dp.h" 8 9 #include "i915_drv.h" 10 #include "i915_irq.h" 11 #include "intel_cdclk.h" 12 #include "intel_combo_phy.h" 13 #include "intel_csr.h" 14 #include "intel_display_power.h" 15 #include "intel_display_types.h" 16 #include "intel_dpio_phy.h" 17 #include "intel_hotplug.h" 18 #include "intel_pm.h" 19 #include "intel_sideband.h" 20 #include "intel_tc.h" 21 #include "intel_vga.h" 22 23 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 24 enum i915_power_well_id power_well_id); 25 26 const char * 27 intel_display_power_domain_str(enum intel_display_power_domain domain) 28 { 29 switch (domain) { 30 case POWER_DOMAIN_DISPLAY_CORE: 31 return "DISPLAY_CORE"; 32 case POWER_DOMAIN_PIPE_A: 33 return "PIPE_A"; 34 case POWER_DOMAIN_PIPE_B: 35 return "PIPE_B"; 36 case POWER_DOMAIN_PIPE_C: 37 return "PIPE_C"; 38 case POWER_DOMAIN_PIPE_D: 39 return "PIPE_D"; 40 case POWER_DOMAIN_PIPE_A_PANEL_FITTER: 41 return "PIPE_A_PANEL_FITTER"; 42 case POWER_DOMAIN_PIPE_B_PANEL_FITTER: 43 return "PIPE_B_PANEL_FITTER"; 44 case POWER_DOMAIN_PIPE_C_PANEL_FITTER: 45 return "PIPE_C_PANEL_FITTER"; 46 case POWER_DOMAIN_PIPE_D_PANEL_FITTER: 47 return "PIPE_D_PANEL_FITTER"; 48 case POWER_DOMAIN_TRANSCODER_A: 49 return "TRANSCODER_A"; 50 case POWER_DOMAIN_TRANSCODER_B: 51 return "TRANSCODER_B"; 52 case POWER_DOMAIN_TRANSCODER_C: 53 return "TRANSCODER_C"; 54 case POWER_DOMAIN_TRANSCODER_D: 55 return "TRANSCODER_D"; 56 case POWER_DOMAIN_TRANSCODER_EDP: 57 return "TRANSCODER_EDP"; 58 case POWER_DOMAIN_TRANSCODER_VDSC_PW2: 59 return "TRANSCODER_VDSC_PW2"; 60 case POWER_DOMAIN_TRANSCODER_DSI_A: 61 return "TRANSCODER_DSI_A"; 62 case POWER_DOMAIN_TRANSCODER_DSI_C: 63 return "TRANSCODER_DSI_C"; 64 case POWER_DOMAIN_PORT_DDI_A_LANES: 65 return "PORT_DDI_A_LANES"; 66 case POWER_DOMAIN_PORT_DDI_B_LANES: 67 return "PORT_DDI_B_LANES"; 68 case 
POWER_DOMAIN_PORT_DDI_C_LANES: 69 return "PORT_DDI_C_LANES"; 70 case POWER_DOMAIN_PORT_DDI_D_LANES: 71 return "PORT_DDI_D_LANES"; 72 case POWER_DOMAIN_PORT_DDI_E_LANES: 73 return "PORT_DDI_E_LANES"; 74 case POWER_DOMAIN_PORT_DDI_F_LANES: 75 return "PORT_DDI_F_LANES"; 76 case POWER_DOMAIN_PORT_DDI_G_LANES: 77 return "PORT_DDI_G_LANES"; 78 case POWER_DOMAIN_PORT_DDI_H_LANES: 79 return "PORT_DDI_H_LANES"; 80 case POWER_DOMAIN_PORT_DDI_I_LANES: 81 return "PORT_DDI_I_LANES"; 82 case POWER_DOMAIN_PORT_DDI_A_IO: 83 return "PORT_DDI_A_IO"; 84 case POWER_DOMAIN_PORT_DDI_B_IO: 85 return "PORT_DDI_B_IO"; 86 case POWER_DOMAIN_PORT_DDI_C_IO: 87 return "PORT_DDI_C_IO"; 88 case POWER_DOMAIN_PORT_DDI_D_IO: 89 return "PORT_DDI_D_IO"; 90 case POWER_DOMAIN_PORT_DDI_E_IO: 91 return "PORT_DDI_E_IO"; 92 case POWER_DOMAIN_PORT_DDI_F_IO: 93 return "PORT_DDI_F_IO"; 94 case POWER_DOMAIN_PORT_DDI_G_IO: 95 return "PORT_DDI_G_IO"; 96 case POWER_DOMAIN_PORT_DDI_H_IO: 97 return "PORT_DDI_H_IO"; 98 case POWER_DOMAIN_PORT_DDI_I_IO: 99 return "PORT_DDI_I_IO"; 100 case POWER_DOMAIN_PORT_DSI: 101 return "PORT_DSI"; 102 case POWER_DOMAIN_PORT_CRT: 103 return "PORT_CRT"; 104 case POWER_DOMAIN_PORT_OTHER: 105 return "PORT_OTHER"; 106 case POWER_DOMAIN_VGA: 107 return "VGA"; 108 case POWER_DOMAIN_AUDIO: 109 return "AUDIO"; 110 case POWER_DOMAIN_AUX_A: 111 return "AUX_A"; 112 case POWER_DOMAIN_AUX_B: 113 return "AUX_B"; 114 case POWER_DOMAIN_AUX_C: 115 return "AUX_C"; 116 case POWER_DOMAIN_AUX_D: 117 return "AUX_D"; 118 case POWER_DOMAIN_AUX_E: 119 return "AUX_E"; 120 case POWER_DOMAIN_AUX_F: 121 return "AUX_F"; 122 case POWER_DOMAIN_AUX_G: 123 return "AUX_G"; 124 case POWER_DOMAIN_AUX_H: 125 return "AUX_H"; 126 case POWER_DOMAIN_AUX_I: 127 return "AUX_I"; 128 case POWER_DOMAIN_AUX_IO_A: 129 return "AUX_IO_A"; 130 case POWER_DOMAIN_AUX_C_TBT: 131 return "AUX_C_TBT"; 132 case POWER_DOMAIN_AUX_D_TBT: 133 return "AUX_D_TBT"; 134 case POWER_DOMAIN_AUX_E_TBT: 135 return "AUX_E_TBT"; 136 case 
POWER_DOMAIN_AUX_F_TBT: 137 return "AUX_F_TBT"; 138 case POWER_DOMAIN_AUX_G_TBT: 139 return "AUX_G_TBT"; 140 case POWER_DOMAIN_AUX_H_TBT: 141 return "AUX_H_TBT"; 142 case POWER_DOMAIN_AUX_I_TBT: 143 return "AUX_I_TBT"; 144 case POWER_DOMAIN_GMBUS: 145 return "GMBUS"; 146 case POWER_DOMAIN_INIT: 147 return "INIT"; 148 case POWER_DOMAIN_MODESET: 149 return "MODESET"; 150 case POWER_DOMAIN_GT_IRQ: 151 return "GT_IRQ"; 152 case POWER_DOMAIN_DPLL_DC_OFF: 153 return "DPLL_DC_OFF"; 154 case POWER_DOMAIN_TC_COLD_OFF: 155 return "TC_COLD_OFF"; 156 default: 157 MISSING_CASE(domain); 158 return "?"; 159 } 160 } 161 162 static void intel_power_well_enable(struct drm_i915_private *dev_priv, 163 struct i915_power_well *power_well) 164 { 165 drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name); 166 power_well->desc->ops->enable(dev_priv, power_well); 167 power_well->hw_enabled = true; 168 } 169 170 static void intel_power_well_disable(struct drm_i915_private *dev_priv, 171 struct i915_power_well *power_well) 172 { 173 drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name); 174 power_well->hw_enabled = false; 175 power_well->desc->ops->disable(dev_priv, power_well); 176 } 177 178 static void intel_power_well_get(struct drm_i915_private *dev_priv, 179 struct i915_power_well *power_well) 180 { 181 if (!power_well->count++) 182 intel_power_well_enable(dev_priv, power_well); 183 } 184 185 static void intel_power_well_put(struct drm_i915_private *dev_priv, 186 struct i915_power_well *power_well) 187 { 188 drm_WARN(&dev_priv->drm, !power_well->count, 189 "Use count on power well %s is already zero", 190 power_well->desc->name); 191 192 if (!--power_well->count) 193 intel_power_well_disable(dev_priv, power_well); 194 } 195 196 /** 197 * __intel_display_power_is_enabled - unlocked check for a power domain 198 * @dev_priv: i915 device instance 199 * @domain: power domain to check 200 * 201 * This is the unlocked version of intel_display_power_is_enabled() and 
should 202 * only be used from error capture and recovery code where deadlocks are 203 * possible. 204 * 205 * Returns: 206 * True when the power domain is enabled, false otherwise. 207 */ 208 bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 209 enum intel_display_power_domain domain) 210 { 211 struct i915_power_well *power_well; 212 bool is_enabled; 213 214 if (dev_priv->runtime_pm.suspended) 215 return false; 216 217 is_enabled = true; 218 219 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) { 220 if (power_well->desc->always_on) 221 continue; 222 223 if (!power_well->hw_enabled) { 224 is_enabled = false; 225 break; 226 } 227 } 228 229 return is_enabled; 230 } 231 232 /** 233 * intel_display_power_is_enabled - check for a power domain 234 * @dev_priv: i915 device instance 235 * @domain: power domain to check 236 * 237 * This function can be used to check the hw power domain state. It is mostly 238 * used in hardware state readout functions. Everywhere else code should rely 239 * upon explicit power domain reference counting to ensure that the hardware 240 * block is powered up before accessing it. 241 * 242 * Callers must hold the relevant modesetting locks to ensure that concurrent 243 * threads can't disable the power well while the caller tries to read a few 244 * registers. 245 * 246 * Returns: 247 * True when the power domain is enabled, false otherwise. 248 */ 249 bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, 250 enum intel_display_power_domain domain) 251 { 252 struct i915_power_domains *power_domains; 253 bool ret; 254 255 power_domains = &dev_priv->power_domains; 256 257 mutex_lock(&power_domains->lock); 258 ret = __intel_display_power_is_enabled(dev_priv, domain); 259 mutex_unlock(&power_domains->lock); 260 261 return ret; 262 } 263 264 /* 265 * Starting with Haswell, we have a "Power Down Well" that can be turned off 266 * when not needed anymore. 
We have 4 registers that can request the power well 267 * to be enabled, and it will only be disabled if none of the registers is 268 * requesting it to be enabled. 269 */ 270 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, 271 u8 irq_pipe_mask, bool has_vga) 272 { 273 if (has_vga) 274 intel_vga_reset_io_mem(dev_priv); 275 276 if (irq_pipe_mask) 277 gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); 278 } 279 280 static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv, 281 u8 irq_pipe_mask) 282 { 283 if (irq_pipe_mask) 284 gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); 285 } 286 287 #define ICL_AUX_PW_TO_CH(pw_idx) \ 288 ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A) 289 290 #define ICL_TBT_AUX_PW_TO_CH(pw_idx) \ 291 ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C) 292 293 static enum aux_ch icl_tc_phy_aux_ch(struct drm_i915_private *dev_priv, 294 struct i915_power_well *power_well) 295 { 296 int pw_idx = power_well->desc->hsw.idx; 297 298 return power_well->desc->hsw.is_tc_tbt ? 
ICL_TBT_AUX_PW_TO_CH(pw_idx) : 299 ICL_AUX_PW_TO_CH(pw_idx); 300 } 301 302 static struct intel_digital_port * 303 aux_ch_to_digital_port(struct drm_i915_private *dev_priv, 304 enum aux_ch aux_ch) 305 { 306 struct intel_digital_port *dig_port = NULL; 307 struct intel_encoder *encoder; 308 309 for_each_intel_encoder(&dev_priv->drm, encoder) { 310 /* We'll check the MST primary port */ 311 if (encoder->type == INTEL_OUTPUT_DP_MST) 312 continue; 313 314 dig_port = enc_to_dig_port(encoder); 315 if (!dig_port) 316 continue; 317 318 if (dig_port->aux_ch != aux_ch) { 319 dig_port = NULL; 320 continue; 321 } 322 323 break; 324 } 325 326 return dig_port; 327 } 328 329 static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv, 330 struct i915_power_well *power_well, 331 bool timeout_expected) 332 { 333 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 334 int pw_idx = power_well->desc->hsw.idx; 335 336 /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */ 337 if (intel_de_wait_for_set(dev_priv, regs->driver, 338 HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) { 339 drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n", 340 power_well->desc->name); 341 342 drm_WARN_ON(&dev_priv->drm, !timeout_expected); 343 344 } 345 } 346 347 static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv, 348 const struct i915_power_well_regs *regs, 349 int pw_idx) 350 { 351 u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx); 352 u32 ret; 353 354 ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0; 355 ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0; 356 if (regs->kvmr.reg) 357 ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0; 358 ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 
8 : 0; 359 360 return ret; 361 } 362 363 static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv, 364 struct i915_power_well *power_well) 365 { 366 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 367 int pw_idx = power_well->desc->hsw.idx; 368 bool disabled; 369 u32 reqs; 370 371 /* 372 * Bspec doesn't require waiting for PWs to get disabled, but still do 373 * this for paranoia. The known cases where a PW will be forced on: 374 * - a KVMR request on any power well via the KVMR request register 375 * - a DMC request on PW1 and MISC_IO power wells via the BIOS and 376 * DEBUG request registers 377 * Skip the wait in case any of the request bits are set and print a 378 * diagnostic message. 379 */ 380 wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) & 381 HSW_PWR_WELL_CTL_STATE(pw_idx))) || 382 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1); 383 if (disabled) 384 return; 385 386 drm_dbg_kms(&dev_priv->drm, 387 "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", 388 power_well->desc->name, 389 !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8)); 390 } 391 392 static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv, 393 enum skl_power_gate pg) 394 { 395 /* Timeout 5us for PG#0, for other PGs 1us */ 396 drm_WARN_ON(&dev_priv->drm, 397 intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS, 398 SKL_FUSE_PG_DIST_STATUS(pg), 1)); 399 } 400 401 static void hsw_power_well_enable(struct drm_i915_private *dev_priv, 402 struct i915_power_well *power_well) 403 { 404 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 405 int pw_idx = power_well->desc->hsw.idx; 406 u32 val; 407 408 if (power_well->desc->hsw.has_fuses) { 409 enum skl_power_gate pg; 410 411 pg = INTEL_GEN(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : 412 SKL_PW_CTL_IDX_TO_PG(pw_idx); 413 /* 414 * For PW1 we have to wait both for the PW0/PG0 fuse state 415 * before enabling the power well and PW1/PG1's own fuse 416 * state after the enabling. For all other power wells with 417 * fuses we only have to wait for that PW/PG's fuse state 418 * after the enabling. 419 */ 420 if (pg == SKL_PG1) 421 gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0); 422 } 423 424 val = intel_de_read(dev_priv, regs->driver); 425 intel_de_write(dev_priv, regs->driver, 426 val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 427 428 hsw_wait_for_power_well_enable(dev_priv, power_well, false); 429 430 /* Display WA #1178: cnl */ 431 if (IS_CANNONLAKE(dev_priv) && 432 pw_idx >= GLK_PW_CTL_IDX_AUX_B && 433 pw_idx <= CNL_PW_CTL_IDX_AUX_F) { 434 u32 val; 435 436 val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx)); 437 val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS; 438 intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val); 439 } 440 441 if (power_well->desc->hsw.has_fuses) { 442 enum skl_power_gate pg; 443 444 pg = INTEL_GEN(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : 445 SKL_PW_CTL_IDX_TO_PG(pw_idx); 446 gen9_wait_for_power_well_fuses(dev_priv, pg); 447 } 448 449 hsw_power_well_post_enable(dev_priv, 450 power_well->desc->hsw.irq_pipe_mask, 451 power_well->desc->hsw.has_vga); 452 } 453 454 static void hsw_power_well_disable(struct drm_i915_private *dev_priv, 455 struct i915_power_well *power_well) 456 { 457 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 458 int pw_idx = power_well->desc->hsw.idx; 459 u32 val; 460 461 hsw_power_well_pre_disable(dev_priv, 462 power_well->desc->hsw.irq_pipe_mask); 463 464 val = intel_de_read(dev_priv, regs->driver); 465 intel_de_write(dev_priv, regs->driver, 466 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 467 hsw_wait_for_power_well_disable(dev_priv, power_well); 468 } 469 470 #define ICL_AUX_PW_TO_PHY(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A) 471 472 static void 473 icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 474 struct i915_power_well *power_well) 475 { 476 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 477 int pw_idx = power_well->desc->hsw.idx; 478 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); 479 u32 val; 480 481 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); 482 483 val = intel_de_read(dev_priv, regs->driver); 484 intel_de_write(dev_priv, regs->driver, 485 val | HSW_PWR_WELL_CTL_REQ(pw_idx)); 486 487 if (INTEL_GEN(dev_priv) < 12) { 488 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); 489 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), 490 val | ICL_LANE_ENABLE_AUX); 491 } 492 493 hsw_wait_for_power_well_enable(dev_priv, power_well, false); 494 495 /* Display WA #1178: icl */ 496 if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B && 497 !intel_bios_is_port_edp(dev_priv, (enum port)phy)) { 498 val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx)); 499 val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS; 500 intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val); 
501 } 502 } 503 504 static void 505 icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, 506 struct i915_power_well *power_well) 507 { 508 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 509 int pw_idx = power_well->desc->hsw.idx; 510 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); 511 u32 val; 512 513 drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv)); 514 515 val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy)); 516 intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy), 517 val & ~ICL_LANE_ENABLE_AUX); 518 519 val = intel_de_read(dev_priv, regs->driver); 520 intel_de_write(dev_priv, regs->driver, 521 val & ~HSW_PWR_WELL_CTL_REQ(pw_idx)); 522 523 hsw_wait_for_power_well_disable(dev_priv, power_well); 524 } 525 526 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 527 528 static u64 async_put_domains_mask(struct i915_power_domains *power_domains); 529 530 static int power_well_async_ref_count(struct drm_i915_private *dev_priv, 531 struct i915_power_well *power_well) 532 { 533 int refs = hweight64(power_well->desc->domains & 534 async_put_domains_mask(&dev_priv->power_domains)); 535 536 drm_WARN_ON(&dev_priv->drm, refs > power_well->count); 537 538 return refs; 539 } 540 541 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, 542 struct i915_power_well *power_well, 543 struct intel_digital_port *dig_port) 544 { 545 /* Bypass the check if all references are released asynchronously */ 546 if (power_well_async_ref_count(dev_priv, power_well) == 547 power_well->count) 548 return; 549 550 if (drm_WARN_ON(&dev_priv->drm, !dig_port)) 551 return; 552 553 if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) 554 return; 555 556 drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port)); 557 } 558 559 #else 560 561 static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, 562 struct i915_power_well *power_well, 563 struct intel_digital_port *dig_port) 564 { 565 } 566 567 #endif 568 569 #define 
TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) 570 571 static void icl_tc_cold_exit(struct drm_i915_private *i915) 572 { 573 int ret, tries = 0; 574 575 while (1) { 576 ret = sandybridge_pcode_write_timeout(i915, 577 ICL_PCODE_EXIT_TCCOLD, 578 0, 250, 1); 579 if (ret != -EAGAIN || ++tries == 3) 580 break; 581 drm_msleep(1); 582 } 583 584 /* Spec states that TC cold exit can take up to 1ms to complete */ 585 if (!ret) 586 drm_msleep(1); 587 588 /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ 589 drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : 590 "succeeded"); 591 } 592 593 static void 594 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, 595 struct i915_power_well *power_well) 596 { 597 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); 598 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); 599 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 600 bool is_tbt = power_well->desc->hsw.is_tc_tbt; 601 bool timeout_expected; 602 u32 val; 603 604 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); 605 606 val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch)); 607 val &= ~DP_AUX_CH_CTL_TBT_IO; 608 if (is_tbt) 609 val |= DP_AUX_CH_CTL_TBT_IO; 610 intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val); 611 612 val = intel_de_read(dev_priv, regs->driver); 613 intel_de_write(dev_priv, regs->driver, 614 val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx)); 615 616 /* 617 * An AUX timeout is expected if the TBT DP tunnel is down, 618 * or need to enable AUX on a legacy TypeC port as part of the TC-cold 619 * exit sequence. 
620 */ 621 timeout_expected = is_tbt; 622 if (INTEL_GEN(dev_priv) == 11 && dig_port->tc_legacy_port) { 623 icl_tc_cold_exit(dev_priv); 624 timeout_expected = true; 625 } 626 627 hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected); 628 629 if (INTEL_GEN(dev_priv) >= 12 && !is_tbt) { 630 enum tc_port tc_port; 631 632 tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); 633 intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), 634 HIP_INDEX_VAL(tc_port, 0x2)); 635 636 if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), 637 DKL_CMN_UC_DW27_UC_HEALTH, 1)) 638 drm_warn(&dev_priv->drm, 639 "Timeout waiting TC uC health\n"); 640 } 641 } 642 643 static void 644 icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv, 645 struct i915_power_well *power_well) 646 { 647 enum aux_ch aux_ch = icl_tc_phy_aux_ch(dev_priv, power_well); 648 struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch); 649 650 icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port); 651 652 hsw_power_well_disable(dev_priv, power_well); 653 } 654 655 static void 656 icl_aux_power_well_enable(struct drm_i915_private *dev_priv, 657 struct i915_power_well *power_well) 658 { 659 int pw_idx = power_well->desc->hsw.idx; 660 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */ 661 bool is_tbt = power_well->desc->hsw.is_tc_tbt; 662 663 if (is_tbt || intel_phy_is_tc(dev_priv, phy)) 664 return icl_tc_phy_aux_power_well_enable(dev_priv, power_well); 665 else if (IS_ICELAKE(dev_priv)) 666 return icl_combo_phy_aux_power_well_enable(dev_priv, 667 power_well); 668 else 669 return hsw_power_well_enable(dev_priv, power_well); 670 } 671 672 static void 673 icl_aux_power_well_disable(struct drm_i915_private *dev_priv, 674 struct i915_power_well *power_well) 675 { 676 int pw_idx = power_well->desc->hsw.idx; 677 enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx); /* non-TBT only */ 678 bool is_tbt = power_well->desc->hsw.is_tc_tbt; 679 680 if (is_tbt || 
intel_phy_is_tc(dev_priv, phy)) 681 return icl_tc_phy_aux_power_well_disable(dev_priv, power_well); 682 else if (IS_ICELAKE(dev_priv)) 683 return icl_combo_phy_aux_power_well_disable(dev_priv, 684 power_well); 685 else 686 return hsw_power_well_disable(dev_priv, power_well); 687 } 688 689 /* 690 * We should only use the power well if we explicitly asked the hardware to 691 * enable it, so check if it's enabled and also check if we've requested it to 692 * be enabled. 693 */ 694 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv, 695 struct i915_power_well *power_well) 696 { 697 const struct i915_power_well_regs *regs = power_well->desc->hsw.regs; 698 enum i915_power_well_id id = power_well->desc->id; 699 int pw_idx = power_well->desc->hsw.idx; 700 u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) | 701 HSW_PWR_WELL_CTL_STATE(pw_idx); 702 u32 val; 703 704 val = intel_de_read(dev_priv, regs->driver); 705 706 /* 707 * On GEN9 big core due to a DMC bug the driver's request bits for PW1 708 * and the MISC_IO PW will be not restored, so check instead for the 709 * BIOS's own request bits, which are forced-on for these power wells 710 * when exiting DC5/6. 
711 */ 712 if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) && 713 (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO)) 714 val |= intel_de_read(dev_priv, regs->bios); 715 716 return (val & mask) == mask; 717 } 718 719 static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 720 { 721 drm_WARN_ONCE(&dev_priv->drm, 722 (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9), 723 "DC9 already programmed to be enabled.\n"); 724 drm_WARN_ONCE(&dev_priv->drm, 725 intel_de_read(dev_priv, DC_STATE_EN) & 726 DC_STATE_EN_UPTO_DC5, 727 "DC5 still not disabled to enable DC9.\n"); 728 drm_WARN_ONCE(&dev_priv->drm, 729 intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) & 730 HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2), 731 "Power well 2 on.\n"); 732 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), 733 "Interrupts not disabled yet.\n"); 734 735 /* 736 * TODO: check for the following to verify the conditions to enter DC9 737 * state are satisfied: 738 * 1] Check relevant display engine registers to verify if mode set 739 * disable sequence was followed. 740 * 2] Check if display uninitialize sequence is initialized. 741 */ 742 } 743 744 static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) 745 { 746 drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv), 747 "Interrupts not disabled yet.\n"); 748 drm_WARN_ONCE(&dev_priv->drm, 749 intel_de_read(dev_priv, DC_STATE_EN) & 750 DC_STATE_EN_UPTO_DC5, 751 "DC5 still not disabled.\n"); 752 753 /* 754 * TODO: check for the following to verify DC9 state was indeed 755 * entered before programming to disable it: 756 * 1] Check relevant display engine registers to verify if mode 757 * set disable sequence was followed. 758 * 2] Check if display uninitialize sequence is initialized. 
759 */ 760 } 761 762 static void gen9_write_dc_state(struct drm_i915_private *dev_priv, 763 u32 state) 764 { 765 int rewrites = 0; 766 int rereads = 0; 767 u32 v; 768 769 intel_de_write(dev_priv, DC_STATE_EN, state); 770 771 /* It has been observed that disabling the dc6 state sometimes 772 * doesn't stick and dmc keeps returning old value. Make sure 773 * the write really sticks enough times and also force rewrite until 774 * we are confident that state is exactly what we want. 775 */ 776 do { 777 v = intel_de_read(dev_priv, DC_STATE_EN); 778 779 if (v != state) { 780 intel_de_write(dev_priv, DC_STATE_EN, state); 781 rewrites++; 782 rereads = 0; 783 } else if (rereads++ > 5) { 784 break; 785 } 786 787 } while (rewrites < 100); 788 789 if (v != state) 790 drm_err(&dev_priv->drm, 791 "Writing dc state to 0x%x failed, now 0x%x\n", 792 state, v); 793 794 /* Most of the times we need one retry, avoid spam */ 795 if (rewrites > 1) 796 drm_dbg_kms(&dev_priv->drm, 797 "Rewrote dc state to 0x%x %d times\n", 798 state, rewrites); 799 } 800 801 static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) 802 { 803 u32 mask; 804 805 mask = DC_STATE_EN_UPTO_DC5; 806 807 if (INTEL_GEN(dev_priv) >= 12) 808 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 809 | DC_STATE_EN_DC9; 810 else if (IS_GEN(dev_priv, 11)) 811 mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; 812 else if (IS_GEN9_LP(dev_priv)) 813 mask |= DC_STATE_EN_DC9; 814 else 815 mask |= DC_STATE_EN_UPTO_DC6; 816 817 return mask; 818 } 819 820 static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv) 821 { 822 u32 val; 823 824 val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv); 825 826 drm_dbg_kms(&dev_priv->drm, 827 "Resetting DC state tracking from %02x to %02x\n", 828 dev_priv->csr.dc_state, val); 829 dev_priv->csr.dc_state = val; 830 } 831 832 /** 833 * gen9_set_dc_state - set target display C power state 834 * @dev_priv: i915 device instance 835 * @state: target DC power state 836 * - 
DC_STATE_DISABLE 837 * - DC_STATE_EN_UPTO_DC5 838 * - DC_STATE_EN_UPTO_DC6 839 * - DC_STATE_EN_DC9 840 * 841 * Signal to DMC firmware/HW the target DC power state passed in @state. 842 * DMC/HW can turn off individual display clocks and power rails when entering 843 * a deeper DC power state (higher in number) and turns these back when exiting 844 * that state to a shallower power state (lower in number). The HW will decide 845 * when to actually enter a given state on an on-demand basis, for instance 846 * depending on the active state of display pipes. The state of display 847 * registers backed by affected power rails are saved/restored as needed. 848 * 849 * Based on the above enabling a deeper DC power state is asynchronous wrt. 850 * enabling it. Disabling a deeper power state is synchronous: for instance 851 * setting %DC_STATE_DISABLE won't complete until all HW resources are turned 852 * back on and register state is restored. This is guaranteed by the MMIO write 853 * to DC_STATE_EN blocking until the state is restored. 
854 */ 855 static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) 856 { 857 u32 val; 858 u32 mask; 859 860 if (drm_WARN_ON_ONCE(&dev_priv->drm, 861 state & ~dev_priv->csr.allowed_dc_mask)) 862 state &= dev_priv->csr.allowed_dc_mask; 863 864 val = intel_de_read(dev_priv, DC_STATE_EN); 865 mask = gen9_dc_mask(dev_priv); 866 drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n", 867 val & mask, state); 868 869 /* Check if DMC is ignoring our DC state requests */ 870 if ((val & mask) != dev_priv->csr.dc_state) 871 drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n", 872 dev_priv->csr.dc_state, val & mask); 873 874 val &= ~mask; 875 val |= state; 876 877 gen9_write_dc_state(dev_priv, val); 878 879 dev_priv->csr.dc_state = val & mask; 880 } 881 882 static u32 883 sanitize_target_dc_state(struct drm_i915_private *dev_priv, 884 u32 target_dc_state) 885 { 886 u32 states[] = { 887 DC_STATE_EN_UPTO_DC6, 888 DC_STATE_EN_UPTO_DC5, 889 DC_STATE_EN_DC3CO, 890 DC_STATE_DISABLE, 891 }; 892 int i; 893 894 for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { 895 if (target_dc_state != states[i]) 896 continue; 897 898 if (dev_priv->csr.allowed_dc_mask & target_dc_state) 899 break; 900 901 target_dc_state = states[i + 1]; 902 } 903 904 return target_dc_state; 905 } 906 907 static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) 908 { 909 drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n"); 910 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); 911 } 912 913 static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) 914 { 915 u32 val; 916 917 drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n"); 918 val = intel_de_read(dev_priv, DC_STATE_EN); 919 val &= ~DC_STATE_DC3CO_STATUS; 920 intel_de_write(dev_priv, DC_STATE_EN, val); 921 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 922 /* 923 * Delay of 200us DC3CO Exit time B.Spec 49196 924 */ 925 usleep_range(200, 210); 926 } 927 928 static void bxt_enable_dc9(struct drm_i915_private *dev_priv) 929 { 
930 assert_can_enable_dc9(dev_priv); 931 932 drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n"); 933 /* 934 * Power sequencer reset is not needed on 935 * platforms with South Display Engine on PCH, 936 * because PPS registers are always on. 937 */ 938 if (!HAS_PCH_SPLIT(dev_priv)) 939 intel_power_sequencer_reset(dev_priv); 940 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 941 } 942 943 static void bxt_disable_dc9(struct drm_i915_private *dev_priv) 944 { 945 assert_can_disable_dc9(dev_priv); 946 947 drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n"); 948 949 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 950 951 intel_pps_unlock_regs_wa(dev_priv); 952 } 953 954 static void assert_csr_loaded(struct drm_i915_private *dev_priv) 955 { 956 drm_WARN_ONCE(&dev_priv->drm, 957 !intel_de_read(dev_priv, CSR_PROGRAM(0)), 958 "CSR program storage start is NULL\n"); 959 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE), 960 "CSR SSP Base Not fine\n"); 961 drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL), 962 "CSR HTP Not fine\n"); 963 } 964 965 static struct i915_power_well * 966 lookup_power_well(struct drm_i915_private *dev_priv, 967 enum i915_power_well_id power_well_id) 968 { 969 struct i915_power_well *power_well; 970 971 for_each_power_well(dev_priv, power_well) 972 if (power_well->desc->id == power_well_id) 973 return power_well; 974 975 /* 976 * It's not feasible to add error checking code to the callers since 977 * this condition really shouldn't happen and it doesn't even make sense 978 * to abort things like display initialization sequences. Just return 979 * the first power well and hope the WARN gets reported so we can fix 980 * our driver. 981 */ 982 drm_WARN(&dev_priv->drm, 1, 983 "Power well %d not defined for this platform\n", 984 power_well_id); 985 return &dev_priv->power_domains.power_wells[0]; 986 } 987 988 /** 989 * intel_display_power_set_target_dc_state - Set target dc state. 
990 * @dev_priv: i915 device 991 * @state: state which needs to be set as target_dc_state. 992 * 993 * This function set the "DC off" power well target_dc_state, 994 * based upon this target_dc_stste, "DC off" power well will 995 * enable desired DC state. 996 */ 997 void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, 998 u32 state) 999 { 1000 struct i915_power_well *power_well; 1001 bool dc_off_enabled; 1002 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1003 1004 mutex_lock(&power_domains->lock); 1005 power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); 1006 1007 if (drm_WARN_ON(&dev_priv->drm, !power_well)) 1008 goto unlock; 1009 1010 state = sanitize_target_dc_state(dev_priv, state); 1011 1012 if (state == dev_priv->csr.target_dc_state) 1013 goto unlock; 1014 1015 dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv, 1016 power_well); 1017 /* 1018 * If DC off power well is disabled, need to enable and disable the 1019 * DC off power well to effect target DC state. 
	 */
	if (!dc_off_enabled)
		power_well->desc->ops->enable(dev_priv, power_well);

	dev_priv->csr.target_dc_state = state;

	if (!dc_off_enabled)
		power_well->desc->ops->disable(dev_priv, power_well);

unlock:
	mutex_unlock(&power_domains->lock);
}

/* Check the preconditions for programming DC5 entry. */
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
	enum i915_power_well_id high_pg;

	/* Power wells at this level and above must be disabled for DC5 entry */
	if (INTEL_GEN(dev_priv) >= 12)
		high_pg = ICL_DISP_PW_3;
	else
		high_pg = SKL_DISP_PW_2;

	drm_WARN_ONCE(&dev_priv->drm,
		      intel_display_power_well_is_enabled(dev_priv, high_pg),
		      "Power wells above platform's DC5 limit still enabled.\n");

	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC5),
		      "DC5 already programmed to be enabled.\n");
	assert_rpm_wakelock_held(&dev_priv->runtime_pm);

	assert_csr_loaded(dev_priv);
}

/* Allow the DMC firmware to enter DC5 when the display is idle. */
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc5(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

/* Check the preconditions for programming DC6 entry. */
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
	drm_WARN_ONCE(&dev_priv->drm,
		      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
		      "Backlight is not disabled.\n");
	drm_WARN_ONCE(&dev_priv->drm,
		      (intel_de_read(dev_priv, DC_STATE_EN) &
		       DC_STATE_EN_UPTO_DC6),
		      "DC6 already programmed to be enabled.\n");

	assert_csr_loaded(dev_priv);
}

/* Allow the DMC firmware to enter up to DC6 when the display is idle. */
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
	assert_can_enable_dc6(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

	/* Wa Display #1183: skl,kbl,cfl */
	if (IS_GEN9_BC(dev_priv))
		intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
			       intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

	gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}

/*
 * Sync the driver's power well request state with whatever the BIOS left
 * behind: move any BIOS-owned request bit over to the driver register.
 */
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
	int pw_idx = power_well->desc->hsw.idx;
	u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
	u32 bios_req = intel_de_read(dev_priv, regs->bios);

	/* Take over the request bit if set by BIOS. */
	if (bios_req & mask) {
		u32 drv_req = intel_de_read(dev_priv, regs->driver);

		if (!(drv_req & mask))
			intel_de_write(dev_priv, regs->driver, drv_req | mask);
		intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
	}
}

/* BXT/GLK DPIO common lane power wells are driven via the PHY init code. */
static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

/* Verify the HW state of every DPIO PHY power well with a reference held. */
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *power_well;

	power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	power_well =
lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	if (power_well->count > 0)
		bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

	if (IS_GEMINILAKE(dev_priv)) {
		power_well = lookup_power_well(dev_priv,
					       GLK_DISP_PW_DPIO_CMN_C);
		if (power_well->count > 0)
			bxt_ddi_phy_verify_state(dev_priv,
						 power_well->desc->bxt.phy);
	}
}

/* The "DC off" well is enabled iff no DC state (DC3CO/DC5/DC6) is active. */
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
		(intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

/* WARN if the HW DBuf slice state disagrees with our bookkeeping. */
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
	u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
	u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

	drm_WARN(&dev_priv->drm,
		 hw_enabled_dbuf_slices != enabled_dbuf_slices,
		 "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
		 hw_enabled_dbuf_slices,
		 enabled_dbuf_slices);
}

/*
 * Exit whatever DC state is currently targeted and sanity-check the state
 * (cdclk, DBuf, PHYs) that the DMC firmware should have preserved.
 */
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
	struct intel_cdclk_config cdclk_config = {};

	if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
		tgl_disable_dc3co(dev_priv);
		return;
	}

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
	/* Can't read out voltage_level so can't use intel_cdclk_changed() */
	drm_WARN_ON(&dev_priv->drm,
		    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
					      &cdclk_config));

	gen9_assert_dbuf_enabled(dev_priv);

	if (IS_GEN9_LP(dev_priv))
		bxt_verify_ddi_phy_power_wells(dev_priv);

	if (INTEL_GEN(dev_priv) >= 11)
		/*
		 * DMC retains HW context only for port A, the other combo
		 * PHY's HW context for port B is lost after DC transitions,
		 * so we need to restore it manually.
		 */
		intel_combo_phy_init(dev_priv);
}

/* Enabling "DC off" means leaving any DC state. */
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	gen9_disable_dc_states(dev_priv);
}

/* Disabling "DC off" re-arms the target DC state (needs DMC firmware). */
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	if (!dev_priv->csr.dmc_payload)
		return;

	/* target_dc_state was sanitized, so only these values can occur */
	switch (dev_priv->csr.target_dc_state) {
	case DC_STATE_EN_DC3CO:
		tgl_enable_dc3co(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC6:
		skl_enable_dc6(dev_priv);
		break;
	case DC_STATE_EN_UPTO_DC5:
		gen9_enable_dc5(dev_priv);
		break;
	}
}

/* No-op hooks for platforms whose wells need no HW programming. */
static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

/* i830: pipes must be kept running; model them as a power well. */
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
					 struct i915_power_well *power_well)
{
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_A);
	if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
		i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	i830_disable_pipe(dev_priv, PIPE_B);
	i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	return intel_de_read(dev_priv,
			     PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
		intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

/* Bring the pipes in line with the power well's refcount on takeover. */
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		i830_pipes_power_well_enable(dev_priv, power_well);
	else
		i830_pipes_power_well_disable(dev_priv, power_well);
}

/*
 * Program a VLV power well on/off via the Punit and wait (up to 100ms)
 * for the status register to reflect the requested state.
 */
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	int pw_idx = power_well->desc->vlv.idx;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
			 PUNIT_PWRGT_PWR_GATE(pw_idx);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

/* Read back a VLV power well state from the Punit, sanity-checking it. */
static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int pw_idx = power_well->desc->vlv.idx;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(pw_idx);
	ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
		    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	drm_WARN_ON(&dev_priv->drm, ctrl != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
	u32 val;

	/*
	 * On driver load, a pipe may be active and driving a DSI display.
	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
	 * (and never recovering) in this case. intel_dsi_post_disable() will
	 * clear it when we turn off the display.
	 */
	val = intel_de_read(dev_priv, DSPCLK_GATE_D);
	/* keep only the DPOUNIT bit (see comment above), then force VRHUNIT */
	val &= DPOUNIT_CLOCK_GATE_DISABLE;
	val |= VRHUNIT_CLOCK_GATE_DISABLE;
	intel_de_write(dev_priv, DSPCLK_GATE_D, val);

	/*
	 * Disable trickle feed and enable pnd deadline calculation
	 */
	intel_de_write(dev_priv, MI_ARB_VLV,
		       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
	intel_de_write(dev_priv, CBR1_VLV, 0);

	drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
	intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
		       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
					 1000));
}

/*
 * Bring the VLV/CHV display HW back up after the display power well was
 * enabled: ref clocks, clock gating, display irqs, and (outside of driver
 * init) hotplug, CRT and PPS state.
 */
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	enum pipe pipe;

	/*
	 * Enable the CRI clock source so we can get at the
	 * display and the reference clock for VGA
	 * hotplug / manual detection. Supposedly DSI also
	 * needs the ref clock up and running.
	 *
	 * CHV DPLL B/C have some issues if VGA mode is enabled.
	 */
	for_each_pipe(dev_priv, pipe) {
		u32 val = intel_de_read(dev_priv, DPLL(pipe));

		val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
		if (pipe != PIPE_A)
			val |= DPLL_INTEGRATED_CRI_CLK_VLV;

		intel_de_write(dev_priv, DPLL(pipe), val);
	}

	vlv_init_display_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv);

	/* Re-enable the ADPA, if we have one */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		if (encoder->type == INTEL_OUTPUT_ANALOG)
			intel_crt_reset(&encoder->base);
	}

	intel_vga_redisable_power_on(dev_priv);

	intel_pps_unlock_regs_wa(dev_priv);
}

/* Tear down the SW state that depends on the display power well. */
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);

	intel_power_sequencer_reset(dev_priv);

	/* Prevent us from re-enabling polling on accident in late suspend */
#ifdef __linux__
	if (!dev_priv->drm.dev->power.is_suspended)
#else
	/* OpenBSD: "cold" is set while autoconf/suspend is in progress */
	if (!cold)
#endif
		intel_hpd_poll_init(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	vlv_set_power_well(dev_priv, power_well, false);
}

static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

	vlv_set_power_well(dev_priv, power_well, true);

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 * b.
	 *    The other bits such as sfr settings / modesel may all
	 *    be set to 0.
	 *
	 * This should only be done on init and resume from S3 with
	 * both PLLs disabled, or we risk losing DPIO and PLL
	 * synchronization.
	 */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum pipe pipe;

	/* DPIO common lane must only go down with all PLLs off */
	for_each_pipe(dev_priv, pipe)
		assert_pll_disabled(dev_priv, pipe);

	/* Assert common reset */
	intel_de_write(dev_priv, DPIO_CTL,
		       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

	vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

/* true iff all of @bits are set in @val */
#define BITS_SET(val, bits) (((val) & (bits)) == (bits))

/*
 * Compute the DISPLAY_PHY_STATUS value we expect from the current
 * chv_phy_control / power well state and WARN if the HW disagrees.
 */
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
	u32 phy_control = dev_priv->chv_phy_control;
	u32 phy_status = 0;
	u32 phy_status_mask = 0xffffffff;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[DPIO_PHY0])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
				     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

	if (!dev_priv->chv_phy_assert[DPIO_PHY1])
		phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
				     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY0);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

		/* CL1 is on whenever anything is on in either channel */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

		/*
		 * The DPLLB check accounts for the pipe B + port A usage
		 * with CL2 powered up but all the lanes in the second channel
		 * powered down.
		 */
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
		    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		phy_status |= PHY_POWERGOOD(DPIO_PHY1);

		/* this assumes override is only used to enable lanes */
		if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
			phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
		if (BITS_SET(phy_control,
			     PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
			phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
	}

	phy_status &= phy_status_mask;

	/*
	 * The PHY may be busy with some initial calibration and whatnot,
	 * so the power state can take a while to actually change.
	 */
	if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
				       phy_status_mask, phy_status, 10))
		drm_err(&dev_priv->drm,
			"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
			intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
			phy_status, dev_priv->chv_phy_control);
}

#undef BITS_SET

/* Power up a CHV DPIO common lane PHY and configure its power-down modes. */
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	enum dpio_phy phy;
	enum pipe pipe;
	u32 tmp;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		pipe = PIPE_A;
		phy = DPIO_PHY0;
	} else {
		pipe = PIPE_C;
		phy = DPIO_PHY1;
	}

	/* since ref/cri clock was enabled */
	udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
	vlv_set_power_well(dev_priv, power_well, true);

	/* Poll for phypwrgood signal */
	if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
				  PHY_POWERGOOD(phy), 1))
		drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
			phy);

	vlv_dpio_get(dev_priv);

	/* Enable dynamic power down */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
	tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
		DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
		tmp |= DPIO_DYNPWRDOWNEN_CH1;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
	} else {
		/*
		 * Force the non-existing CL2 off. BXT does this
		 * too, so maybe it saves some power even though
		 * CL2 doesn't exist?
		 */
		tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
		tmp |= DPIO_CL2_LDOFUSE_PWRENB;
		vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
	}

	vlv_dpio_put(dev_priv);

	dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);
}

/* Power down a CHV DPIO common lane PHY; all its PLLs must be off. */
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
					    struct i915_power_well *power_well)
{
	enum dpio_phy phy;

	drm_WARN_ON_ONCE(&dev_priv->drm,
			 power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
			 power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

	if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
		phy = DPIO_PHY0;
		assert_pll_disabled(dev_priv, PIPE_A);
		assert_pll_disabled(dev_priv, PIPE_B);
	} else {
		phy = DPIO_PHY1;
		assert_pll_disabled(dev_priv, PIPE_C);
	}

	dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	vlv_set_power_well(dev_priv, power_well, false);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
		    phy, dev_priv->chv_phy_control);

	/* PHY is fully reset now, so we can enable the PHY state asserts */
	dev_priv->chv_phy_assert[phy] = true;

	assert_chv_phy_status(dev_priv);
}

/*
 * Cross-check the requested lane power-down override against the lane
 * power-down status bits the PHY actually reports.
 */
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
				     enum dpio_channel ch, bool override, unsigned int mask)
{
	enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
	u32 reg, val, expected, actual;

	/*
	 * The BIOS can leave the PHY in some weird state
	 * where it doesn't fully power down some parts.
	 * Disable the asserts until the PHY has been fully
	 * reset (ie. the power well has been disabled at
	 * least once).
	 */
	if (!dev_priv->chv_phy_assert[phy])
		return;

	if (ch == DPIO_CH0)
		reg = _CHV_CMN_DW0_CH0;
	else
		reg = _CHV_CMN_DW6_CH1;

	vlv_dpio_get(dev_priv);
	val = vlv_dpio_read(dev_priv, pipe, reg);
	vlv_dpio_put(dev_priv);

	/*
	 * This assumes !override is only used when the port is disabled.
	 * All lanes should power down even without the override when
	 * the port is disabled.
	 */
	if (!override || mask == 0xf) {
		expected = DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
		/*
		 * If CH1 common lane is not active anymore
		 * (eg. for pipe B DPLL) the entire channel will
		 * shut down, which causes the common lane registers
		 * to read as 0. That means we can't actually check
		 * the lane power down status bits, but as the entire
		 * register reads as 0 it's a good indication that the
		 * channel is indeed entirely powered down.
		 */
		if (ch == DPIO_CH1 && val == 0)
			expected = 0;
	} else if (mask != 0x0) {
		expected = DPIO_ANYDL_POWERDOWN;
	} else {
		expected = 0;
	}

	if (ch == DPIO_CH0)
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0;
	else
		actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
	actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;

	drm_WARN(&dev_priv->drm, actual != expected,
		 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
		 !!(actual & DPIO_ALLDL_POWERDOWN),
		 !!(actual & DPIO_ANYDL_POWERDOWN),
		 !!(expected & DPIO_ALLDL_POWERDOWN),
		 !!(expected & DPIO_ANYDL_POWERDOWN),
		 reg, val);
}

/*
 * Set/clear the power-down override enable for a PHY channel.
 * Returns the previous override-enable state so callers can restore it.
 */
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
			  enum dpio_channel ch, bool override)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool was_override;

	mutex_lock(&power_domains->lock);

	was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	if (override == was_override)
		goto out;

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
		    phy, ch, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

out:
	mutex_unlock(&power_domains->lock);

	return was_override;
}

/* Program the per-lane power-down override mask for the encoder's channel. */
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
			     bool override, unsigned int mask)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder));
	enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));

	mutex_lock(&power_domains->lock);

	dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch);
	dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch);

	if (override)
		dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
	else
		dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);

	intel_de_write(dev_priv,
		       DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);

	drm_dbg_kms(&dev_priv->drm,
		    "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
		    phy, ch, mask, dev_priv->chv_phy_control);

	assert_chv_phy_status(dev_priv);

	assert_chv_phy_powergate(dev_priv, phy, ch, override, mask);

	mutex_unlock(&power_domains->lock);
}

/* Read back the CHV pipe A power well state from the Punit. */
static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	enum pipe pipe = PIPE_A;
	bool enabled;
	u32 state, ctrl;

	vlv_punit_get(dev_priv);

	state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
		    state != DP_SSS_PWR_GATE(pipe));
	enabled = state == DP_SSS_PWR_ON(pipe);

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
	/* control bits sit 16 below the corresponding status bits */
	drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);

	vlv_punit_put(dev_priv);

	return enabled;
}

/*
 * Program the CHV pipe A power well on/off via the Punit and wait
 * (up to 100ms) for the status to reflect the requested state.
 */
static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
				    struct i915_power_well *power_well,
				    bool enable)
{
	enum pipe pipe = PIPE_A;
	u32 state;
	u32 ctrl;

	state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);

	vlv_punit_get(dev_priv);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
	ctrl &= ~DP_SSC_MASK(pipe);
	ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);

	if (wait_for(COND, 100))
		drm_err(&dev_priv->drm,
			"timeout setting power well state %08x (%08x)\n",
			state,
			vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));

#undef COND

out:
	vlv_punit_put(dev_priv);
}

/* Restore DISPLAY_PHY_CONTROL from the SW shadow on state takeover. */
static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
		       dev_priv->chv_phy_control);
}

static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
				       struct i915_power_well *power_well)
{
	chv_set_pipe_power_well(dev_priv, power_well, true);

	vlv_display_power_well_init(dev_priv);
}

static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
					struct i915_power_well *power_well)
{
	vlv_display_power_well_deinit(dev_priv);

	chv_set_pipe_power_well(dev_priv, power_well, false);
}

/* Union of both async-put domain masks, without the disjointness check. */
static u64 __async_put_domains_mask(struct i915_power_domains *power_domains)
{
	return power_domains->async_put_domains[0] |
	       power_domains->async_put_domains[1];
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/* The two async-put masks must never both contain the same domain. */
static bool
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] &
			    power_domains->async_put_domains[1]);
}

/*
 * Invariants: the async-put wakeref is held iff any domain is pending an
 * async put, and each pending domain has exactly one reference left.
 */
static bool
__async_put_domains_state_ok(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;
	bool err = false;

	err |= !assert_async_put_domain_masks_disjoint(power_domains);
	err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref !=
			   !!__async_put_domains_mask(power_domains));

	for_each_power_domain(domain, __async_put_domains_mask(power_domains))
		err |= drm_WARN_ON(&i915->drm,
				   power_domains->domain_use_count[domain] != 1);

	return !err;
}

/* Dump the use counts of every domain in @mask for debugging. */
static void print_power_domains(struct i915_power_domains *power_domains,
				const char *prefix, u64 mask)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	enum intel_display_power_domain domain;

	/*
	 * NOTE(review): "%lu" with hweight64() — verify hweight64()'s return
	 * type matches; a %u/%lu mismatch here would be flagged by -Wformat.
	 */
	drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask));
	for_each_power_domain(domain, mask)
		drm_dbg(&i915->drm, "%s use_count %d\n",
			intel_display_power_domain_str(domain),
			power_domains->domain_use_count[domain]);
}

static void
print_async_put_domains_state(struct i915_power_domains *power_domains)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);

	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
		power_domains->async_put_wakeref);

	print_power_domains(power_domains, "async_put_domains[0]",
			    power_domains->async_put_domains[0]);
	print_power_domains(power_domains, "async_put_domains[1]",
			    power_domains->async_put_domains[1]);
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
	if (!__async_put_domains_state_ok(power_domains))
		print_async_put_domains_state(power_domains);
}

#else

/* Debug-only checks compile out when runtime PM debugging is disabled. */
static void
assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains)
{
}

static void
verify_async_put_domains_state(struct i915_power_domains *power_domains)
{
}

#endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */

/*
 * Return the union of both pending async-put domain masks, asserting first
 * that the two masks are disjoint.
 */
static u64 async_put_domains_mask(struct i915_power_domains *power_domains)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	return __async_put_domains_mask(power_domains);
}

/* Remove @domain from both pending async-put masks. */
static void
async_put_domains_clear_domain(struct i915_power_domains *power_domains,
			       enum intel_display_power_domain domain)
{
	assert_async_put_domain_masks_disjoint(power_domains);

	power_domains->async_put_domains[0] &= ~BIT_ULL(domain);
	power_domains->async_put_domains[1] &= ~BIT_ULL(domain);
}

/*
 * If @domain has a reference pending to be dropped by the async-put work,
 * take over that reference instead of acquiring a new one. When the last
 * pending domain is grabbed, cancel the work and drop the raw wakeref that
 * was held on its behalf. Returns true if a pending reference was taken
 * over. Must be called with power_domains->lock held.
 */
static bool
intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	bool ret = false;

	if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain)))
		goto out_verify;

	async_put_domains_clear_domain(power_domains, domain);

	ret = true;

	/* Keep the work and its wakeref if other domains are still pending. */
	if (async_put_domains_mask(power_domains))
		goto out_verify;

	cancel_delayed_work(&power_domains->async_put_work);
	intel_runtime_pm_put_raw(&dev_priv->runtime_pm,
				 fetch_and_zero(&power_domains->async_put_wakeref));
out_verify:
	verify_async_put_domains_state(power_domains);

	return ret;
}

/*
 * Grab one reference on @domain, enabling the power wells it depends on,
 * unless the reference could be taken over from a pending async put. Must
 * be called with power_domains->lock held.
 */
static void
__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	if (intel_display_power_grab_async_put_ref(dev_priv, domain))
		return;

	for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
		intel_power_well_get(dev_priv, power_well);

	power_domains->domain_use_count[domain]++;
}

/**
 * intel_display_power_get - grab a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
 */
intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
					enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	mutex_lock(&power_domains->lock);
	__intel_display_power_get_domain(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return wakeref;
}

/**
 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function grabs a power domain reference for @domain and ensures that the
 * power domain and all its parents are powered up. Therefore users should only
 * grab a reference to the innermost power domain they need.
 *
 * Any power domain reference obtained by this function must have a symmetric
 * call to intel_display_power_put() to release the reference again.
2124 */ 2125 intel_wakeref_t 2126 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 2127 enum intel_display_power_domain domain) 2128 { 2129 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2130 intel_wakeref_t wakeref; 2131 bool is_enabled; 2132 2133 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 2134 if (!wakeref) 2135 return false; 2136 2137 mutex_lock(&power_domains->lock); 2138 2139 if (__intel_display_power_is_enabled(dev_priv, domain)) { 2140 __intel_display_power_get_domain(dev_priv, domain); 2141 is_enabled = true; 2142 } else { 2143 is_enabled = false; 2144 } 2145 2146 mutex_unlock(&power_domains->lock); 2147 2148 if (!is_enabled) { 2149 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2150 wakeref = 0; 2151 } 2152 2153 return wakeref; 2154 } 2155 2156 static void 2157 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 2158 enum intel_display_power_domain domain) 2159 { 2160 struct i915_power_domains *power_domains; 2161 struct i915_power_well *power_well; 2162 const char *name = intel_display_power_domain_str(domain); 2163 2164 power_domains = &dev_priv->power_domains; 2165 2166 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], 2167 "Use count on domain %s is already zero\n", 2168 name); 2169 drm_WARN(&dev_priv->drm, 2170 async_put_domains_mask(power_domains) & BIT_ULL(domain), 2171 "Async disabling of domain %s is pending\n", 2172 name); 2173 2174 power_domains->domain_use_count[domain]--; 2175 2176 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) 2177 intel_power_well_put(dev_priv, power_well); 2178 } 2179 2180 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 2181 enum intel_display_power_domain domain) 2182 { 2183 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2184 2185 mutex_lock(&power_domains->lock); 2186 __intel_display_power_put_domain(dev_priv, domain); 2187 
	mutex_unlock(&power_domains->lock);
}

/**
 * intel_display_power_put_unchecked - release an unchecked power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_display_power_put() instead.
 */
void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
				       enum intel_display_power_domain domain)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
}

/*
 * Hand @wakeref over to the delayed async-put worker, scheduled to run after
 * a 100 msec grace period. Must be called with power_domains->lock held and
 * with no async-put work already queued (warned on otherwise).
 */
static void
queue_async_put_domains_work(struct i915_power_domains *power_domains,
			     intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
	power_domains->async_put_wakeref = wakeref;
	drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq,
						    &power_domains->async_put_work,
						    msecs_to_jiffies(100)));
}

/*
 * Drop the domain references pending in @mask. Must be called with
 * power_domains->lock held and with a raw runtime-PM wakeref held.
 */
static void
release_async_put_domains(struct i915_power_domains *power_domains, u64 mask)
{
	struct drm_i915_private *dev_priv =
		container_of(power_domains, struct drm_i915_private,
			     power_domains);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	/*
	 * The caller must hold already raw wakeref, upgrade that to a proper
	 * wakeref to make the state checker happy about the HW access during
	 * power well disabling.
	 */
	assert_rpm_raw_wakeref_held(rpm);
	wakeref = intel_runtime_pm_get(rpm);

	for_each_power_domain(domain, mask) {
		/* Clear before put, so put's sanity check is happy. */
		async_put_domains_clear_domain(power_domains, domain);
		__intel_display_power_put_domain(dev_priv, domain);
	}

	intel_runtime_pm_put(rpm, wakeref);
}

/*
 * Delayed worker dropping the domain references queued up by
 * __intel_display_power_put_async() once their grace period expired.
 */
static void
intel_display_power_put_async_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     power_domains.async_put_work.work);
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm);
	intel_wakeref_t old_work_wakeref = 0;

	mutex_lock(&power_domains->lock);

	/*
	 * Bail out if all the domain refs pending to be released were grabbed
	 * by subsequent gets or a flush_work.
	 */
	old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!old_work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  power_domains->async_put_domains[0]);

	/* Requeue the work if more domains were async put meanwhile. */
	if (power_domains->async_put_domains[1]) {
		power_domains->async_put_domains[0] =
			fetch_and_zero(&power_domains->async_put_domains[1]);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&new_work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (old_work_wakeref)
		intel_runtime_pm_put_raw(rpm, old_work_wakeref);
	if (new_work_wakeref)
		intel_runtime_pm_put_raw(rpm, new_work_wakeref);
}

/**
 * intel_display_power_put_async - release a power domain reference asynchronously
 * @i915: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get*() and schedules a work to power down the
 * corresponding hardware block if this is the last reference.
 */
void __intel_display_power_put_async(struct drm_i915_private *i915,
				     enum intel_display_power_domain domain,
				     intel_wakeref_t wakeref)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm);

	mutex_lock(&power_domains->lock);

	/* Not the last reference: drop it synchronously right away. */
	if (power_domains->domain_use_count[domain] > 1) {
		__intel_display_power_put_domain(i915, domain);

		goto out_verify;
	}

	drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);

	/* Let a pending work requeue itself or queue a new one. */
	if (power_domains->async_put_wakeref) {
		power_domains->async_put_domains[1] |= BIT_ULL(domain);
	} else {
		power_domains->async_put_domains[0] |= BIT_ULL(domain);
		queue_async_put_domains_work(power_domains,
					     fetch_and_zero(&work_wakeref));
	}

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(rpm, work_wakeref);

	intel_runtime_pm_put(rpm, wakeref);
}

/**
 * intel_display_power_flush_work - flushes the async display power disabling work
 * @i915: i915 device instance
 *
 * Flushes any pending work that was scheduled by a preceding
 * intel_display_power_put_async() call, completing the disabling of the
 * corresponding power domains.
 *
 * Note that the work handler function may still be running after this
 * function returns; to ensure that the work handler isn't running use
 * intel_display_power_flush_work_sync() instead.
 */
void intel_display_power_flush_work(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t work_wakeref;

	mutex_lock(&power_domains->lock);

	/* Nothing to flush if the work already ran or was never queued. */
	work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref);
	if (!work_wakeref)
		goto out_verify;

	release_async_put_domains(power_domains,
				  async_put_domains_mask(power_domains));
	cancel_delayed_work(&power_domains->async_put_work);

out_verify:
	verify_async_put_domains_state(power_domains);

	mutex_unlock(&power_domains->lock);

	if (work_wakeref)
		intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref);
}

/**
 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work
 * @i915: i915 device instance
 *
 * Like intel_display_power_flush_work(), but also ensure that the work
 * handler function is not running any more when this function returns.
 */
static void
intel_display_power_flush_work_sync(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	intel_display_power_flush_work(i915);
	cancel_delayed_work_sync(&power_domains->async_put_work);

	verify_async_put_domains_state(power_domains);

	drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_display_power_put - release a power domain reference
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 * @wakeref: wakeref acquired for the reference that is being released
 *
 * This function drops the power domain reference obtained by
 * intel_display_power_get() and might power down the corresponding hardware
 * block right away if this is the last reference.
 */
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain,
			     intel_wakeref_t wakeref)
{
	__intel_display_power_put(dev_priv, domain);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
}
#endif

/*
 * Power-domain masks for the individual power wells, per platform. A well
 * must be enabled whenever any domain in its mask holds a reference.
 */
#define I830_PIPES_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DSI) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CHV_DPIO_CMN_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define HSW_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BDW_DISPLAY_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_GMBUS) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_GT_IRQ) |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * ICL PW_0/PG_0 domains (HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - central power except FBC
 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers
 * ICL PW_1/PG_1 domains (HW/DMC control):
 * - DBUF function
 * - PIPE_A and its planes, except VGA
 * - transcoder EDP + PSR
 * - transcoder DSI
 * - DDI_A
 * - FBC
 */
#define ICL_PW_4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/* VDSC/joining */
#define ICL_PW_3_POWER_DOMAINS (	\
	ICL_PW_4_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - transcoder WD
	 * - KVMR (HW control)
	 */
#define ICL_PW_2_POWER_DOMAINS (	\
	ICL_PW_3_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))
	/*
	 * - KVMR (HW control)
	 */
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	ICL_PW_2_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define ICL_DDI_IO_A_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
#define ICL_DDI_IO_B_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
#define ICL_DDI_IO_C_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define ICL_DDI_IO_D_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define ICL_DDI_IO_E_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define ICL_DDI_IO_F_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))

#define ICL_AUX_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define ICL_AUX_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define ICL_AUX_C_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define ICL_AUX_D_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define ICL_AUX_E_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define ICL_AUX_F_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C_TBT))
#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))

#define TGL_PW_5_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_D) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_4_POWER_DOMAINS (	\
	TGL_PW_5_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_3_POWER_DOMAINS (	\
	TGL_PW_4_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUX_G) |	\
	BIT_ULL(POWER_DOMAIN_AUX_H) |	\
	BIT_ULL(POWER_DOMAIN_AUX_I) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_PW_2_POWER_DOMAINS (	\
	TGL_PW_3_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	TGL_PW_3_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_AUX_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define TGL_DDI_IO_D_TC1_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO))
#define TGL_DDI_IO_E_TC2_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO))
#define TGL_DDI_IO_F_TC3_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO))
#define TGL_DDI_IO_G_TC4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO))
#define TGL_DDI_IO_H_TC5_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO))
#define TGL_DDI_IO_I_TC6_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO))

#define TGL_AUX_A_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_IO_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A))
#define TGL_AUX_B_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_B))
#define TGL_AUX_C_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_C))
#define TGL_AUX_D_TC1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D))
#define TGL_AUX_E_TC2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E))
#define TGL_AUX_F_TC3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F))
#define TGL_AUX_G_TC4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G))
#define TGL_AUX_H_TC5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H))
#define TGL_AUX_I_TC6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I))
#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT))
#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT))
#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT))
#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT))
#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT))
#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))

#define TGL_TC_COLD_OFF_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F) |	\
	BIT_ULL(POWER_DOMAIN_AUX_G) |	\
	BIT_ULL(POWER_DOMAIN_AUX_H) |	\
	BIT_ULL(POWER_DOMAIN_AUX_I) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_G_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_H_TBT) |	\
	BIT_ULL(POWER_DOMAIN_AUX_I_TBT) |	\
	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))

#define RKL_PW_4_POWER_DOMAINS (	\
	BIT_ULL(POWER_DOMAIN_PIPE_C) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

#define RKL_PW_3_POWER_DOMAINS (	\
	RKL_PW_4_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |	\
	BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO) |	\
	BIT_ULL(POWER_DOMAIN_VGA) |	\
	BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |	\
	BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |	\
	BIT_ULL(POWER_DOMAIN_AUX_D) |	\
	BIT_ULL(POWER_DOMAIN_AUX_E) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/*
 * There is no PW_2/PG_2 on RKL.
 *
 * RKL PW_1/PG_1 domains (under HW/DMC control):
 * - DBUF function (note: registers are in PW0)
 * - PIPE_A and its planes and VDSC/joining, except VGA
 * - transcoder A
 * - DDI_A and DDI_B
 * - FBC
 *
 * RKL PW_0/PG_0 domains (under HW/DMC control):
 * - PCI
 * - clocks except port PLL
 * - shared functions:
 *   * interrupts except pipe interrupts
 *   * MBus except PIPE_MBUS_DBOX_CTL
 *   * DBUF registers
 * - central power except FBC
 * - top-level GTC (DDI-level GTC is in the well associated with the DDI)
 */

/* Domains served by the "DC off" well on RKL: PW_3 plus modeset and AUX A/B. */
#define RKL_DISPLAY_DC_OFF_POWER_DOMAINS (	\
	RKL_PW_3_POWER_DOMAINS |	\
	BIT_ULL(POWER_DOMAIN_MODESET) |	\
	BIT_ULL(POWER_DOMAIN_AUX_A) |	\
	BIT_ULL(POWER_DOMAIN_AUX_B) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

/* Always-on well: enable/disable are no-ops and it always reads back enabled. */
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

/* Ops for the CHV pipe power well. */
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
	.sync_hw = chv_pipe_power_well_sync_hw,
	.enable = chv_pipe_power_well_enable,
	.disable = chv_pipe_power_well_disable,
	.is_enabled = chv_pipe_power_well_enabled,
};

/* Ops for the CHV DPIO common wells; state is read via the VLV helper. */
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
	.sync_hw = i9xx_power_well_sync_hw_noop,
	.enable = chv_dpio_cmn_power_well_enable,
	.disable = chv_dpio_cmn_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

/* i9xx: a single always-on "well" covering every power domain. */
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
};

static const struct i915_power_well_ops i830_pipes_power_well_ops = {
3009 .sync_hw = i830_pipes_power_well_sync_hw, 3010 .enable = i830_pipes_power_well_enable, 3011 .disable = i830_pipes_power_well_disable, 3012 .is_enabled = i830_pipes_power_well_enabled, 3013 }; 3014 3015 static const struct i915_power_well_desc i830_power_wells[] = { 3016 { 3017 .name = "always-on", 3018 .always_on = true, 3019 .domains = POWER_DOMAIN_MASK, 3020 .ops = &i9xx_always_on_power_well_ops, 3021 .id = DISP_PW_ID_NONE, 3022 }, 3023 { 3024 .name = "pipes", 3025 .domains = I830_PIPES_POWER_DOMAINS, 3026 .ops = &i830_pipes_power_well_ops, 3027 .id = DISP_PW_ID_NONE, 3028 }, 3029 }; 3030 3031 static const struct i915_power_well_ops hsw_power_well_ops = { 3032 .sync_hw = hsw_power_well_sync_hw, 3033 .enable = hsw_power_well_enable, 3034 .disable = hsw_power_well_disable, 3035 .is_enabled = hsw_power_well_enabled, 3036 }; 3037 3038 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 3039 .sync_hw = i9xx_power_well_sync_hw_noop, 3040 .enable = gen9_dc_off_power_well_enable, 3041 .disable = gen9_dc_off_power_well_disable, 3042 .is_enabled = gen9_dc_off_power_well_enabled, 3043 }; 3044 3045 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 3046 .sync_hw = i9xx_power_well_sync_hw_noop, 3047 .enable = bxt_dpio_cmn_power_well_enable, 3048 .disable = bxt_dpio_cmn_power_well_disable, 3049 .is_enabled = bxt_dpio_cmn_power_well_enabled, 3050 }; 3051 3052 static const struct i915_power_well_regs hsw_power_well_regs = { 3053 .bios = HSW_PWR_WELL_CTL1, 3054 .driver = HSW_PWR_WELL_CTL2, 3055 .kvmr = HSW_PWR_WELL_CTL3, 3056 .debug = HSW_PWR_WELL_CTL4, 3057 }; 3058 3059 static const struct i915_power_well_desc hsw_power_wells[] = { 3060 { 3061 .name = "always-on", 3062 .always_on = true, 3063 .domains = POWER_DOMAIN_MASK, 3064 .ops = &i9xx_always_on_power_well_ops, 3065 .id = DISP_PW_ID_NONE, 3066 }, 3067 { 3068 .name = "display", 3069 .domains = HSW_DISPLAY_POWER_DOMAINS, 3070 .ops = &hsw_power_well_ops, 3071 .id = 
HSW_DISP_PW_GLOBAL, 3072 { 3073 .hsw.regs = &hsw_power_well_regs, 3074 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3075 .hsw.has_vga = true, 3076 }, 3077 }, 3078 }; 3079 3080 static const struct i915_power_well_desc bdw_power_wells[] = { 3081 { 3082 .name = "always-on", 3083 .always_on = true, 3084 .domains = POWER_DOMAIN_MASK, 3085 .ops = &i9xx_always_on_power_well_ops, 3086 .id = DISP_PW_ID_NONE, 3087 }, 3088 { 3089 .name = "display", 3090 .domains = BDW_DISPLAY_POWER_DOMAINS, 3091 .ops = &hsw_power_well_ops, 3092 .id = HSW_DISP_PW_GLOBAL, 3093 { 3094 .hsw.regs = &hsw_power_well_regs, 3095 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3096 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3097 .hsw.has_vga = true, 3098 }, 3099 }, 3100 }; 3101 3102 static const struct i915_power_well_ops vlv_display_power_well_ops = { 3103 .sync_hw = i9xx_power_well_sync_hw_noop, 3104 .enable = vlv_display_power_well_enable, 3105 .disable = vlv_display_power_well_disable, 3106 .is_enabled = vlv_power_well_enabled, 3107 }; 3108 3109 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 3110 .sync_hw = i9xx_power_well_sync_hw_noop, 3111 .enable = vlv_dpio_cmn_power_well_enable, 3112 .disable = vlv_dpio_cmn_power_well_disable, 3113 .is_enabled = vlv_power_well_enabled, 3114 }; 3115 3116 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 3117 .sync_hw = i9xx_power_well_sync_hw_noop, 3118 .enable = vlv_power_well_enable, 3119 .disable = vlv_power_well_disable, 3120 .is_enabled = vlv_power_well_enabled, 3121 }; 3122 3123 static const struct i915_power_well_desc vlv_power_wells[] = { 3124 { 3125 .name = "always-on", 3126 .always_on = true, 3127 .domains = POWER_DOMAIN_MASK, 3128 .ops = &i9xx_always_on_power_well_ops, 3129 .id = DISP_PW_ID_NONE, 3130 }, 3131 { 3132 .name = "display", 3133 .domains = VLV_DISPLAY_POWER_DOMAINS, 3134 .ops = &vlv_display_power_well_ops, 3135 .id = VLV_DISP_PW_DISP2D, 3136 { 3137 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 3138 }, 3139 }, 3140 { 3141 .name = 
"dpio-tx-b-01", 3142 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3143 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3144 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3145 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3146 .ops = &vlv_dpio_power_well_ops, 3147 .id = DISP_PW_ID_NONE, 3148 { 3149 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 3150 }, 3151 }, 3152 { 3153 .name = "dpio-tx-b-23", 3154 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3155 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3156 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3157 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3158 .ops = &vlv_dpio_power_well_ops, 3159 .id = DISP_PW_ID_NONE, 3160 { 3161 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 3162 }, 3163 }, 3164 { 3165 .name = "dpio-tx-c-01", 3166 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3167 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3168 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3169 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3170 .ops = &vlv_dpio_power_well_ops, 3171 .id = DISP_PW_ID_NONE, 3172 { 3173 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 3174 }, 3175 }, 3176 { 3177 .name = "dpio-tx-c-23", 3178 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3179 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3180 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3181 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3182 .ops = &vlv_dpio_power_well_ops, 3183 .id = DISP_PW_ID_NONE, 3184 { 3185 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 3186 }, 3187 }, 3188 { 3189 .name = "dpio-common", 3190 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS, 3191 .ops = &vlv_dpio_cmn_power_well_ops, 3192 .id = VLV_DISP_PW_DPIO_CMN_BC, 3193 { 3194 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3195 }, 3196 }, 3197 }; 3198 3199 static const struct i915_power_well_desc chv_power_wells[] = { 3200 { 3201 .name = "always-on", 3202 .always_on = true, 3203 .domains = POWER_DOMAIN_MASK, 3204 .ops = &i9xx_always_on_power_well_ops, 3205 .id = DISP_PW_ID_NONE, 3206 }, 3207 { 3208 .name = "display", 3209 /* 3210 * Pipe A power well is the new disp2d well. 
Pipe B and C
		 * power wells don't actually exist. Pipe A power well is
		 * required for any pipe to work.
		 */
		.domains = CHV_DISPLAY_POWER_DOMAINS,
		.ops = &chv_pipe_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "dpio-common-bc",
		.domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = VLV_DISP_PW_DPIO_CMN_BC,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC,
		},
	},
	{
		.name = "dpio-common-d",
		.domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
		.ops = &chv_dpio_cmn_power_well_ops,
		.id = CHV_DISP_PW_DPIO_CMN_D,
		{
			.vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D,
		},
	},
};

/*
 * intel_display_power_well_is_enabled - query a well's current HW state
 * @dev_priv: i915 device instance
 * @power_well_id: ID of the power well to query
 *
 * Looks up the well by ID and returns the result of its ->is_enabled()
 * hook. NOTE(review): assumes lookup_power_well() never returns NULL for
 * the IDs callers pass in — confirm against its fallback behaviour.
 */
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
					 enum i915_power_well_id power_well_id)
{
	struct i915_power_well *power_well;
	bool ret;

	power_well = lookup_power_well(dev_priv, power_well_id);
	ret = power_well->desc->ops->is_enabled(dev_priv, power_well);

	return ret;
}

/* SKL display power wells. */
static const struct i915_power_well_desc skl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "MISC IO power well",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_MISC_IO,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = SKL_PW_CTL_IDX_MISC_IO,
		},
	},
	{
		.name = "DC off",
		.domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops =
&gen9_dc_off_power_well_ops, 3287 .id = SKL_DISP_DC_OFF, 3288 }, 3289 { 3290 .name = "power well 2", 3291 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3292 .ops = &hsw_power_well_ops, 3293 .id = SKL_DISP_PW_2, 3294 { 3295 .hsw.regs = &hsw_power_well_regs, 3296 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3297 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3298 .hsw.has_vga = true, 3299 .hsw.has_fuses = true, 3300 }, 3301 }, 3302 { 3303 .name = "DDI A/E IO power well", 3304 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 3305 .ops = &hsw_power_well_ops, 3306 .id = DISP_PW_ID_NONE, 3307 { 3308 .hsw.regs = &hsw_power_well_regs, 3309 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 3310 }, 3311 }, 3312 { 3313 .name = "DDI B IO power well", 3314 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3315 .ops = &hsw_power_well_ops, 3316 .id = DISP_PW_ID_NONE, 3317 { 3318 .hsw.regs = &hsw_power_well_regs, 3319 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3320 }, 3321 }, 3322 { 3323 .name = "DDI C IO power well", 3324 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3325 .ops = &hsw_power_well_ops, 3326 .id = DISP_PW_ID_NONE, 3327 { 3328 .hsw.regs = &hsw_power_well_regs, 3329 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3330 }, 3331 }, 3332 { 3333 .name = "DDI D IO power well", 3334 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 3335 .ops = &hsw_power_well_ops, 3336 .id = DISP_PW_ID_NONE, 3337 { 3338 .hsw.regs = &hsw_power_well_regs, 3339 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3340 }, 3341 }, 3342 }; 3343 3344 static const struct i915_power_well_desc bxt_power_wells[] = { 3345 { 3346 .name = "always-on", 3347 .always_on = true, 3348 .domains = POWER_DOMAIN_MASK, 3349 .ops = &i9xx_always_on_power_well_ops, 3350 .id = DISP_PW_ID_NONE, 3351 }, 3352 { 3353 .name = "power well 1", 3354 /* Handled by the DMC firmware */ 3355 .always_on = true, 3356 .domains = 0, 3357 .ops = &hsw_power_well_ops, 3358 .id = SKL_DISP_PW_1, 3359 { 3360 .hsw.regs = &hsw_power_well_regs, 3361 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3362 .hsw.has_fuses = true, 3363 }, 
3364 }, 3365 { 3366 .name = "DC off", 3367 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 3368 .ops = &gen9_dc_off_power_well_ops, 3369 .id = SKL_DISP_DC_OFF, 3370 }, 3371 { 3372 .name = "power well 2", 3373 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3374 .ops = &hsw_power_well_ops, 3375 .id = SKL_DISP_PW_2, 3376 { 3377 .hsw.regs = &hsw_power_well_regs, 3378 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3379 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3380 .hsw.has_vga = true, 3381 .hsw.has_fuses = true, 3382 }, 3383 }, 3384 { 3385 .name = "dpio-common-a", 3386 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 3387 .ops = &bxt_dpio_cmn_power_well_ops, 3388 .id = BXT_DISP_PW_DPIO_CMN_A, 3389 { 3390 .bxt.phy = DPIO_PHY1, 3391 }, 3392 }, 3393 { 3394 .name = "dpio-common-bc", 3395 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 3396 .ops = &bxt_dpio_cmn_power_well_ops, 3397 .id = VLV_DISP_PW_DPIO_CMN_BC, 3398 { 3399 .bxt.phy = DPIO_PHY0, 3400 }, 3401 }, 3402 }; 3403 3404 static const struct i915_power_well_desc glk_power_wells[] = { 3405 { 3406 .name = "always-on", 3407 .always_on = true, 3408 .domains = POWER_DOMAIN_MASK, 3409 .ops = &i9xx_always_on_power_well_ops, 3410 .id = DISP_PW_ID_NONE, 3411 }, 3412 { 3413 .name = "power well 1", 3414 /* Handled by the DMC firmware */ 3415 .always_on = true, 3416 .domains = 0, 3417 .ops = &hsw_power_well_ops, 3418 .id = SKL_DISP_PW_1, 3419 { 3420 .hsw.regs = &hsw_power_well_regs, 3421 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3422 .hsw.has_fuses = true, 3423 }, 3424 }, 3425 { 3426 .name = "DC off", 3427 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 3428 .ops = &gen9_dc_off_power_well_ops, 3429 .id = SKL_DISP_DC_OFF, 3430 }, 3431 { 3432 .name = "power well 2", 3433 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3434 .ops = &hsw_power_well_ops, 3435 .id = SKL_DISP_PW_2, 3436 { 3437 .hsw.regs = &hsw_power_well_regs, 3438 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3439 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3440 .hsw.has_vga = true, 3441 .hsw.has_fuses = true, 
3442 }, 3443 }, 3444 { 3445 .name = "dpio-common-a", 3446 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 3447 .ops = &bxt_dpio_cmn_power_well_ops, 3448 .id = BXT_DISP_PW_DPIO_CMN_A, 3449 { 3450 .bxt.phy = DPIO_PHY1, 3451 }, 3452 }, 3453 { 3454 .name = "dpio-common-b", 3455 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 3456 .ops = &bxt_dpio_cmn_power_well_ops, 3457 .id = VLV_DISP_PW_DPIO_CMN_BC, 3458 { 3459 .bxt.phy = DPIO_PHY0, 3460 }, 3461 }, 3462 { 3463 .name = "dpio-common-c", 3464 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 3465 .ops = &bxt_dpio_cmn_power_well_ops, 3466 .id = GLK_DISP_PW_DPIO_CMN_C, 3467 { 3468 .bxt.phy = DPIO_PHY2, 3469 }, 3470 }, 3471 { 3472 .name = "AUX A", 3473 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 3474 .ops = &hsw_power_well_ops, 3475 .id = DISP_PW_ID_NONE, 3476 { 3477 .hsw.regs = &hsw_power_well_regs, 3478 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3479 }, 3480 }, 3481 { 3482 .name = "AUX B", 3483 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 3484 .ops = &hsw_power_well_ops, 3485 .id = DISP_PW_ID_NONE, 3486 { 3487 .hsw.regs = &hsw_power_well_regs, 3488 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3489 }, 3490 }, 3491 { 3492 .name = "AUX C", 3493 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 3494 .ops = &hsw_power_well_ops, 3495 .id = DISP_PW_ID_NONE, 3496 { 3497 .hsw.regs = &hsw_power_well_regs, 3498 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3499 }, 3500 }, 3501 { 3502 .name = "DDI A IO power well", 3503 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3504 .ops = &hsw_power_well_ops, 3505 .id = DISP_PW_ID_NONE, 3506 { 3507 .hsw.regs = &hsw_power_well_regs, 3508 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3509 }, 3510 }, 3511 { 3512 .name = "DDI B IO power well", 3513 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3514 .ops = &hsw_power_well_ops, 3515 .id = DISP_PW_ID_NONE, 3516 { 3517 .hsw.regs = &hsw_power_well_regs, 3518 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3519 }, 3520 }, 3521 { 3522 .name = "DDI C IO power well", 3523 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3524 .ops = 
&hsw_power_well_ops, 3525 .id = DISP_PW_ID_NONE, 3526 { 3527 .hsw.regs = &hsw_power_well_regs, 3528 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3529 }, 3530 }, 3531 }; 3532 3533 static const struct i915_power_well_desc cnl_power_wells[] = { 3534 { 3535 .name = "always-on", 3536 .always_on = true, 3537 .domains = POWER_DOMAIN_MASK, 3538 .ops = &i9xx_always_on_power_well_ops, 3539 .id = DISP_PW_ID_NONE, 3540 }, 3541 { 3542 .name = "power well 1", 3543 /* Handled by the DMC firmware */ 3544 .always_on = true, 3545 .domains = 0, 3546 .ops = &hsw_power_well_ops, 3547 .id = SKL_DISP_PW_1, 3548 { 3549 .hsw.regs = &hsw_power_well_regs, 3550 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3551 .hsw.has_fuses = true, 3552 }, 3553 }, 3554 { 3555 .name = "AUX A", 3556 .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS, 3557 .ops = &hsw_power_well_ops, 3558 .id = DISP_PW_ID_NONE, 3559 { 3560 .hsw.regs = &hsw_power_well_regs, 3561 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3562 }, 3563 }, 3564 { 3565 .name = "AUX B", 3566 .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS, 3567 .ops = &hsw_power_well_ops, 3568 .id = DISP_PW_ID_NONE, 3569 { 3570 .hsw.regs = &hsw_power_well_regs, 3571 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3572 }, 3573 }, 3574 { 3575 .name = "AUX C", 3576 .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS, 3577 .ops = &hsw_power_well_ops, 3578 .id = DISP_PW_ID_NONE, 3579 { 3580 .hsw.regs = &hsw_power_well_regs, 3581 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3582 }, 3583 }, 3584 { 3585 .name = "AUX D", 3586 .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS, 3587 .ops = &hsw_power_well_ops, 3588 .id = DISP_PW_ID_NONE, 3589 { 3590 .hsw.regs = &hsw_power_well_regs, 3591 .hsw.idx = CNL_PW_CTL_IDX_AUX_D, 3592 }, 3593 }, 3594 { 3595 .name = "DC off", 3596 .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, 3597 .ops = &gen9_dc_off_power_well_ops, 3598 .id = SKL_DISP_DC_OFF, 3599 }, 3600 { 3601 .name = "power well 2", 3602 .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3603 .ops = &hsw_power_well_ops, 3604 .id = SKL_DISP_PW_2, 3605 { 3606 .hsw.regs = 
&hsw_power_well_regs, 3607 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3608 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3609 .hsw.has_vga = true, 3610 .hsw.has_fuses = true, 3611 }, 3612 }, 3613 { 3614 .name = "DDI A IO power well", 3615 .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS, 3616 .ops = &hsw_power_well_ops, 3617 .id = DISP_PW_ID_NONE, 3618 { 3619 .hsw.regs = &hsw_power_well_regs, 3620 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3621 }, 3622 }, 3623 { 3624 .name = "DDI B IO power well", 3625 .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS, 3626 .ops = &hsw_power_well_ops, 3627 .id = DISP_PW_ID_NONE, 3628 { 3629 .hsw.regs = &hsw_power_well_regs, 3630 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3631 }, 3632 }, 3633 { 3634 .name = "DDI C IO power well", 3635 .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS, 3636 .ops = &hsw_power_well_ops, 3637 .id = DISP_PW_ID_NONE, 3638 { 3639 .hsw.regs = &hsw_power_well_regs, 3640 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3641 }, 3642 }, 3643 { 3644 .name = "DDI D IO power well", 3645 .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS, 3646 .ops = &hsw_power_well_ops, 3647 .id = DISP_PW_ID_NONE, 3648 { 3649 .hsw.regs = &hsw_power_well_regs, 3650 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3651 }, 3652 }, 3653 { 3654 .name = "DDI F IO power well", 3655 .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS, 3656 .ops = &hsw_power_well_ops, 3657 .id = DISP_PW_ID_NONE, 3658 { 3659 .hsw.regs = &hsw_power_well_regs, 3660 .hsw.idx = CNL_PW_CTL_IDX_DDI_F, 3661 }, 3662 }, 3663 { 3664 .name = "AUX F", 3665 .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS, 3666 .ops = &hsw_power_well_ops, 3667 .id = DISP_PW_ID_NONE, 3668 { 3669 .hsw.regs = &hsw_power_well_regs, 3670 .hsw.idx = CNL_PW_CTL_IDX_AUX_F, 3671 }, 3672 }, 3673 }; 3674 3675 static const struct i915_power_well_ops icl_aux_power_well_ops = { 3676 .sync_hw = hsw_power_well_sync_hw, 3677 .enable = icl_aux_power_well_enable, 3678 .disable = icl_aux_power_well_disable, 3679 .is_enabled = hsw_power_well_enabled, 3680 }; 3681 3682 static const struct 
i915_power_well_regs icl_aux_power_well_regs = { 3683 .bios = ICL_PWR_WELL_CTL_AUX1, 3684 .driver = ICL_PWR_WELL_CTL_AUX2, 3685 .debug = ICL_PWR_WELL_CTL_AUX4, 3686 }; 3687 3688 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3689 .bios = ICL_PWR_WELL_CTL_DDI1, 3690 .driver = ICL_PWR_WELL_CTL_DDI2, 3691 .debug = ICL_PWR_WELL_CTL_DDI4, 3692 }; 3693 3694 static const struct i915_power_well_desc icl_power_wells[] = { 3695 { 3696 .name = "always-on", 3697 .always_on = true, 3698 .domains = POWER_DOMAIN_MASK, 3699 .ops = &i9xx_always_on_power_well_ops, 3700 .id = DISP_PW_ID_NONE, 3701 }, 3702 { 3703 .name = "power well 1", 3704 /* Handled by the DMC firmware */ 3705 .always_on = true, 3706 .domains = 0, 3707 .ops = &hsw_power_well_ops, 3708 .id = SKL_DISP_PW_1, 3709 { 3710 .hsw.regs = &hsw_power_well_regs, 3711 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3712 .hsw.has_fuses = true, 3713 }, 3714 }, 3715 { 3716 .name = "DC off", 3717 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3718 .ops = &gen9_dc_off_power_well_ops, 3719 .id = SKL_DISP_DC_OFF, 3720 }, 3721 { 3722 .name = "power well 2", 3723 .domains = ICL_PW_2_POWER_DOMAINS, 3724 .ops = &hsw_power_well_ops, 3725 .id = SKL_DISP_PW_2, 3726 { 3727 .hsw.regs = &hsw_power_well_regs, 3728 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3729 .hsw.has_fuses = true, 3730 }, 3731 }, 3732 { 3733 .name = "power well 3", 3734 .domains = ICL_PW_3_POWER_DOMAINS, 3735 .ops = &hsw_power_well_ops, 3736 .id = ICL_DISP_PW_3, 3737 { 3738 .hsw.regs = &hsw_power_well_regs, 3739 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3740 .hsw.irq_pipe_mask = BIT(PIPE_B), 3741 .hsw.has_vga = true, 3742 .hsw.has_fuses = true, 3743 }, 3744 }, 3745 { 3746 .name = "DDI A IO", 3747 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 3748 .ops = &hsw_power_well_ops, 3749 .id = DISP_PW_ID_NONE, 3750 { 3751 .hsw.regs = &icl_ddi_power_well_regs, 3752 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3753 }, 3754 }, 3755 { 3756 .name = "DDI B IO", 3757 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3758 .ops = 
&hsw_power_well_ops, 3759 .id = DISP_PW_ID_NONE, 3760 { 3761 .hsw.regs = &icl_ddi_power_well_regs, 3762 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3763 }, 3764 }, 3765 { 3766 .name = "DDI C IO", 3767 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3768 .ops = &hsw_power_well_ops, 3769 .id = DISP_PW_ID_NONE, 3770 { 3771 .hsw.regs = &icl_ddi_power_well_regs, 3772 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3773 }, 3774 }, 3775 { 3776 .name = "DDI D IO", 3777 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3778 .ops = &hsw_power_well_ops, 3779 .id = DISP_PW_ID_NONE, 3780 { 3781 .hsw.regs = &icl_ddi_power_well_regs, 3782 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3783 }, 3784 }, 3785 { 3786 .name = "DDI E IO", 3787 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3788 .ops = &hsw_power_well_ops, 3789 .id = DISP_PW_ID_NONE, 3790 { 3791 .hsw.regs = &icl_ddi_power_well_regs, 3792 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3793 }, 3794 }, 3795 { 3796 .name = "DDI F IO", 3797 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3798 .ops = &hsw_power_well_ops, 3799 .id = DISP_PW_ID_NONE, 3800 { 3801 .hsw.regs = &icl_ddi_power_well_regs, 3802 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3803 }, 3804 }, 3805 { 3806 .name = "AUX A", 3807 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3808 .ops = &icl_aux_power_well_ops, 3809 .id = DISP_PW_ID_NONE, 3810 { 3811 .hsw.regs = &icl_aux_power_well_regs, 3812 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3813 }, 3814 }, 3815 { 3816 .name = "AUX B", 3817 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3818 .ops = &icl_aux_power_well_ops, 3819 .id = DISP_PW_ID_NONE, 3820 { 3821 .hsw.regs = &icl_aux_power_well_regs, 3822 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3823 }, 3824 }, 3825 { 3826 .name = "AUX C TC1", 3827 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3828 .ops = &icl_aux_power_well_ops, 3829 .id = DISP_PW_ID_NONE, 3830 { 3831 .hsw.regs = &icl_aux_power_well_regs, 3832 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3833 .hsw.is_tc_tbt = false, 3834 }, 3835 }, 3836 { 3837 .name = "AUX D TC2", 3838 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3839 .ops = &icl_aux_power_well_ops, 
3840 .id = DISP_PW_ID_NONE, 3841 { 3842 .hsw.regs = &icl_aux_power_well_regs, 3843 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3844 .hsw.is_tc_tbt = false, 3845 }, 3846 }, 3847 { 3848 .name = "AUX E TC3", 3849 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3850 .ops = &icl_aux_power_well_ops, 3851 .id = DISP_PW_ID_NONE, 3852 { 3853 .hsw.regs = &icl_aux_power_well_regs, 3854 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3855 .hsw.is_tc_tbt = false, 3856 }, 3857 }, 3858 { 3859 .name = "AUX F TC4", 3860 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3861 .ops = &icl_aux_power_well_ops, 3862 .id = DISP_PW_ID_NONE, 3863 { 3864 .hsw.regs = &icl_aux_power_well_regs, 3865 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3866 .hsw.is_tc_tbt = false, 3867 }, 3868 }, 3869 { 3870 .name = "AUX C TBT1", 3871 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3872 .ops = &icl_aux_power_well_ops, 3873 .id = DISP_PW_ID_NONE, 3874 { 3875 .hsw.regs = &icl_aux_power_well_regs, 3876 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3877 .hsw.is_tc_tbt = true, 3878 }, 3879 }, 3880 { 3881 .name = "AUX D TBT2", 3882 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3883 .ops = &icl_aux_power_well_ops, 3884 .id = DISP_PW_ID_NONE, 3885 { 3886 .hsw.regs = &icl_aux_power_well_regs, 3887 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 3888 .hsw.is_tc_tbt = true, 3889 }, 3890 }, 3891 { 3892 .name = "AUX E TBT3", 3893 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, 3894 .ops = &icl_aux_power_well_ops, 3895 .id = DISP_PW_ID_NONE, 3896 { 3897 .hsw.regs = &icl_aux_power_well_regs, 3898 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 3899 .hsw.is_tc_tbt = true, 3900 }, 3901 }, 3902 { 3903 .name = "AUX F TBT4", 3904 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 3905 .ops = &icl_aux_power_well_ops, 3906 .id = DISP_PW_ID_NONE, 3907 { 3908 .hsw.regs = &icl_aux_power_well_regs, 3909 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3910 .hsw.is_tc_tbt = true, 3911 }, 3912 }, 3913 { 3914 .name = "power well 4", 3915 .domains = ICL_PW_4_POWER_DOMAINS, 3916 .ops = &hsw_power_well_ops, 3917 .id = DISP_PW_ID_NONE, 3918 { 3919 
.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		},
	},
};

/*
 * Ask PCODE to block (or unblock) TC-cold entry on TGL. The mailbox
 * request is retried up to 3 times with a 1 ms sleep between attempts;
 * the final outcome is logged (error on failure, debug on success).
 */
static void
tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
{
	u8 tries = 0;
	int ret;

	while (1) {
		u32 low_val;
		u32 high_val = 0;

		if (block)
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ;
		else
			low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ;

		/*
		 * Spec states that we should timeout the request after 200us
		 * but the function below will timeout after 500us
		 */
		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
					     &high_val);
		if (ret == 0) {
			/* PCODE replied; a block request can still be refused. */
			if (block &&
			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
				ret = -EIO;
			else
				break;
		}

		if (++tries == 3)
			break;

		drm_msleep(1);
	}

	if (ret)
		drm_err(&i915->drm, "TC cold %sblock failed\n",
			block ? "" : "un");
	else
		drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
			    block ? "" : "un");
}

/* Enabling the "TC cold off" well means keeping TC-cold blocked. */
static void
tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
				  struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, true);
}

static void
tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	tgl_tc_cold_request(i915, false);
}

/* Bring the PCODE block state in line with our current reference count. */
static void
tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
				   struct i915_power_well *power_well)
{
	if (power_well->count > 0)
		tgl_tc_cold_off_power_well_enable(i915, power_well);
	else
		tgl_tc_cold_off_power_well_disable(i915, power_well);
}

static bool
tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
				      struct i915_power_well *power_well)
{
	/*
	 * Not the correct implementation: the state cannot be read back
	 * from PCODE, so report our own reference count instead to avoid
	 * triggering state-mismatch errors.
	 */
	return power_well->count;
}

static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
	.enable = tgl_tc_cold_off_power_well_enable,
	.disable = tgl_tc_cold_off_power_well_disable,
	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
};

/* TGL display power wells. */
static const struct i915_power_well_desc tgl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops =
&gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 2",
		.domains = TGL_PW_2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_2,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_2,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 3",
		.domains = TGL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI C IO",
		.domains = ICL_DDI_IO_C_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_C,
		}
	},
	{
		.name = "DDI D TC1 IO",
		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI E TC2 IO",
		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "DDI F TC3 IO",
		.domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC3,
		},
	},
	{
		.name = "DDI G TC4 IO",
		.domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC4,
		},
	},
	{
		.name = "DDI H TC5 IO",
		.domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC5,
		},
	},
	{
		.name = "DDI I TC6 IO",
		.domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC6,
		},
	},
	{
		/* Uses dedicated ops and no PWR_WELL_CTL index */
		.name = "TC cold off",
		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
		.ops = &tgl_tc_cold_off_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "AUX A",
		.domains = TGL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = TGL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX C",
		.domains = TGL_AUX_C_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
		},
	},
	/* Type-C AUX wells in native (non-thunderbolt) mode: is_tc_tbt = false */
	{
		.name = "AUX D TC1",
		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX E TC2",
		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX F TC3",
		.domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC3,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX G TC4",
		.domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC4,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX H TC5",
		.domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC5,
			.hsw.is_tc_tbt = false,
		},
	},
	{
		.name = "AUX I TC6",
		.domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC6,
			.hsw.is_tc_tbt = false,
		},
	},
	/* Type-C AUX wells in thunderbolt mode: is_tc_tbt = true */
	{
		.name = "AUX D TBT1",
		.domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX E TBT2",
		.domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX F TBT3",
		.domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX G TBT4",
		.domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX H TBT5",
		.domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "AUX I TBT6",
		.domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6,
			.hsw.is_tc_tbt = true,
		},
	},
	{
		.name = "power well 4",
		.domains = TGL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		}
	},
	{
		.name = "power well 5",
		.domains = TGL_PW_5_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_PW_5,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_D),
		},
	},
};

/* Rocket Lake display power wells */
static const struct i915_power_well_desc rkl_power_wells[] = {
	{
		.name = "always-on",
		.always_on = true,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
		.id = DISP_PW_ID_NONE,
	},
	{
		.name = "power well 1",
		/* Handled by the DMC firmware */
		.always_on = true,
		.domains = 0,
		.ops = &hsw_power_well_ops,
		.id = SKL_DISP_PW_1,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_1,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "DC off",
		.domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS,
		.ops = &gen9_dc_off_power_well_ops,
		.id = SKL_DISP_DC_OFF,
	},
	{
		.name = "power well 3",
		.domains = RKL_PW_3_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = ICL_DISP_PW_3,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_3,
			.hsw.irq_pipe_mask = BIT(PIPE_B),
			.hsw.has_vga = true,
			.hsw.has_fuses = true,
		},
	},
	{
		.name = "power well 4",
		.domains = RKL_PW_4_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &hsw_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_PW_4,
			.hsw.has_fuses = true,
			.hsw.irq_pipe_mask = BIT(PIPE_C),
		}
	},
	{
		.name = "DDI A IO",
		.domains = ICL_DDI_IO_A_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_A,
		}
	},
	{
		.name = "DDI B IO",
		.domains = ICL_DDI_IO_B_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_DDI_B,
		}
	},
	{
		.name = "DDI D TC1 IO",
		.domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
		},
	},
	{
		.name = "DDI E TC2 IO",
		.domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_ddi_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
		},
	},
	{
		.name = "AUX A",
		.domains = ICL_AUX_A_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_A,
		},
	},
	{
		.name = "AUX B",
		.domains = ICL_AUX_B_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = ICL_PW_CTL_IDX_AUX_B,
		},
	},
	{
		.name = "AUX D TC1",
		.domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
		},
	},
	{
		.name = "AUX E TC2",
		.domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS,
		.ops = &icl_aux_power_well_ops,
		.id = DISP_PW_ID_NONE,
		{
			.hsw.regs = &icl_aux_power_well_regs,
			.hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
		},
	},
};

/*
 * Sanitize the i915.disable_power_well module parameter: any non-negative
 * value is collapsed to 0/1, a negative value (auto) defaults to 1
 * (power wells may be disabled). @dev_priv is currently unused.
 */
static int
sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
				   int disable_power_well)
{
	if (disable_power_well >= 0)
		return !!disable_power_well;

	return 1;
}

/*
 * Compute the mask of DC states the driver is allowed to use, based on the
 * platform's deepest supported DC state and the i915.enable_dc module
 * parameter.
 */
static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
			       int enable_dc)
{
	u32 mask;
	int requested_dc;
	int max_dc;

	if (INTEL_GEN(dev_priv) >= 12) {
		max_dc = 4;
		/*
		 * DC9 has a separate HW flow from the rest of the DC states,
		 * not depending on the DMC firmware. It's needed by system
		 * suspend/resume, so allow it unconditionally.
		 */
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 11)) {
		max_dc = 2;
		mask = DC_STATE_EN_DC9;
	} else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
		max_dc = 2;
		mask = 0;
	} else if (IS_GEN9_LP(dev_priv)) {
		max_dc = 1;
		mask = DC_STATE_EN_DC9;
	} else {
		max_dc = 0;
		mask = 0;
	}

	/* DC states depend on the power wells being allowed to power down */
	if (!dev_priv->params.disable_power_well)
		max_dc = 0;

	if (enable_dc >= 0 && enable_dc <= max_dc) {
		requested_dc = enable_dc;
	} else if (enable_dc == -1) {
		/* -1 means "auto": use the platform maximum */
		requested_dc = max_dc;
	} else if (enable_dc > max_dc && enable_dc <= 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "Adjusting requested max DC state (%d->%d)\n",
			    enable_dc, max_dc);
		requested_dc = max_dc;
	} else {
		drm_err(&dev_priv->drm,
			"Unexpected value for enable_dc (%d)\n", enable_dc);
		requested_dc = max_dc;
	}

	switch (requested_dc) {
	case 4:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6;
		break;
	case 3:
		mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5;
		break;
	case 2:
		mask |= DC_STATE_EN_UPTO_DC6;
		break;
	case 1:
		mask |= DC_STATE_EN_UPTO_DC5;
		break;
	}

	drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);

	return mask;
}

/*
 * Allocate and populate the power_wells array from a platform descriptor
 * table. Warns if a well id is out of range for the u64 bitmap or if the
 * same id is used by more than one well. Returns 0 or -ENOMEM.
 */
static int
__set_power_wells(struct i915_power_domains *power_domains,
		  const struct i915_power_well_desc *power_well_descs,
		  int power_well_count)
{
	struct drm_i915_private *i915 = container_of(power_domains,
						     struct drm_i915_private,
						     power_domains);
	u64 power_well_ids = 0;
	int i;

	power_domains->power_well_count = power_well_count;
	power_domains->power_wells =
				kcalloc(power_well_count,
					sizeof(*power_domains->power_wells),
					GFP_KERNEL);
	if (!power_domains->power_wells)
		return -ENOMEM;

	for (i = 0; i < power_well_count; i++) {
		enum i915_power_well_id id = power_well_descs[i].id;

		power_domains->power_wells[i].desc = &power_well_descs[i];

		if (id == DISP_PW_ID_NONE)
			continue;

		/* ids must fit in the u64 bitmap and be unique */
		drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8);
		drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id));
		power_well_ids |= BIT_ULL(id);
	}

	return 0;
}

#define set_power_wells(power_domains, __power_well_descs) \
	__set_power_wells(power_domains, __power_well_descs, \
			  ARRAY_SIZE(__power_well_descs))

/**
 * intel_power_domains_init - initializes the power domain structures
 * @dev_priv: i915 device instance
 *
 * Initializes the power domain structures for @dev_priv depending upon the
 * supported platform.
 */
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	int err;

	dev_priv->params.disable_power_well =
		sanitize_disable_power_well_option(dev_priv,
						   dev_priv->params.disable_power_well);
	dev_priv->csr.allowed_dc_mask =
		get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);

	dev_priv->csr.target_dc_state =
		sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);

	/* domain masks are stored in a u64 bitmap */
	BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);

	rw_init(&power_domains->lock, "ipdl");

	INIT_DELAYED_WORK(&power_domains->async_put_work,
			  intel_display_power_put_async_work);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_ROCKETLAKE(dev_priv)) {
		err = set_power_wells(power_domains, rkl_power_wells);
	} else if (IS_GEN(dev_priv, 12)) {
		err = set_power_wells(power_domains, tgl_power_wells);
	} else if (IS_GEN(dev_priv, 11)) {
		err = set_power_wells(power_domains, icl_power_wells);
	} else if (IS_CANNONLAKE(dev_priv)) {
		err = set_power_wells(power_domains, cnl_power_wells);

		/*
		 * DDI and Aux IO are getting enabled for all ports
		 * regardless the presence or use. So, in order to avoid
		 * timeouts, lets remove them from the list
		 * for the SKUs without port F.
		 */
		if (!IS_CNL_WITH_PORT_F(dev_priv))
			power_domains->power_well_count -= 2;
	} else if (IS_GEMINILAKE(dev_priv)) {
		err = set_power_wells(power_domains, glk_power_wells);
	} else if (IS_BROXTON(dev_priv)) {
		err = set_power_wells(power_domains, bxt_power_wells);
	} else if (IS_GEN9_BC(dev_priv)) {
		err = set_power_wells(power_domains, skl_power_wells);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, chv_power_wells);
	} else if (IS_BROADWELL(dev_priv)) {
		err = set_power_wells(power_domains, bdw_power_wells);
	} else if (IS_HASWELL(dev_priv)) {
		err = set_power_wells(power_domains, hsw_power_wells);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		err = set_power_wells(power_domains, vlv_power_wells);
	} else if (IS_I830(dev_priv)) {
		err = set_power_wells(power_domains, i830_power_wells);
	} else {
		/* Fallback: a single always-on well */
		err = set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return err;
}

/**
 * intel_power_domains_cleanup - clean up power domains resources
 * @dev_priv: i915 device instance
 *
 * Release any resources acquired by intel_power_domains_init()
 */
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv)
{
	kfree(dev_priv->power_domains.power_wells);
}

/*
 * Run each well's sync_hw hook and cache the resulting HW enabled state in
 * the software tracking, under the power domains lock.
 */
static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;

	mutex_lock(&power_domains->lock);
	for_each_power_well(dev_priv, power_well) {
		power_well->desc->ops->sync_hw(dev_priv, power_well);
		power_well->hw_enabled =
			power_well->desc->ops->is_enabled(dev_priv, power_well);
	}
	mutex_unlock(&power_domains->lock);
}

/*
 * Request the given power state for one DBUF slice and warn if the
 * hardware state doesn't match the request after a 10 us settle time.
 */
static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv,
				enum dbuf_slice slice, bool enable)
{
	i915_reg_t reg = DBUF_CTL_S(slice);
	bool state;
	u32 val;

	val = intel_de_read(dev_priv, reg);
	if (enable)
		val |= DBUF_POWER_REQUEST;
	else
		val &= ~DBUF_POWER_REQUEST;
	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);
	udelay(10);

	state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
	drm_WARN(&dev_priv->drm, enable != state,
		 "DBuf slice %d power %s timeout!\n",
		 slice, enable ? "enable" : "disable");
}

/*
 * Power the requested set of DBUF slices up (bits set in @req_slices) and
 * the rest down, then record the new mask in dev_priv->dbuf.enabled_slices.
 */
void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
			     u8 req_slices)
{
	int num_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	enum dbuf_slice slice;

	drm_WARN(&dev_priv->drm, req_slices & ~(BIT(num_slices) - 1),
		 "Invalid set of dbuf slices (0x%x) requested (num dbuf slices %d)\n",
		 req_slices, num_slices);

	drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n",
		    req_slices);

	/*
	 * Might be running this in parallel to gen9_dc_off_power_well_enable
	 * being called from intel_dp_detect for instance,
	 * which causes assertion triggered by race condition,
	 * as gen9_assert_dbuf_enabled might preempt this when registers
	 * were already updated, while dev_priv was not.
	 */
	mutex_lock(&power_domains->lock);

	for (slice = DBUF_S1; slice < num_slices; slice++)
		gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice));

	dev_priv->dbuf.enabled_slices = req_slices;

	mutex_unlock(&power_domains->lock);
}

static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
	dev_priv->dbuf.enabled_slices =
		intel_enabled_dbuf_slices_mask(dev_priv);

	/*
	 * Just power up at least 1 slice, we will
	 * figure out later which slices we have and what we need.
	 */
	gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) |
				dev_priv->dbuf.enabled_slices);
}

/* Power down all DBUF slices */
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
	gen9_dbuf_slices_update(dev_priv, 0);
}

/* Program the MBUS ABOX credit registers for every ABOX instance present */
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
	unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask;
	u32 mask, val, i;

	mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
		MBUS_ABOX_BT_CREDIT_POOL2_MASK |
		MBUS_ABOX_B_CREDIT_MASK |
		MBUS_ABOX_BW_CREDIT_MASK;
	val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
		MBUS_ABOX_BT_CREDIT_POOL2(16) |
		MBUS_ABOX_B_CREDIT(1) |
		MBUS_ABOX_BW_CREDIT(1);

	/*
	 * gen12 platforms that use abox1 and abox2 for pixel data reads still
	 * expect us to program the abox_ctl0 register as well, even though
	 * we don't have to program other instance-0 registers like BW_BUDDY.
	 */
	if (IS_GEN(dev_priv, 12))
		abox_regs |= BIT(0);

	for_each_set_bit(i, &abox_regs, sizeof(abox_regs))
		intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val);
}

static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val = intel_de_read(dev_priv, LCPLL_CTL);

	/*
	 * The LCPLL register should be turned on by the BIOS. For now
	 * let's just check its state and print errors in case
	 * something is wrong. Don't even try to turn it on.
	 */

	if (val & LCPLL_CD_SOURCE_FCLK)
		drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");

	if (val & LCPLL_PLL_DISABLE)
		drm_err(&dev_priv->drm, "LCPLL is disabled\n");

	if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
		drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}

/*
 * Sanity-check that everything that must be off before LCPLL can be
 * disabled really is off: CRTCs, power well, PLLs, panel power, PWMs,
 * utility pin, GTC and IRQs.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
			"Display power well on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
			"SPLL enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
			"WRPLL1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
			"WRPLL2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
			"Panel power on\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev_priv))
		I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
			"PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled.
We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/* D_COMP lives at a different MMIO offset on HSW vs BDW */
static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	if (IS_HASWELL(dev_priv))
		return intel_de_read(dev_priv, D_COMP_HSW);
	else
		return intel_de_read(dev_priv, D_COMP_BDW);
}

/* On HSW, D_COMP is written through the pcode mailbox; on BDW via MMIO */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
	if (IS_HASWELL(dev_priv)) {
		if (sandybridge_pcode_write(dev_priv,
					    GEN6_PCODE_WRITE_D_COMP, val))
			drm_dbg_kms(&dev_priv->drm,
				    "Failed to write to D_COMP\n");
	} else {
		intel_de_write(dev_priv, D_COMP_BDW, val);
		intel_de_posting_read(dev_priv, D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	u32 val;

	assert_can_disable_lcpll(dev_priv);

	val = intel_de_read(dev_priv, LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			drm_err(&dev_priv->drm, "Switching to FCLK failed\n");

		val = intel_de_read(dev_priv, LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);
	intel_de_posting_read(dev_priv, LCPLL_CTL);

	if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
		drm_err(&dev_priv->drm, "LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) &
		      D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
		drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LCPLL_CTL);

	/* Already fully restored? Then there is nothing to do. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		intel_de_write(dev_priv, LCPLL_CTL, val);
		intel_de_posting_read(dev_priv, LCPLL_CTL);
	}

	/* Re-enable the compensation logic before re-locking the PLL */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = intel_de_read(dev_priv, LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	intel_de_write(dev_priv, LCPLL_CTL, val);

	if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
		drm_err(&dev_priv->drm, "LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = intel_de_read(dev_priv, LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		intel_de_write(dev_priv, LCPLL_CTL, val);

		if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			drm_err(&dev_priv->drm,
				"Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_update_cdclk(dev_priv);
	intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev_priv);
	hsw_disable_lcpll(dev_priv, true, true);
}

static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	intel_init_pch_refclk(dev_priv);

	if (HAS_PCH_LPT_LP(dev_priv)) {
		val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
	}
}

/*
 * Toggle the PCH reset handshake bits: GEN7_MSG_CTL on IVB,
 * HSW_NDE_RSTWRN_OPT on everything else.
 */
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

/*
 * SKL display HW init: disable DC states, enable the PCH reset handshake,
 * PG1 + MISC IO wells, CDCLK and DBUF, then reload the DMC firmware when
 * resuming.
 */
static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

/* Reverse of skl_display_core_init() */
static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */
	/* disable PG1 and Misc I/O */

	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

/*
 * BXT display HW init: like SKL but the PCH reset handshake must stay
 * disabled (no PCH present) and there is no MISC IO well.
 */
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

/* Reverse of bxt_display_core_init() */
static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

/* CNL display HW init, following the numbered BSpec sequence steps */
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* 1. Enable PCH Reset Handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2-3. */
	intel_combo_phy_init(dev_priv);

	/*
	 * 4. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Enable CD clock */
	intel_cdclk_init_hw(dev_priv);

	/* 6. Enable DBUF */
	gen9_dbuf_enable(dev_priv);

	if (resume && dev_priv->csr.dmc_payload)
		intel_csr_load_program(dev_priv);
}

/* Reverse of cnl_display_core_init() */
static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */

	/* 5. */
	intel_combo_phy_uninit(dev_priv);
}

/* BW_BUDDY page mask lookup keyed by DRAM type + channel count */
struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

/* Tables are terminated by a zero page_mask sentinel entry */
static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{}
};

/*
 * Program the BW_BUDDY page masks for the detected memory configuration,
 * or disable address buddy logic if the configuration is unknown.
 */
static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
	int config, i;

	if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B0))
		/* Wa_1409767108: tgl */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,rkl */
			intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
				     BW_BUDDY_TLB_REQ_TIMER_MASK,
				     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}

/* ICL+ display HW init, following the numbered BSpec sequence steps */
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7.
Program arbiter BW_BUDDY registers */ 5340 if (INTEL_GEN(dev_priv) >= 12) 5341 tgl_bw_buddy_init(dev_priv); 5342 5343 if (resume && dev_priv->csr.dmc_payload) 5344 intel_csr_load_program(dev_priv); 5345 5346 /* Wa_14011508470 */ 5347 if (IS_GEN(dev_priv, 12)) { 5348 val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM | 5349 DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR; 5350 intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val); 5351 } 5352 } 5353 5354 static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 5355 { 5356 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5357 struct i915_power_well *well; 5358 5359 gen9_disable_dc_states(dev_priv); 5360 5361 /* 1. Disable all display engine functions -> aready done */ 5362 5363 /* 2. Disable DBUF */ 5364 gen9_dbuf_disable(dev_priv); 5365 5366 /* 3. Disable CD clock */ 5367 intel_cdclk_uninit_hw(dev_priv); 5368 5369 /* 5370 * 4. Disable Power Well 1 (PG1). 5371 * The AUX IO power wells are toggled on demand, so they are already 5372 * disabled at this point. 5373 */ 5374 mutex_lock(&power_domains->lock); 5375 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5376 intel_power_well_disable(dev_priv, well); 5377 mutex_unlock(&power_domains->lock); 5378 5379 /* 5. */ 5380 intel_combo_phy_uninit(dev_priv); 5381 } 5382 5383 static void chv_phy_control_init(struct drm_i915_private *dev_priv) 5384 { 5385 struct i915_power_well *cmn_bc = 5386 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 5387 struct i915_power_well *cmn_d = 5388 lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D); 5389 5390 /* 5391 * DISPLAY_PHY_CONTROL can get corrupted if read. As a 5392 * workaround never ever read DISPLAY_PHY_CONTROL, and 5393 * instead maintain a shadow copy ourselves. Use the actual 5394 * power well state and lane status to reconstruct the 5395 * expected initial value. 
5396 */ 5397 dev_priv->chv_phy_control = 5398 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) | 5399 PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) | 5400 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) | 5401 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) | 5402 PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0); 5403 5404 /* 5405 * If all lanes are disabled we leave the override disabled 5406 * with all power down bits cleared to match the state we 5407 * would use after disabling the port. Otherwise enable the 5408 * override and set the lane powerdown bits accding to the 5409 * current lane status. 5410 */ 5411 if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) { 5412 u32 status = intel_de_read(dev_priv, DPLL(PIPE_A)); 5413 unsigned int mask; 5414 5415 mask = status & DPLL_PORTB_READY_MASK; 5416 if (mask == 0xf) 5417 mask = 0x0; 5418 else 5419 dev_priv->chv_phy_control |= 5420 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0); 5421 5422 dev_priv->chv_phy_control |= 5423 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0); 5424 5425 mask = (status & DPLL_PORTC_READY_MASK) >> 4; 5426 if (mask == 0xf) 5427 mask = 0x0; 5428 else 5429 dev_priv->chv_phy_control |= 5430 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1); 5431 5432 dev_priv->chv_phy_control |= 5433 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 5434 5435 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 5436 5437 dev_priv->chv_phy_assert[DPIO_PHY0] = false; 5438 } else { 5439 dev_priv->chv_phy_assert[DPIO_PHY0] = true; 5440 } 5441 5442 if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) { 5443 u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS); 5444 unsigned int mask; 5445 5446 mask = status & DPLL_PORTD_READY_MASK; 5447 5448 if (mask == 0xf) 5449 mask = 0x0; 5450 else 5451 dev_priv->chv_phy_control |= 5452 PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0); 5453 5454 dev_priv->chv_phy_control |= 5455 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 5456 5457 
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 5458 5459 dev_priv->chv_phy_assert[DPIO_PHY1] = false; 5460 } else { 5461 dev_priv->chv_phy_assert[DPIO_PHY1] = true; 5462 } 5463 5464 drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n", 5465 dev_priv->chv_phy_control); 5466 5467 /* Defer application of initial phy_control to enabling the powerwell */ 5468 } 5469 5470 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv) 5471 { 5472 struct i915_power_well *cmn = 5473 lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC); 5474 struct i915_power_well *disp2d = 5475 lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D); 5476 5477 /* If the display might be already active skip this */ 5478 if (cmn->desc->ops->is_enabled(dev_priv, cmn) && 5479 disp2d->desc->ops->is_enabled(dev_priv, disp2d) && 5480 intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST) 5481 return; 5482 5483 drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n"); 5484 5485 /* cmnlane needs DPLL registers */ 5486 disp2d->desc->ops->enable(dev_priv, disp2d); 5487 5488 /* 5489 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx: 5490 * Need to assert and de-assert PHY SB reset by gating the 5491 * common lane power, then un-gating it. 5492 * Simply ungating isn't enough to reset the PHY enough to get 5493 * ports and lanes running. 
5494 */ 5495 cmn->desc->ops->disable(dev_priv, cmn); 5496 } 5497 5498 static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0) 5499 { 5500 bool ret; 5501 5502 vlv_punit_get(dev_priv); 5503 ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE; 5504 vlv_punit_put(dev_priv); 5505 5506 return ret; 5507 } 5508 5509 static void assert_ved_power_gated(struct drm_i915_private *dev_priv) 5510 { 5511 drm_WARN(&dev_priv->drm, 5512 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0), 5513 "VED not power gated\n"); 5514 } 5515 5516 static void assert_isp_power_gated(struct drm_i915_private *dev_priv) 5517 { 5518 #ifdef notyet 5519 static const struct pci_device_id isp_ids[] = { 5520 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)}, 5521 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)}, 5522 {} 5523 }; 5524 5525 drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) && 5526 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0), 5527 "ISP not power gated\n"); 5528 #endif 5529 } 5530 5531 static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv); 5532 5533 /** 5534 * intel_power_domains_init_hw - initialize hardware power domain state 5535 * @i915: i915 device instance 5536 * @resume: Called from resume code paths or not 5537 * 5538 * This function initializes the hardware power domain state and enables all 5539 * power wells belonging to the INIT power domain. Power wells in other 5540 * domains (and not in the INIT domain) are referenced or disabled by 5541 * intel_modeset_readout_hw_state(). After that the reference count of each 5542 * power well must match its HW enabled state, see 5543 * intel_power_domains_verify_state(). 5544 * 5545 * It will return with power domains disabled (to be enabled later by 5546 * intel_power_domains_enable()) and must be paired with 5547 * intel_power_domains_driver_remove(). 
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	/* Platform-specific display core / PHY initialization. */
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_CANNONLAKE(i915)) {
		cnl_display_core_init(i915, resume);
	} else if (IS_GEN9_BC(i915)) {
		skl_display_core_init(i915, resume);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/*
	 * Disable power support if the user asked so: with
	 * i915.disable_power_well=0 an extra INIT reference is taken here and
	 * never released, keeping all power wells enabled.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_get(i915, POWER_DOMAIN_INIT);
	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}

/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	/* Remove the refcount we took to keep power well support disabled. */
	if (!i915->params.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the ondemand enabling/disabling of the display power wells. Note that
 * power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
 * only at specific points of the display modeset sequence, thus they are not
 * affected by the intel_power_domains_enable()/disable() calls. The purpose
 * of these functions is to keep the rest of power wells enabled until the end
 * of display HW readout (which will acquire the power references reflecting
 * the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.wakeref);

	/* Drop the INIT reference taken by intel_power_domains_init_hw(). */
	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the ondemand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->wakeref);
	power_domains->wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * CSR/DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    i915->csr.dmc_payload) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	/* Platform-specific display core teardown. */
	if (INTEL_GEN(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_CANNONLAKE(i915))
		cnl_display_core_uninit(i915);
	else if (IS_GEN9_BC(i915))
		skl_display_core_uninit(i915);
	else if (IS_GEN9_LP(i915))
		bxt_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		/* Full re-init; this also re-takes the INIT wakeref. */
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->wakeref);
		power_domains->wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

/* Dump per-well and per-domain refcounts, for debugging mismatches. */
static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, " %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		/*
		 * An always-on well must read back as enabled; otherwise the
		 * HW state must match the SW refcount.
		 */
		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		/* Dump at most once to avoid log spam. */
		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif

void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
		bxt_enable_dc9(i915);
	else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
		hsw_enable_pc8(i915);
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEN9_LP(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		/* Re-enable the deepest DC state the DMC firmware allows. */
		if (i915->csr.dmc_payload) {
			if (i915->csr.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->csr.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEN9_LP(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (i915->csr.dmc_payload &&
		    (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}