/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#include "display/intel_crt.h"

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
#include "intel_display_power.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_sideband.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_vga.h"

bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
                                         enum i915_power_well_id power_well_id);
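/*
 * Map a power domain enum value to a human-readable name for debug output.
 * A minimal, hypothetical usage sketch:
 *
 *   drm_dbg_kms(&i915->drm, "grabbing %s\n",
 *               intel_display_power_domain_str(POWER_DOMAIN_PIPE_A));
 */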
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain)
{
        switch (domain) {
        case POWER_DOMAIN_DISPLAY_CORE:
                return "DISPLAY_CORE";
        case POWER_DOMAIN_PIPE_A:
                return "PIPE_A";
        case POWER_DOMAIN_PIPE_B:
                return "PIPE_B";
        case POWER_DOMAIN_PIPE_C:
                return "PIPE_C";
        case POWER_DOMAIN_PIPE_D:
                return "PIPE_D";
        case POWER_DOMAIN_PIPE_A_PANEL_FITTER:
                return "PIPE_A_PANEL_FITTER";
        case POWER_DOMAIN_PIPE_B_PANEL_FITTER:
                return "PIPE_B_PANEL_FITTER";
        case POWER_DOMAIN_PIPE_C_PANEL_FITTER:
                return "PIPE_C_PANEL_FITTER";
        case POWER_DOMAIN_PIPE_D_PANEL_FITTER:
                return "PIPE_D_PANEL_FITTER";
        case POWER_DOMAIN_TRANSCODER_A:
                return "TRANSCODER_A";
        case POWER_DOMAIN_TRANSCODER_B:
                return "TRANSCODER_B";
        case POWER_DOMAIN_TRANSCODER_C:
                return "TRANSCODER_C";
        case POWER_DOMAIN_TRANSCODER_D:
                return "TRANSCODER_D";
        case POWER_DOMAIN_TRANSCODER_EDP:
                return "TRANSCODER_EDP";
        case POWER_DOMAIN_TRANSCODER_VDSC_PW2:
                return "TRANSCODER_VDSC_PW2";
        case POWER_DOMAIN_TRANSCODER_DSI_A:
                return "TRANSCODER_DSI_A";
        case POWER_DOMAIN_TRANSCODER_DSI_C:
                return "TRANSCODER_DSI_C";
        case POWER_DOMAIN_PORT_DDI_A_LANES:
                return "PORT_DDI_A_LANES";
        case POWER_DOMAIN_PORT_DDI_B_LANES:
                return "PORT_DDI_B_LANES";
        case POWER_DOMAIN_PORT_DDI_C_LANES:
                return "PORT_DDI_C_LANES";
        case POWER_DOMAIN_PORT_DDI_D_LANES:
                return "PORT_DDI_D_LANES";
        case POWER_DOMAIN_PORT_DDI_E_LANES:
                return "PORT_DDI_E_LANES";
        case POWER_DOMAIN_PORT_DDI_F_LANES:
                return "PORT_DDI_F_LANES";
        case POWER_DOMAIN_PORT_DDI_G_LANES:
                return "PORT_DDI_G_LANES";
        case POWER_DOMAIN_PORT_DDI_H_LANES:
                return "PORT_DDI_H_LANES";
        case POWER_DOMAIN_PORT_DDI_I_LANES:
                return "PORT_DDI_I_LANES";
        case POWER_DOMAIN_PORT_DDI_A_IO:
                return "PORT_DDI_A_IO";
        case POWER_DOMAIN_PORT_DDI_B_IO:
                return "PORT_DDI_B_IO";
        case POWER_DOMAIN_PORT_DDI_C_IO:
                return "PORT_DDI_C_IO";
        case POWER_DOMAIN_PORT_DDI_D_IO:
                return "PORT_DDI_D_IO";
        case POWER_DOMAIN_PORT_DDI_E_IO:
                return "PORT_DDI_E_IO";
        case POWER_DOMAIN_PORT_DDI_F_IO:
                return "PORT_DDI_F_IO";
        case POWER_DOMAIN_PORT_DDI_G_IO:
                return "PORT_DDI_G_IO";
        case POWER_DOMAIN_PORT_DDI_H_IO:
                return "PORT_DDI_H_IO";
        case POWER_DOMAIN_PORT_DDI_I_IO:
                return "PORT_DDI_I_IO";
        case POWER_DOMAIN_PORT_DSI:
                return "PORT_DSI";
        case POWER_DOMAIN_PORT_CRT:
                return "PORT_CRT";
        case POWER_DOMAIN_PORT_OTHER:
                return "PORT_OTHER";
        case POWER_DOMAIN_VGA:
                return "VGA";
        case POWER_DOMAIN_AUDIO_MMIO:
                return "AUDIO_MMIO";
        case POWER_DOMAIN_AUDIO_PLAYBACK:
                return "AUDIO_PLAYBACK";
        case POWER_DOMAIN_AUX_A:
                return "AUX_A";
        case POWER_DOMAIN_AUX_B:
                return "AUX_B";
        case POWER_DOMAIN_AUX_C:
                return "AUX_C";
        case POWER_DOMAIN_AUX_D:
                return "AUX_D";
        case POWER_DOMAIN_AUX_E:
                return "AUX_E";
        case POWER_DOMAIN_AUX_F:
                return "AUX_F";
        case POWER_DOMAIN_AUX_G:
                return "AUX_G";
        case POWER_DOMAIN_AUX_H:
                return "AUX_H";
        case POWER_DOMAIN_AUX_I:
                return "AUX_I";
        case POWER_DOMAIN_AUX_IO_A:
                return "AUX_IO_A";
        case POWER_DOMAIN_AUX_C_TBT:
                return "AUX_C_TBT";
        case POWER_DOMAIN_AUX_D_TBT:
                return "AUX_D_TBT";
        case POWER_DOMAIN_AUX_E_TBT:
                return "AUX_E_TBT";
        case POWER_DOMAIN_AUX_F_TBT:
                return "AUX_F_TBT";
        case POWER_DOMAIN_AUX_G_TBT:
                return "AUX_G_TBT";
        case POWER_DOMAIN_AUX_H_TBT:
                return "AUX_H_TBT";
        case POWER_DOMAIN_AUX_I_TBT:
                return "AUX_I_TBT";
        case POWER_DOMAIN_GMBUS:
                return "GMBUS";
        case POWER_DOMAIN_INIT:
                return "INIT";
        case POWER_DOMAIN_MODESET:
                return "MODESET";
        case POWER_DOMAIN_GT_IRQ:
                return "GT_IRQ";
        case POWER_DOMAIN_DPLL_DC_OFF:
                return "DPLL_DC_OFF";
        case POWER_DOMAIN_TC_COLD_OFF:
                return "TC_COLD_OFF";
        default:
                MISSING_CASE(domain);
                return "?";
        }
}

static void intel_power_well_enable(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
{
        drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
        power_well->desc->ops->enable(dev_priv, power_well);
        power_well->hw_enabled = true;
}

static void intel_power_well_disable(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
{
        drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
        power_well->hw_enabled = false;
        power_well->desc->ops->disable(dev_priv, power_well);
}

static void intel_power_well_get(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        if (!power_well->count++)
                intel_power_well_enable(dev_priv, power_well);
}

static void intel_power_well_put(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        drm_WARN(&dev_priv->drm, !power_well->count,
                 "Use count on power well %s is already zero",
                 power_well->desc->name);

        if (!--power_well->count)
                intel_power_well_disable(dev_priv, power_well);
}

/**
 * __intel_display_power_is_enabled - unlocked check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This is the unlocked version of intel_display_power_is_enabled() and should
 * only be used from error capture and recovery code where deadlocks are
 * possible.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                      enum intel_display_power_domain domain)
{
        struct i915_power_well *power_well;
        bool is_enabled;

        if (dev_priv->runtime_pm.suspended)
                return false;

        is_enabled = true;

        for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
                if (power_well->desc->always_on)
                        continue;

                if (!power_well->hw_enabled) {
                        is_enabled = false;
                        break;
                }
        }

        return is_enabled;
}
/**
 * intel_display_power_is_enabled - check for a power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to check
 *
 * This function can be used to check the hw power domain state. It is mostly
 * used in hardware state readout functions. Everywhere else code should rely
 * upon explicit power domain reference counting to ensure that the hardware
 * block is powered up before accessing it.
 *
 * Callers must hold the relevant modesetting locks to ensure that concurrent
 * threads can't disable the power well while the caller tries to read a few
 * registers.
 *
 * Returns:
 * True when the power domain is enabled, false otherwise.
 */
bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
                                    enum intel_display_power_domain domain)
{
        struct i915_power_domains *power_domains;
        bool ret;

        power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);
        ret = __intel_display_power_is_enabled(dev_priv, domain);
        mutex_unlock(&power_domains->lock);

        return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv,
                                       u8 irq_pipe_mask, bool has_vga)
{
        if (has_vga)
                intel_vga_reset_io_mem(dev_priv);

        if (irq_pipe_mask)
                gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask);
}

static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv,
                                       u8 irq_pipe_mask)
{
        if (irq_pipe_mask)
                gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask);
}

#define ICL_AUX_PW_TO_CH(pw_idx) \
        ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)

#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
        ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)

static enum aux_ch icl_aux_pw_to_ch(const struct i915_power_well *power_well)
{
        int pw_idx = power_well->desc->hsw.idx;

        return power_well->desc->hsw.is_tc_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
                                                 ICL_AUX_PW_TO_CH(pw_idx);
}

static struct intel_digital_port *
aux_ch_to_digital_port(struct drm_i915_private *dev_priv,
                       enum aux_ch aux_ch)
{
        struct intel_digital_port *dig_port = NULL;
        struct intel_encoder *encoder;

        for_each_intel_encoder(&dev_priv->drm, encoder) {
                /* We'll check the MST primary port */
                if (encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                dig_port = enc_to_dig_port(encoder);
                if (!dig_port)
                        continue;

                if (dig_port->aux_ch != aux_ch) {
                        dig_port = NULL;
                        continue;
                }

                break;
        }

        return dig_port;
}

static enum phy icl_aux_pw_to_phy(struct drm_i915_private *i915,
                                  const struct i915_power_well *power_well)
{
        enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
        struct intel_digital_port *dig_port = aux_ch_to_digital_port(i915, aux_ch);

        return intel_port_to_phy(i915, dig_port->base.port);
}
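/*
 * Wait for a power well to report enabled. Most wells set a status/ack bit;
 * a few (see the comment in the function) only specify a fixed settle time
 * instead of an ack.
 */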
static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well,
                                           bool timeout_expected)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        int enable_delay = power_well->desc->hsw.fixed_enable_delay;

        /*
         * For some power wells we're not supposed to watch the status bit for
         * an ack, but rather just wait a fixed amount of time and then
         * proceed. This is only used on DG2.
         */
        if (IS_DG2(dev_priv) && enable_delay) {
                usleep_range(enable_delay, 2 * enable_delay);
                return;
        }

        /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
        if (intel_de_wait_for_set(dev_priv, regs->driver,
                                  HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
                drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
                            power_well->desc->name);

                drm_WARN_ON(&dev_priv->drm, !timeout_expected);
        }
}

static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
                                     const struct i915_power_well_regs *regs,
                                     int pw_idx)
{
        u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
        u32 ret;

        ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
        ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
        if (regs->kvmr.reg)
                ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
        ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;

        return ret;
}

static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        bool disabled;
        u32 reqs;

        /*
         * Bspec doesn't require waiting for PWs to get disabled, but still do
         * this for paranoia. The known cases where a PW will be forced on:
         * - a KVMR request on any power well via the KVMR request register
         * - a DMC request on PW1 and MISC_IO power wells via the BIOS and
         *   DEBUG request registers
         * Skip the wait in case any of the request bits are set and print a
         * diagnostic message.
         */
        wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
                               HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
                 (reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
        if (disabled)
                return;

        drm_dbg_kms(&dev_priv->drm,
                    "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
                    power_well->desc->name,
                    !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}

static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
                                           enum skl_power_gate pg)
{
        /* Timeout 5us for PG#0, for other PGs 1us */
        drm_WARN_ON(&dev_priv->drm,
                    intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
                                          SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
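/*
 * Generic HSW+ power well enable: set the driver's request bit, wait for the
 * state ack, and on platforms with fuse distribution status also wait for
 * the corresponding power gate fuses.
 */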
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 val;

        if (power_well->desc->hsw.has_fuses) {
                enum skl_power_gate pg;

                pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
                                                   SKL_PW_CTL_IDX_TO_PG(pw_idx);
                /*
                 * For PW1 we have to wait both for the PW0/PG0 fuse state
                 * before enabling the power well and PW1/PG1's own fuse
                 * state after the enabling. For all other power wells with
                 * fuses we only have to wait for that PW/PG's fuse state
                 * after the enabling.
                 */
                if (pg == SKL_PG1)
                        gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
        }

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

        hsw_wait_for_power_well_enable(dev_priv, power_well, false);

        if (power_well->desc->hsw.has_fuses) {
                enum skl_power_gate pg;

                pg = DISPLAY_VER(dev_priv) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) :
                                                   SKL_PW_CTL_IDX_TO_PG(pw_idx);
                gen9_wait_for_power_well_fuses(dev_priv, pg);
        }

        hsw_power_well_post_enable(dev_priv,
                                   power_well->desc->hsw.irq_pipe_mask,
                                   power_well->desc->hsw.has_vga);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 val;

        hsw_power_well_pre_disable(dev_priv,
                                   power_well->desc->hsw.irq_pipe_mask);

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
        hsw_wait_for_power_well_disable(dev_priv, power_well);
}

static void
icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                    struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
        u32 val;

        drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val | HSW_PWR_WELL_CTL_REQ(pw_idx));

        if (DISPLAY_VER(dev_priv) < 12) {
                val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
                intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
                               val | ICL_LANE_ENABLE_AUX);
        }

        hsw_wait_for_power_well_enable(dev_priv, power_well, false);

        /* Display WA #1178: icl */
        if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
            !intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
                val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
                val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
                intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
        }
}

static void
icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
                                     struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);
        u32 val;

        drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));

        val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
        intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
                       val & ~ICL_LANE_ENABLE_AUX);

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));

        hsw_wait_for_power_well_disable(dev_priv, power_well);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static u64 async_put_domains_mask(struct i915_power_domains *power_domains);

static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
                                      struct i915_power_well *power_well)
{
        int refs = hweight64(power_well->desc->domains &
                             async_put_domains_mask(&dev_priv->power_domains));

        drm_WARN_ON(&dev_priv->drm, refs > power_well->count);

        return refs;
}
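/*
 * Sanity check that whoever toggles a TC AUX power well also holds a
 * reference on the corresponding TC port, except for the cases spelled out
 * in the function body.
 */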
static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well,
                                        struct intel_digital_port *dig_port)
{
        /* Bypass the check if all references are released asynchronously */
        if (power_well_async_ref_count(dev_priv, power_well) ==
            power_well->count)
                return;

        if (drm_WARN_ON(&dev_priv->drm, !dig_port))
                return;

        if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
                return;

        drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}

#else

static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
                                        struct i915_power_well *power_well,
                                        struct intel_digital_port *dig_port)
{
}

#endif

#define TGL_AUX_PW_TO_TC_PORT(pw_idx)   ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1)

static void icl_tc_cold_exit(struct drm_i915_private *i915)
{
        int ret, tries = 0;

        while (1) {
                ret = sandybridge_pcode_write_timeout(i915,
                                                      ICL_PCODE_EXIT_TCCOLD,
                                                      0, 250, 1);
                if (ret != -EAGAIN || ++tries == 3)
                        break;
                drm_msleep(1);
        }

        /* Spec states that TC cold exit can take up to 1ms to complete */
        if (!ret)
                drm_msleep(1);

        /* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
        drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
                    "succeeded");
}
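/*
 * Enable an AUX power well on a TC PHY: select TBT vs. non-TBT AUX IO, set
 * the request bit, handle the ICL legacy-port TC-cold exit, then wait for
 * the well (an enable timeout is expected in the cases noted below).
 */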
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
{
        enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
        struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        bool is_tbt = power_well->desc->hsw.is_tc_tbt;
        bool timeout_expected;
        u32 val;

        icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

        val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
        val &= ~DP_AUX_CH_CTL_TBT_IO;
        if (is_tbt)
                val |= DP_AUX_CH_CTL_TBT_IO;
        intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);

        val = intel_de_read(dev_priv, regs->driver);
        intel_de_write(dev_priv, regs->driver,
                       val | HSW_PWR_WELL_CTL_REQ(power_well->desc->hsw.idx));

        /*
         * An AUX timeout is expected if the TBT DP tunnel is down, or if we
         * need to enable AUX on a legacy TypeC port as part of the TC-cold
         * exit sequence.
         */
        timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
        if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
                icl_tc_cold_exit(dev_priv);

        hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);

        if (DISPLAY_VER(dev_priv) >= 12 && !is_tbt) {
                enum tc_port tc_port;

                tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
                intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
                               HIP_INDEX_VAL(tc_port, 0x2));

                if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
                                          DKL_CMN_UC_DW27_UC_HEALTH, 1))
                        drm_warn(&dev_priv->drm,
                                 "Timeout waiting for TC uC health\n");
        }
}

static void
icl_tc_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        enum aux_ch aux_ch = icl_aux_pw_to_ch(power_well);
        struct intel_digital_port *dig_port = aux_ch_to_digital_port(dev_priv, aux_ch);

        icl_tc_port_assert_ref_held(dev_priv, power_well, dig_port);

        hsw_power_well_disable(dev_priv, power_well);
}

static void
icl_aux_power_well_enable(struct drm_i915_private *dev_priv,
                          struct i915_power_well *power_well)
{
        enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

        if (intel_phy_is_tc(dev_priv, phy))
                return icl_tc_phy_aux_power_well_enable(dev_priv, power_well);
        else if (IS_ICELAKE(dev_priv))
                return icl_combo_phy_aux_power_well_enable(dev_priv,
                                                           power_well);
        else
                return hsw_power_well_enable(dev_priv, power_well);
}

static void
icl_aux_power_well_disable(struct drm_i915_private *dev_priv,
                           struct i915_power_well *power_well)
{
        enum phy phy = icl_aux_pw_to_phy(dev_priv, power_well);

        if (intel_phy_is_tc(dev_priv, phy))
                return icl_tc_phy_aux_power_well_disable(dev_priv, power_well);
        else if (IS_ICELAKE(dev_priv))
                return icl_combo_phy_aux_power_well_disable(dev_priv,
                                                            power_well);
        else
                return hsw_power_well_disable(dev_priv, power_well);
}
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        enum i915_power_well_id id = power_well->desc->id;
        int pw_idx = power_well->desc->hsw.idx;
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
                   HSW_PWR_WELL_CTL_STATE(pw_idx);
        u32 val;

        val = intel_de_read(dev_priv, regs->driver);

        /*
         * On GEN9 big core due to a DMC bug the driver's request bits for PW1
         * and the MISC_IO PW will not be restored, so check instead for the
         * BIOS's own request bits, which are forced-on for these power wells
         * when exiting DC5/6.
         */
        if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
            (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
                val |= intel_de_read(dev_priv, regs->bios);

        return (val & mask) == mask;
}

static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
        drm_WARN_ONCE(&dev_priv->drm,
                      (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
                      "DC9 already programmed to be enabled.\n");
        drm_WARN_ONCE(&dev_priv->drm,
                      intel_de_read(dev_priv, DC_STATE_EN) &
                      DC_STATE_EN_UPTO_DC5,
                      "DC5 still not disabled to enable DC9.\n");
        drm_WARN_ONCE(&dev_priv->drm,
                      intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
                      HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
                      "Power well 2 on.\n");
        drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
                      "Interrupts not disabled yet.\n");

        /*
         * TODO: check for the following to verify the conditions to enter DC9
         * state are satisfied:
         * 1] Check relevant display engine registers to verify if mode set
         *    disable sequence was followed.
         * 2] Check if the display uninitialize sequence was executed.
         */
}

static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
        drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
                      "Interrupts not disabled yet.\n");
        drm_WARN_ONCE(&dev_priv->drm,
                      intel_de_read(dev_priv, DC_STATE_EN) &
                      DC_STATE_EN_UPTO_DC5,
                      "DC5 still not disabled.\n");

        /*
         * TODO: check for the following to verify DC9 state was indeed
         * entered before programming to disable it:
         * 1] Check relevant display engine registers to verify if mode
         *    set disable sequence was followed.
         * 2] Check if the display uninitialize sequence was executed.
         */
}
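/*
 * Write the DC state and verify it stuck: the DMC has been observed to let
 * a stale value linger, so the write is re-read and retried within a
 * bounded loop.
 */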
static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
                                u32 state)
{
        int rewrites = 0;
        int rereads = 0;
        u32 v;

        intel_de_write(dev_priv, DC_STATE_EN, state);

        /* It has been observed that disabling the dc6 state sometimes
         * doesn't stick and dmc keeps returning the old value. Make sure
         * the write really sticks enough times and also force rewrite until
         * we are confident that state is exactly what we want.
         */
        do {
                v = intel_de_read(dev_priv, DC_STATE_EN);

                if (v != state) {
                        intel_de_write(dev_priv, DC_STATE_EN, state);
                        rewrites++;
                        rereads = 0;
                } else if (rereads++ > 5) {
                        break;
                }

        } while (rewrites < 100);

        if (v != state)
                drm_err(&dev_priv->drm,
                        "Writing dc state to 0x%x failed, now 0x%x\n",
                        state, v);

        /* Most of the time we need one retry, avoid spam */
        if (rewrites > 1)
                drm_dbg_kms(&dev_priv->drm,
                            "Rewrote dc state to 0x%x %d times\n",
                            state, rewrites);
}

static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
{
        u32 mask;

        mask = DC_STATE_EN_UPTO_DC5;

        if (DISPLAY_VER(dev_priv) >= 12)
                mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6
                                          | DC_STATE_EN_DC9;
        else if (DISPLAY_VER(dev_priv) == 11)
                mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                mask |= DC_STATE_EN_DC9;
        else
                mask |= DC_STATE_EN_UPTO_DC6;

        return mask;
}

static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
        u32 val;

        if (!HAS_DISPLAY(dev_priv))
                return;

        val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);

        drm_dbg_kms(&dev_priv->drm,
                    "Resetting DC state tracking from %02x to %02x\n",
                    dev_priv->dmc.dc_state, val);
        dev_priv->dmc.dc_state = val;
}
/**
 * gen9_set_dc_state - set target display C power state
 * @dev_priv: i915 device instance
 * @state: target DC power state
 * - DC_STATE_DISABLE
 * - DC_STATE_EN_UPTO_DC5
 * - DC_STATE_EN_UPTO_DC6
 * - DC_STATE_EN_DC9
 *
 * Signal to DMC firmware/HW the target DC power state passed in @state.
 * DMC/HW can turn off individual display clocks and power rails when entering
 * a deeper DC power state (higher in number) and turns these back on when
 * exiting that state to a shallower power state (lower in number). The HW
 * will decide when to actually enter a given state on an on-demand basis, for
 * instance depending on the active state of display pipes. The state of
 * display registers backed by affected power rails is saved/restored as
 * needed.
 *
 * Based on the above, enabling a deeper DC power state is asynchronous wrt.
 * the HW actually entering it. Disabling a deeper power state is synchronous:
 * for instance setting %DC_STATE_DISABLE won't complete until all HW
 * resources are turned back on and register state is restored. This is
 * guaranteed by the MMIO write to DC_STATE_EN blocking until the state is
 * restored.
 */
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
        u32 val;
        u32 mask;

        if (!HAS_DISPLAY(dev_priv))
                return;

        if (drm_WARN_ON_ONCE(&dev_priv->drm,
                             state & ~dev_priv->dmc.allowed_dc_mask))
                state &= dev_priv->dmc.allowed_dc_mask;

        val = intel_de_read(dev_priv, DC_STATE_EN);
        mask = gen9_dc_mask(dev_priv);
        drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
                    val & mask, state);

        /* Check if DMC is ignoring our DC state requests */
        if ((val & mask) != dev_priv->dmc.dc_state)
                drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
                        dev_priv->dmc.dc_state, val & mask);

        val &= ~mask;
        val |= state;

        gen9_write_dc_state(dev_priv, val);

        dev_priv->dmc.dc_state = val & mask;
}

static u32
sanitize_target_dc_state(struct drm_i915_private *dev_priv,
                         u32 target_dc_state)
{
        u32 states[] = {
                DC_STATE_EN_UPTO_DC6,
                DC_STATE_EN_UPTO_DC5,
                DC_STATE_EN_DC3CO,
                DC_STATE_DISABLE,
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
                if (target_dc_state != states[i])
                        continue;

                if (dev_priv->dmc.allowed_dc_mask & target_dc_state)
                        break;

                target_dc_state = states[i + 1];
        }

        return target_dc_state;
}

static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
        drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}

static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
        u32 val;

        drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
        val = intel_de_read(dev_priv, DC_STATE_EN);
        val &= ~DC_STATE_DC3CO_STATUS;
        intel_de_write(dev_priv, DC_STATE_EN, val);
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
        /*
         * Delay of 200us DC3CO Exit time B.Spec 49196
         */
        usleep_range(200, 210);
}
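/*
 * DC9 entry/exit: sanity-check the preconditions with the asserts defined
 * above, then hand the target state to the DMC.
 */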
static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc9(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
        /*
         * Power sequencer reset is not needed on
         * platforms with South Display Engine on PCH,
         * because PPS registers are always on.
         */
        if (!HAS_PCH_SPLIT(dev_priv))
                intel_pps_reset_all(dev_priv);
        gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}

static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
        assert_can_disable_dc9(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        intel_pps_unlock_regs_wa(dev_priv);
}

static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
{
        drm_WARN_ONCE(&dev_priv->drm,
                      !intel_de_read(dev_priv,
                                     DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
                      "DMC program storage start is NULL\n");
        drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
                      "DMC SSP Base Not fine\n");
        drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
                      "DMC HTP Not fine\n");
}
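/*
 * Look up a power well by ID. Falls back to the first well (with a WARN)
 * rather than returning NULL, since callers are init/teardown sequences
 * that can't reasonably abort; see the comment in the function body.
 */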
static struct i915_power_well *
lookup_power_well(struct drm_i915_private *dev_priv,
                  enum i915_power_well_id power_well_id)
{
        struct i915_power_well *power_well;

        for_each_power_well(dev_priv, power_well)
                if (power_well->desc->id == power_well_id)
                        return power_well;

        /*
         * It's not feasible to add error checking code to the callers since
         * this condition really shouldn't happen and it doesn't even make sense
         * to abort things like display initialization sequences. Just return
         * the first power well and hope the WARN gets reported so we can fix
         * our driver.
         */
        drm_WARN(&dev_priv->drm, 1,
                 "Power well %d not defined for this platform\n",
                 power_well_id);
        return &dev_priv->power_domains.power_wells[0];
}

/**
 * intel_display_power_set_target_dc_state - Set target dc state.
 * @dev_priv: i915 device
 * @state: state which needs to be set as target_dc_state.
 *
 * This function sets the "DC off" power well target_dc_state;
 * based upon this target_dc_state, the "DC off" power well will
 * enable the desired DC state.
 */
void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
                                             u32 state)
{
        struct i915_power_well *power_well;
        bool dc_off_enabled;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;

        mutex_lock(&power_domains->lock);
        power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);

        if (drm_WARN_ON(&dev_priv->drm, !power_well))
                goto unlock;

        state = sanitize_target_dc_state(dev_priv, state);

        if (state == dev_priv->dmc.target_dc_state)
                goto unlock;

        dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv,
                                                           power_well);
        /*
         * If the DC off power well is disabled, we need to enable and then
         * disable it so the new target DC state takes effect.
         */
        if (!dc_off_enabled)
                power_well->desc->ops->enable(dev_priv, power_well);

        dev_priv->dmc.target_dc_state = state;

        if (!dc_off_enabled)
                power_well->desc->ops->disable(dev_priv, power_well);

unlock:
        mutex_unlock(&power_domains->lock);
}

static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
        enum i915_power_well_id high_pg;

        /* Power wells at this level and above must be disabled for DC5 entry */
        if (DISPLAY_VER(dev_priv) == 12)
                high_pg = ICL_DISP_PW_3;
        else
                high_pg = SKL_DISP_PW_2;

        drm_WARN_ONCE(&dev_priv->drm,
                      intel_display_power_well_is_enabled(dev_priv, high_pg),
                      "Power wells above platform's DC5 limit still enabled.\n");

        drm_WARN_ONCE(&dev_priv->drm,
                      (intel_de_read(dev_priv, DC_STATE_EN) &
                       DC_STATE_EN_UPTO_DC5),
                      "DC5 already programmed to be enabled.\n");
        assert_rpm_wakelock_held(&dev_priv->runtime_pm);

        assert_dmc_loaded(dev_priv);
}

static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc5(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");

        /* Wa Display #1183: skl,kbl,cfl */
        if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
                intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
                               intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}

static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
        drm_WARN_ONCE(&dev_priv->drm,
                      intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
                      "Backlight is not disabled.\n");
        drm_WARN_ONCE(&dev_priv->drm,
                      (intel_de_read(dev_priv, DC_STATE_EN) &
                       DC_STATE_EN_UPTO_DC6),
                      "DC6 already programmed to be enabled.\n");

        assert_dmc_loaded(dev_priv);
}

static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
        assert_can_enable_dc6(dev_priv);

        drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");

        /* Wa Display #1183: skl,kbl,cfl */
        if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
                intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
                               intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);

        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
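/*
 * Sync the driver's bookkeeping with whatever request bits the BIOS left
 * behind: if the BIOS enabled a well, take over the request in the driver
 * register and clear the BIOS one.
 */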
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
        u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
        u32 bios_req = intel_de_read(dev_priv, regs->bios);

        /* Take over the request bit if set by BIOS. */
        if (bios_req & mask) {
                u32 drv_req = intel_de_read(dev_priv, regs->driver);

                if (!(drv_req & mask))
                        intel_de_write(dev_priv, regs->driver, drv_req | mask);
                intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
        }
}

static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        bxt_ddi_phy_init(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        bxt_ddi_phy_uninit(dev_priv, power_well->desc->bxt.phy);
}

static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        return bxt_ddi_phy_is_enabled(dev_priv, power_well->desc->bxt.phy);
}

static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *power_well;

        power_well = lookup_power_well(dev_priv, BXT_DISP_PW_DPIO_CMN_A);
        if (power_well->count > 0)
                bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

        power_well = lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        if (power_well->count > 0)
                bxt_ddi_phy_verify_state(dev_priv, power_well->desc->bxt.phy);

        if (IS_GEMINILAKE(dev_priv)) {
                power_well = lookup_power_well(dev_priv,
                                               GLK_DISP_PW_DPIO_CMN_C);
                if (power_well->count > 0)
                        bxt_ddi_phy_verify_state(dev_priv,
                                                 power_well->desc->bxt.phy);
        }
}

static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
                (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}

static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
        u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
        u8 enabled_dbuf_slices = dev_priv->dbuf.enabled_slices;

        drm_WARN(&dev_priv->drm,
                 hw_enabled_dbuf_slices != enabled_dbuf_slices,
                 "Unexpected DBuf power state (0x%08x, expected 0x%08x)\n",
                 hw_enabled_dbuf_slices,
                 enabled_dbuf_slices);
}
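/*
 * Exit any DC state and verify that the HW state the DMC was supposed to
 * preserve (cdclk, DBuf, PHYs) still matches the driver's bookkeeping.
 */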
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
        struct intel_cdclk_config cdclk_config = {};

        if (dev_priv->dmc.target_dc_state == DC_STATE_EN_DC3CO) {
                tgl_disable_dc3co(dev_priv);
                return;
        }

        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

        if (!HAS_DISPLAY(dev_priv))
                return;

        dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
        /* Can't read out voltage_level so can't use intel_cdclk_changed() */
        drm_WARN_ON(&dev_priv->drm,
                    intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
                                              &cdclk_config));

        gen9_assert_dbuf_enabled(dev_priv);

        if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                bxt_verify_ddi_phy_power_wells(dev_priv);

        if (DISPLAY_VER(dev_priv) >= 11)
                /*
                 * DMC retains HW context only for port A, the other combo
                 * PHY's HW context for port B is lost after DC transitions,
                 * so we need to restore it manually.
                 */
                intel_combo_phy_init(dev_priv);
}

static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        gen9_disable_dc_states(dev_priv);
}

static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        if (!intel_dmc_has_payload(dev_priv))
                return;

        switch (dev_priv->dmc.target_dc_state) {
        case DC_STATE_EN_DC3CO:
                tgl_enable_dc3co(dev_priv);
                break;
        case DC_STATE_EN_UPTO_DC6:
                skl_enable_dc6(dev_priv);
                break;
        case DC_STATE_EN_UPTO_DC5:
                gen9_enable_dc5(dev_priv);
                break;
        }
}

static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
                                         struct i915_power_well *power_well)
{
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
                                              struct i915_power_well *power_well)
{
        return true;
}

static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
                                         struct i915_power_well *power_well)
{
        if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
                i830_enable_pipe(dev_priv, PIPE_A);
        if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
                i830_enable_pipe(dev_priv, PIPE_B);
}

static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        i830_disable_pipe(dev_priv, PIPE_B);
        i830_disable_pipe(dev_priv, PIPE_A);
}

static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
                intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}

static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        if (power_well->count > 0)
                i830_pipes_power_well_enable(dev_priv, power_well);
        else
                i830_pipes_power_well_disable(dev_priv, power_well);
}
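/*
 * VLV power wells are gated through the Punit: request the new state in
 * PUNIT_REG_PWRGT_CTRL and poll PUNIT_REG_PWRGT_STATUS until it lands.
 */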
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
                               struct i915_power_well *power_well, bool enable)
{
        int pw_idx = power_well->desc->vlv.idx;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(pw_idx);
        state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
                         PUNIT_PWRGT_PWR_GATE(pw_idx);

        vlv_punit_get(dev_priv);

#define COND \
        ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

        if (COND)
                goto out;

        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
        ctrl &= ~mask;
        ctrl |= state;
        vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

        if (wait_for(COND, 100))
                drm_err(&dev_priv->drm,
                        "timeout setting power well state %08x (%08x)\n",
                        state,
                        vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
        vlv_punit_put(dev_priv);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
                                  struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
{
        int pw_idx = power_well->desc->vlv.idx;
        bool enabled = false;
        u32 mask;
        u32 state;
        u32 ctrl;

        mask = PUNIT_PWRGT_MASK(pw_idx);
        ctrl = PUNIT_PWRGT_PWR_ON(pw_idx);

        vlv_punit_get(dev_priv);

        state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         */
        drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
                    state != PUNIT_PWRGT_PWR_GATE(pw_idx));
        if (state == ctrl)
                enabled = true;

        /*
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
        ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
        drm_WARN_ON(&dev_priv->drm, ctrl != state);

        vlv_punit_put(dev_priv);

        return enabled;
}

static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
{
        u32 val;

        /*
         * On driver load, a pipe may be active and driving a DSI display.
         * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
         * (and never recovering) in this case. intel_dsi_post_disable() will
         * clear it when we turn off the display.
         */
        val = intel_de_read(dev_priv, DSPCLK_GATE_D);
        val &= DPOUNIT_CLOCK_GATE_DISABLE;
        val |= VRHUNIT_CLOCK_GATE_DISABLE;
        intel_de_write(dev_priv, DSPCLK_GATE_D, val);

        /*
         * Disable trickle feed and enable pnd deadline calculation
         */
        intel_de_write(dev_priv, MI_ARB_VLV,
                       MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
        intel_de_write(dev_priv, CBR1_VLV, 0);

        drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
        intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
                       DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
                                         1000));
}
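/*
 * Bring up the state that depends on the display power well: reference
 * clocks, clock gating, display IRQs, HPD, and the workarounds that must
 * be reapplied every time the well comes back on.
 */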
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
        struct intel_encoder *encoder;
        enum pipe pipe;

        /*
         * Enable the CRI clock source so we can get at the
         * display and the reference clock for VGA
         * hotplug / manual detection. Supposedly DSI also
         * needs the ref clock up and running.
         *
         * CHV DPLL B/C have some issues if VGA mode is enabled.
         */
        for_each_pipe(dev_priv, pipe) {
                u32 val = intel_de_read(dev_priv, DPLL(pipe));

                val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
                if (pipe != PIPE_A)
                        val |= DPLL_INTEGRATED_CRI_CLK_VLV;

                intel_de_write(dev_priv, DPLL(pipe), val);
        }

        vlv_init_display_clock_gating(dev_priv);

        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_enable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /*
         * During driver initialization/resume we can avoid restoring the
         * part of the HW/SW state that will be inited anyway explicitly.
         */
        if (dev_priv->power_domains.initializing)
                return;

        intel_hpd_init(dev_priv);
        intel_hpd_poll_disable(dev_priv);

        /* Re-enable the ADPA, if we have one */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
                if (encoder->type == INTEL_OUTPUT_ANALOG)
                        intel_crt_reset(&encoder->base);
        }

        intel_vga_redisable_power_on(dev_priv);

        intel_pps_unlock_regs_wa(dev_priv);
}

static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
        spin_lock_irq(&dev_priv->irq_lock);
        valleyview_disable_display_irqs(dev_priv);
        spin_unlock_irq(&dev_priv->irq_lock);

        /* make sure we're done processing display irqs */
        intel_synchronize_irq(dev_priv);

        intel_pps_reset_all(dev_priv);

        /* Prevent us from re-enabling polling by accident in late suspend */
#ifdef __linux__
        if (!dev_priv->drm.dev->power.is_suspended)
#else
        if (!cold)
#endif
                intel_hpd_poll_enable(dev_priv);
}

static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
                                          struct i915_power_well *power_well)
{
        vlv_set_power_well(dev_priv, power_well, true);

        vlv_display_power_well_init(dev_priv);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        vlv_display_power_well_deinit(dev_priv);

        vlv_set_power_well(dev_priv, power_well, false);
}
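/*
 * DPIO common lane well on VLV: power the well up first, then de-assert
 * cmn_reset per the sequence quoted in the comment below.
 */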
static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */

        vlv_set_power_well(dev_priv, power_well, true);

        /*
         * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
         * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
         *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
         *    b. The other bits such as sfr settings / modesel may all
         *       be set to 0.
         *
         * This should only be done on init and resume from S3 with
         * both PLLs disabled, or we risk losing DPIO and PLL
         * synchronization.
         */
        intel_de_write(dev_priv, DPIO_CTL,
                       intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum pipe pipe;

        for_each_pipe(dev_priv, pipe)
                assert_pll_disabled(dev_priv, pipe);

        /* Assert common reset */
        intel_de_write(dev_priv, DPIO_CTL,
                       intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);

        vlv_set_power_well(dev_priv, power_well, false);
}

#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))

#define BITS_SET(val, bits) (((val) & (bits)) == (bits))
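/*
 * Cross-check DISPLAY_PHY_STATUS against the value expected from the cached
 * chv_phy_control: reconstruct which LDOs should report powered up given
 * the current lane overrides and PLL state.
 */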
static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
{
        struct i915_power_well *cmn_bc =
                lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
        struct i915_power_well *cmn_d =
                lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);
        u32 phy_control = dev_priv->chv_phy_control;
        u32 phy_status = 0;
        u32 phy_status_mask = 0xffffffff;

        /*
         * The BIOS can leave the PHY in some weird state
         * where it doesn't fully power down some parts.
         * Disable the asserts until the PHY has been fully
         * reset (ie. the power well has been disabled at
         * least once).
         */
        if (!dev_priv->chv_phy_assert[DPIO_PHY0])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
                                     PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));

        if (!dev_priv->chv_phy_assert[DPIO_PHY1])
                phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
                                     PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));

        if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY0);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0);

                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1);

                /* CL1 is on whenever anything is on in either channel */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH0) |
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0);

                /*
                 * The DPLLB check accounts for the pipe B + port A usage
                 * with CL2 powered up but all the lanes in the second channel
                 * powered down.
                 */
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
                    (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY0, DPIO_CH1)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1);
        }

        if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
                phy_status |= PHY_POWERGOOD(DPIO_PHY1);

                /* this assumes override is only used to enable lanes */
                if ((phy_control & PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0)) == 0)
                        phy_control |= PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0);

                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0x3, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0);
                if (BITS_SET(phy_control,
                             PHY_CH_POWER_DOWN_OVRD(0xc, DPIO_PHY1, DPIO_CH0)))
                        phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
        }

        phy_status &= phy_status_mask;

        /*
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
        if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
                                       phy_status_mask, phy_status, 10))
                drm_err(&dev_priv->drm,
                        "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
                        intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
                        phy_status, dev_priv->chv_phy_control);
}
#undef BITS_SET
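/*
 * CHV DPIO common lane well: power up the well, wait for PHY power good,
 * enable dynamic power down, and finally de-assert the common lane reset
 * via DISPLAY_PHY_CONTROL.
 */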
static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
{
        enum dpio_phy phy;
        enum pipe pipe;
        u32 tmp;

        drm_WARN_ON_ONCE(&dev_priv->drm,
                         power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                         power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                pipe = PIPE_A;
                phy = DPIO_PHY0;
        } else {
                pipe = PIPE_C;
                phy = DPIO_PHY1;
        }

        /* since ref/cri clock was enabled */
        udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
        vlv_set_power_well(dev_priv, power_well, true);

        /* Poll for phypwrgood signal */
        if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
                                  PHY_POWERGOOD(phy), 1))
                drm_err(&dev_priv->drm, "Display PHY %d is not powered up\n",
                        phy);

        vlv_dpio_get(dev_priv);

        /* Enable dynamic power down */
        tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
        tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
                DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
        vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
                tmp |= DPIO_DYNPWRDOWNEN_CH1;
                vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
        } else {
                /*
                 * Force the non-existing CL2 off. BXT does this
                 * too, so maybe it saves some power even though
                 * CL2 doesn't exist?
                 */
                tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
                tmp |= DPIO_CL2_LDOFUSE_PWRENB;
                vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
        }

        vlv_dpio_put(dev_priv);

        dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
        intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
                       dev_priv->chv_phy_control);

        drm_dbg_kms(&dev_priv->drm,
                    "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                    phy, dev_priv->chv_phy_control);

        assert_chv_phy_status(dev_priv);
}

static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
                                            struct i915_power_well *power_well)
{
        enum dpio_phy phy;

        drm_WARN_ON_ONCE(&dev_priv->drm,
                         power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
                         power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);

        if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
                phy = DPIO_PHY0;
                assert_pll_disabled(dev_priv, PIPE_A);
                assert_pll_disabled(dev_priv, PIPE_B);
        } else {
                phy = DPIO_PHY1;
                assert_pll_disabled(dev_priv, PIPE_C);
        }

        dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
        intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
                       dev_priv->chv_phy_control);

        vlv_set_power_well(dev_priv, power_well, false);

        drm_dbg_kms(&dev_priv->drm,
                    "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
                    phy, dev_priv->chv_phy_control);

        /* PHY is fully reset now, so we can enable the PHY state asserts */
        dev_priv->chv_phy_assert[phy] = true;

        assert_chv_phy_status(dev_priv);
}
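/*
 * Verify the per-lane power down status in the DPIO common lane registers
 * against what the override mask implies; see the in-body comments for the
 * cases where the registers read back as zero.
 */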
1777 */ 1778 if (ch == DPIO_CH1 && val == 0) 1779 expected = 0; 1780 } else if (mask != 0x0) { 1781 expected = DPIO_ANYDL_POWERDOWN; 1782 } else { 1783 expected = 0; 1784 } 1785 1786 if (ch == DPIO_CH0) 1787 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH0; 1788 else 1789 actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1; 1790 actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN; 1791 1792 drm_WARN(&dev_priv->drm, actual != expected, 1793 "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n", 1794 !!(actual & DPIO_ALLDL_POWERDOWN), 1795 !!(actual & DPIO_ANYDL_POWERDOWN), 1796 !!(expected & DPIO_ALLDL_POWERDOWN), 1797 !!(expected & DPIO_ANYDL_POWERDOWN), 1798 reg, val); 1799 } 1800 1801 bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1802 enum dpio_channel ch, bool override) 1803 { 1804 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1805 bool was_override; 1806 1807 mutex_lock(&power_domains->lock); 1808 1809 was_override = dev_priv->chv_phy_control & PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1810 1811 if (override == was_override) 1812 goto out; 1813 1814 if (override) 1815 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1816 else 1817 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1818 1819 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1820 dev_priv->chv_phy_control); 1821 1822 drm_dbg_kms(&dev_priv->drm, 1823 "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n", 1824 phy, ch, dev_priv->chv_phy_control); 1825 1826 assert_chv_phy_status(dev_priv); 1827 1828 out: 1829 mutex_unlock(&power_domains->lock); 1830 1831 return was_override; 1832 } 1833 1834 void chv_phy_powergate_lanes(struct intel_encoder *encoder, 1835 bool override, unsigned int mask) 1836 { 1837 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1838 struct i915_power_domains *power_domains = &dev_priv->power_domains; 1839 enum dpio_phy phy = vlv_dig_port_to_phy(enc_to_dig_port(encoder)); 1840 enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder)); 1841 1842 mutex_lock(&power_domains->lock); 1843 1844 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD(0xf, phy, ch); 1845 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD(mask, phy, ch); 1846 1847 if (override) 1848 dev_priv->chv_phy_control |= PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1849 else 1850 dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch); 1851 1852 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1853 dev_priv->chv_phy_control); 1854 1855 drm_dbg_kms(&dev_priv->drm, 1856 "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n", 1857 phy, ch, mask, dev_priv->chv_phy_control); 1858 1859 assert_chv_phy_status(dev_priv); 1860 1861 assert_chv_phy_powergate(dev_priv, phy, ch, override, mask); 1862 1863 mutex_unlock(&power_domains->lock); 1864 } 1865 1866 static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, 1867 struct i915_power_well *power_well) 1868 { 1869 enum pipe pipe = PIPE_A; 1870 bool enabled; 1871 u32 state, ctrl; 1872 1873 vlv_punit_get(dev_priv); 1874 1875 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe); 1876 /* 1877 * We only ever set the power-on and power-gate states, anything 1878 * else is unexpected. 
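 * (Note: the DP_SSS() status bits for a pipe sit 16 bits above the
 * matching DP_SSC() control bits in PUNIT_REG_DSPSSPM, which is what
 * the "ctrl << 16" comparison further down relies on.)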
1879 */ 1880 drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) && 1881 state != DP_SSS_PWR_GATE(pipe)); 1882 enabled = state == DP_SSS_PWR_ON(pipe); 1883 1884 /* 1885 * A transient state at this point would mean some unexpected party 1886 * is poking at the power controls too. 1887 */ 1888 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe); 1889 drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state); 1890 1891 vlv_punit_put(dev_priv); 1892 1893 return enabled; 1894 } 1895 1896 static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, 1897 struct i915_power_well *power_well, 1898 bool enable) 1899 { 1900 enum pipe pipe = PIPE_A; 1901 u32 state; 1902 u32 ctrl; 1903 1904 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); 1905 1906 vlv_punit_get(dev_priv); 1907 1908 #define COND \ 1909 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) 1910 1911 if (COND) 1912 goto out; 1913 1914 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM); 1915 ctrl &= ~DP_SSC_MASK(pipe); 1916 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); 1917 vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl); 1918 1919 if (wait_for(COND, 100)) 1920 drm_err(&dev_priv->drm, 1921 "timeout setting power well state %08x (%08x)\n", 1922 state, 1923 vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM)); 1924 1925 #undef COND 1926 1927 out: 1928 vlv_punit_put(dev_priv); 1929 } 1930 1931 static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv, 1932 struct i915_power_well *power_well) 1933 { 1934 intel_de_write(dev_priv, DISPLAY_PHY_CONTROL, 1935 dev_priv->chv_phy_control); 1936 } 1937 1938 static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, 1939 struct i915_power_well *power_well) 1940 { 1941 chv_set_pipe_power_well(dev_priv, power_well, true); 1942 1943 vlv_display_power_well_init(dev_priv); 1944 } 1945 1946 static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, 1947 struct i915_power_well *power_well) 1948 { 1949 vlv_display_power_well_deinit(dev_priv); 1950 1951 chv_set_pipe_power_well(dev_priv, power_well, false); 1952 } 1953 1954 static u64 __async_put_domains_mask(struct i915_power_domains *power_domains) 1955 { 1956 return power_domains->async_put_domains[0] | 1957 power_domains->async_put_domains[1]; 1958 } 1959 1960 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 1961 1962 static bool 1963 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 1964 { 1965 struct drm_i915_private *i915 = container_of(power_domains, 1966 struct drm_i915_private, 1967 power_domains); 1968 return !drm_WARN_ON(&i915->drm, power_domains->async_put_domains[0] & 1969 power_domains->async_put_domains[1]); 1970 } 1971 1972 static bool 1973 __async_put_domains_state_ok(struct i915_power_domains *power_domains) 1974 { 1975 struct drm_i915_private *i915 = container_of(power_domains, 1976 struct drm_i915_private, 1977 power_domains); 1978 enum intel_display_power_domain domain; 1979 bool err = false; 1980 1981 err |= !assert_async_put_domain_masks_disjoint(power_domains); 1982 err |= drm_WARN_ON(&i915->drm, !!power_domains->async_put_wakeref != 1983 !!__async_put_domains_mask(power_domains)); 1984 1985 for_each_power_domain(domain, __async_put_domains_mask(power_domains)) 1986 err |= drm_WARN_ON(&i915->drm, 1987 power_domains->domain_use_count[domain] != 1); 1988 1989 return !err; 1990 } 1991 1992 static void print_power_domains(struct i915_power_domains *power_domains, 1993 const char *prefix, u64 
mask) 1994 { 1995 struct drm_i915_private *i915 = container_of(power_domains, 1996 struct drm_i915_private, 1997 power_domains); 1998 enum intel_display_power_domain domain; 1999 2000 drm_dbg(&i915->drm, "%s (%lu):\n", prefix, hweight64(mask)); 2001 for_each_power_domain(domain, mask) 2002 drm_dbg(&i915->drm, "%s use_count %d\n", 2003 intel_display_power_domain_str(domain), 2004 power_domains->domain_use_count[domain]); 2005 } 2006 2007 static void 2008 print_async_put_domains_state(struct i915_power_domains *power_domains) 2009 { 2010 struct drm_i915_private *i915 = container_of(power_domains, 2011 struct drm_i915_private, 2012 power_domains); 2013 2014 drm_dbg(&i915->drm, "async_put_wakeref %u\n", 2015 power_domains->async_put_wakeref); 2016 2017 print_power_domains(power_domains, "async_put_domains[0]", 2018 power_domains->async_put_domains[0]); 2019 print_power_domains(power_domains, "async_put_domains[1]", 2020 power_domains->async_put_domains[1]); 2021 } 2022 2023 static void 2024 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2025 { 2026 if (!__async_put_domains_state_ok(power_domains)) 2027 print_async_put_domains_state(power_domains); 2028 } 2029 2030 #else 2031 2032 static void 2033 assert_async_put_domain_masks_disjoint(struct i915_power_domains *power_domains) 2034 { 2035 } 2036 2037 static void 2038 verify_async_put_domains_state(struct i915_power_domains *power_domains) 2039 { 2040 } 2041 2042 #endif /* CONFIG_DRM_I915_DEBUG_RUNTIME_PM */ 2043 2044 static u64 async_put_domains_mask(struct i915_power_domains *power_domains) 2045 { 2046 assert_async_put_domain_masks_disjoint(power_domains); 2047 2048 return __async_put_domains_mask(power_domains); 2049 } 2050 2051 static void 2052 async_put_domains_clear_domain(struct i915_power_domains *power_domains, 2053 enum intel_display_power_domain domain) 2054 { 2055 assert_async_put_domain_masks_disjoint(power_domains); 2056 2057 power_domains->async_put_domains[0] &= ~BIT_ULL(domain); 2058 power_domains->async_put_domains[1] &= ~BIT_ULL(domain); 2059 } 2060 2061 static bool 2062 intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, 2063 enum intel_display_power_domain domain) 2064 { 2065 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2066 bool ret = false; 2067 2068 if (!(async_put_domains_mask(power_domains) & BIT_ULL(domain))) 2069 goto out_verify; 2070 2071 async_put_domains_clear_domain(power_domains, domain); 2072 2073 ret = true; 2074 2075 if (async_put_domains_mask(power_domains)) 2076 goto out_verify; 2077 2078 cancel_delayed_work(&power_domains->async_put_work); 2079 intel_runtime_pm_put_raw(&dev_priv->runtime_pm, 2080 fetch_and_zero(&power_domains->async_put_wakeref)); 2081 out_verify: 2082 verify_async_put_domains_state(power_domains); 2083 2084 return ret; 2085 } 2086 2087 static void 2088 __intel_display_power_get_domain(struct drm_i915_private *dev_priv, 2089 enum intel_display_power_domain domain) 2090 { 2091 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2092 struct i915_power_well *power_well; 2093 2094 if (intel_display_power_grab_async_put_ref(dev_priv, domain)) 2095 return; 2096 2097 for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain)) 2098 intel_power_well_get(dev_priv, power_well); 2099 2100 power_domains->domain_use_count[domain]++; 2101 } 2102 2103 /** 2104 * intel_display_power_get - grab a power domain reference 2105 * @dev_priv: i915 device instance 2106 * @domain: power domain to reference 2107 * 2108 * 
This function grabs a power domain reference for @domain and ensures that the 2109 * power domain and all its parents are powered up. Therefore users should only 2110 * grab a reference to the innermost power domain they need. 2111 * 2112 * Any power domain reference obtained by this function must have a symmetric 2113 * call to intel_display_power_put() to release the reference again. 2114 */ 2115 intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, 2116 enum intel_display_power_domain domain) 2117 { 2118 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2119 intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); 2120 2121 mutex_lock(&power_domains->lock); 2122 __intel_display_power_get_domain(dev_priv, domain); 2123 mutex_unlock(&power_domains->lock); 2124 2125 return wakeref; 2126 } 2127 2128 /** 2129 * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain 2130 * @dev_priv: i915 device instance 2131 * @domain: power domain to reference 2132 * 2133 * This function grabs a power domain reference for @domain if the domain is 2134 * already enabled; it does not power up a disabled domain and returns 0 in 2135 * that case. Users should only grab a reference to the innermost power domain they need. 2136 * 2137 * Any power domain reference obtained by this function must have a symmetric 2138 * call to intel_display_power_put() to release the reference again. 2139 */ 2140 intel_wakeref_t 2141 intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, 2142 enum intel_display_power_domain domain) 2143 { 2144 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2145 intel_wakeref_t wakeref; 2146 bool is_enabled; 2147 2148 wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); 2149 if (!wakeref) 2150 return 0; 2151 2152 mutex_lock(&power_domains->lock); 2153 2154 if (__intel_display_power_is_enabled(dev_priv, domain)) { 2155 __intel_display_power_get_domain(dev_priv, domain); 2156 is_enabled = true; 2157 } else { 2158 is_enabled = false; 2159 } 2160 2161 mutex_unlock(&power_domains->lock); 2162 2163 if (!is_enabled) { 2164 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2165 wakeref = 0; 2166 } 2167 2168 return wakeref; 2169 } 2170 2171 static void 2172 __intel_display_power_put_domain(struct drm_i915_private *dev_priv, 2173 enum intel_display_power_domain domain) 2174 { 2175 struct i915_power_domains *power_domains; 2176 struct i915_power_well *power_well; 2177 const char *name = intel_display_power_domain_str(domain); 2178 2179 power_domains = &dev_priv->power_domains; 2180 2181 drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain], 2182 "Use count on domain %s is already zero\n", 2183 name); 2184 drm_WARN(&dev_priv->drm, 2185 async_put_domains_mask(power_domains) & BIT_ULL(domain), 2186 "Async disabling of domain %s is pending\n", 2187 name); 2188 2189 power_domains->domain_use_count[domain]--; 2190 2191 for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) 2192 intel_power_well_put(dev_priv, power_well); 2193 } 2194 2195 static void __intel_display_power_put(struct drm_i915_private *dev_priv, 2196 enum intel_display_power_domain domain) 2197 { 2198 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2199 2200 mutex_lock(&power_domains->lock); 2201 __intel_display_power_put_domain(dev_priv, domain); 2202 mutex_unlock(&power_domains->lock); 2203 } 2204 2205 static void 2206 queue_async_put_domains_work(struct i915_power_domains
*power_domains, 2207 intel_wakeref_t wakeref) 2208 { 2209 struct drm_i915_private *i915 = container_of(power_domains, 2210 struct drm_i915_private, 2211 power_domains); 2212 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2213 power_domains->async_put_wakeref = wakeref; 2214 drm_WARN_ON(&i915->drm, !queue_delayed_work(system_unbound_wq, 2215 &power_domains->async_put_work, 2216 msecs_to_jiffies(100))); 2217 } 2218 2219 static void 2220 release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) 2221 { 2222 struct drm_i915_private *dev_priv = 2223 container_of(power_domains, struct drm_i915_private, 2224 power_domains); 2225 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2226 enum intel_display_power_domain domain; 2227 intel_wakeref_t wakeref; 2228 2229 /* 2230 * The caller must already hold a raw wakeref; upgrade that to a proper 2231 * wakeref to make the state checker happy about the HW access during 2232 * power well disabling. 2233 */ 2234 assert_rpm_raw_wakeref_held(rpm); 2235 wakeref = intel_runtime_pm_get(rpm); 2236 2237 for_each_power_domain(domain, mask) { 2238 /* Clear before put, so put's sanity check is happy. */ 2239 async_put_domains_clear_domain(power_domains, domain); 2240 __intel_display_power_put_domain(dev_priv, domain); 2241 } 2242 2243 intel_runtime_pm_put(rpm, wakeref); 2244 } 2245 2246 static void 2247 intel_display_power_put_async_work(struct work_struct *work) 2248 { 2249 struct drm_i915_private *dev_priv = 2250 container_of(work, struct drm_i915_private, 2251 power_domains.async_put_work.work); 2252 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2253 struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; 2254 intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); 2255 intel_wakeref_t old_work_wakeref = 0; 2256 2257 mutex_lock(&power_domains->lock); 2258 2259 /* 2260 * Bail out if all the domain refs pending to be released were grabbed 2261 * by subsequent gets or a flush_work. 2262 */ 2263 old_work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2264 if (!old_work_wakeref) 2265 goto out_verify; 2266 2267 release_async_put_domains(power_domains, 2268 power_domains->async_put_domains[0]); 2269 2270 /* Requeue the work if more domains were async put meanwhile. */ 2271 if (power_domains->async_put_domains[1]) { 2272 power_domains->async_put_domains[0] = 2273 fetch_and_zero(&power_domains->async_put_domains[1]); 2274 queue_async_put_domains_work(power_domains, 2275 fetch_and_zero(&new_work_wakeref)); 2276 } else { 2277 /* 2278 * Cancel the work that got queued after this one got dequeued, 2279 * since here we released the corresponding async-put reference. 2280 */ 2281 cancel_delayed_work(&power_domains->async_put_work); 2282 } 2283 2284 out_verify: 2285 verify_async_put_domains_state(power_domains); 2286 2287 mutex_unlock(&power_domains->lock); 2288 2289 if (old_work_wakeref) 2290 intel_runtime_pm_put_raw(rpm, old_work_wakeref); 2291 if (new_work_wakeref) 2292 intel_runtime_pm_put_raw(rpm, new_work_wakeref); 2293 } 2294 2295 /** 2296 * intel_display_power_put_async - release a power domain reference asynchronously 2297 * @i915: i915 device instance 2298 * @domain: power domain to reference 2299 * @wakeref: wakeref acquired for the reference that is being released 2300 * 2301 * This function drops the power domain reference obtained by 2302 * intel_display_power_get*() and schedules a work item to power down the 2303 * corresponding hardware block if this is the last reference.
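 *
 * The actual power down is deferred to a delayed work item (queued above in
 * queue_async_put_domains_work() with a 100 ms delay), so a quick get/put
 * cycle on the same domain will not toggle the hardware. A minimal usage
 * sketch (illustrative only; @domain stands for any display power domain):
 *
 *	wakeref = intel_display_power_get(i915, domain);
 *	... program the hardware ...
 *	intel_display_power_put_async(i915, domain, wakeref);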
2304 */ 2305 void __intel_display_power_put_async(struct drm_i915_private *i915, 2306 enum intel_display_power_domain domain, 2307 intel_wakeref_t wakeref) 2308 { 2309 struct i915_power_domains *power_domains = &i915->power_domains; 2310 struct intel_runtime_pm *rpm = &i915->runtime_pm; 2311 intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); 2312 2313 mutex_lock(&power_domains->lock); 2314 2315 if (power_domains->domain_use_count[domain] > 1) { 2316 __intel_display_power_put_domain(i915, domain); 2317 2318 goto out_verify; 2319 } 2320 2321 drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1); 2322 2323 /* Let a pending work requeue itself or queue a new one. */ 2324 if (power_domains->async_put_wakeref) { 2325 power_domains->async_put_domains[1] |= BIT_ULL(domain); 2326 } else { 2327 power_domains->async_put_domains[0] |= BIT_ULL(domain); 2328 queue_async_put_domains_work(power_domains, 2329 fetch_and_zero(&work_wakeref)); 2330 } 2331 2332 out_verify: 2333 verify_async_put_domains_state(power_domains); 2334 2335 mutex_unlock(&power_domains->lock); 2336 2337 if (work_wakeref) 2338 intel_runtime_pm_put_raw(rpm, work_wakeref); 2339 2340 intel_runtime_pm_put(rpm, wakeref); 2341 } 2342 2343 /** 2344 * intel_display_power_flush_work - flushes the async display power disabling work 2345 * @i915: i915 device instance 2346 * 2347 * Flushes any pending work that was scheduled by a preceding 2348 * intel_display_power_put_async() call, completing the disabling of the 2349 * corresponding power domains. 2350 * 2351 * Note that the work handler function may still be running after this 2352 * function returns; to ensure that the work handler isn't running, use 2353 * intel_display_power_flush_work_sync() instead. 2354 */ 2355 void intel_display_power_flush_work(struct drm_i915_private *i915) 2356 { 2357 struct i915_power_domains *power_domains = &i915->power_domains; 2358 intel_wakeref_t work_wakeref; 2359 2360 mutex_lock(&power_domains->lock); 2361 2362 work_wakeref = fetch_and_zero(&power_domains->async_put_wakeref); 2363 if (!work_wakeref) 2364 goto out_verify; 2365 2366 release_async_put_domains(power_domains, 2367 async_put_domains_mask(power_domains)); 2368 cancel_delayed_work(&power_domains->async_put_work); 2369 2370 out_verify: 2371 verify_async_put_domains_state(power_domains); 2372 2373 mutex_unlock(&power_domains->lock); 2374 2375 if (work_wakeref) 2376 intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); 2377 } 2378 2379 /** 2380 * intel_display_power_flush_work_sync - flushes and syncs the async display power disabling work 2381 * @i915: i915 device instance 2382 * 2383 * Like intel_display_power_flush_work(), but also ensures that the work 2384 * handler function is not running any more when this function returns.
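 *
 * This matters when the caller is about to free the state the work handler
 * operates on, e.g. on driver removal, where a handler still running after
 * a plain flush would be a use-after-free hazard.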
2385 */ 2386 static void 2387 intel_display_power_flush_work_sync(struct drm_i915_private *i915) 2388 { 2389 struct i915_power_domains *power_domains = &i915->power_domains; 2390 2391 intel_display_power_flush_work(i915); 2392 cancel_delayed_work_sync(&power_domains->async_put_work); 2393 2394 verify_async_put_domains_state(power_domains); 2395 2396 drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref); 2397 } 2398 2399 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2400 /** 2401 * intel_display_power_put - release a power domain reference 2402 * @dev_priv: i915 device instance 2403 * @domain: power domain to reference 2404 * @wakeref: wakeref acquired for the reference that is being released 2405 * 2406 * This function drops the power domain reference obtained by 2407 * intel_display_power_get() and might power down the corresponding hardware 2408 * block right away if this is the last reference. 2409 */ 2410 void intel_display_power_put(struct drm_i915_private *dev_priv, 2411 enum intel_display_power_domain domain, 2412 intel_wakeref_t wakeref) 2413 { 2414 __intel_display_power_put(dev_priv, domain); 2415 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); 2416 } 2417 #else 2418 /** 2419 * intel_display_power_put_unchecked - release an unchecked power domain reference 2420 * @dev_priv: i915 device instance 2421 * @domain: power domain to reference 2422 * 2423 * This function drops the power domain reference obtained by 2424 * intel_display_power_get() and might power down the corresponding hardware 2425 * block right away if this is the last reference. 2426 * 2427 * This function is only for the power domain code's internal use to suppress wakeref 2428 * tracking when the corresponding debug kconfig option is disabled; it should 2429 * not be used otherwise.
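 *
 * Only the wakeref bookkeeping is skipped; the domain reference count is
 * still dropped, so every call must pair with a preceding
 * intel_display_power_get*() call, e.g. (a minimal sketch):
 *
 *	intel_display_power_get(dev_priv, domain);
 *	... program the hardware ...
 *	intel_display_power_put_unchecked(dev_priv, domain);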
2430 */ 2431 void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, 2432 enum intel_display_power_domain domain) 2433 { 2434 __intel_display_power_put(dev_priv, domain); 2435 intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); 2436 } 2437 #endif 2438 2439 void 2440 intel_display_power_get_in_set(struct drm_i915_private *i915, 2441 struct intel_display_power_domain_set *power_domain_set, 2442 enum intel_display_power_domain domain) 2443 { 2444 intel_wakeref_t __maybe_unused wf; 2445 2446 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2447 2448 wf = intel_display_power_get(i915, domain); 2449 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2450 power_domain_set->wakerefs[domain] = wf; 2451 #endif 2452 power_domain_set->mask |= BIT_ULL(domain); 2453 } 2454 2455 bool 2456 intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, 2457 struct intel_display_power_domain_set *power_domain_set, 2458 enum intel_display_power_domain domain) 2459 { 2460 intel_wakeref_t wf; 2461 2462 drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); 2463 2464 wf = intel_display_power_get_if_enabled(i915, domain); 2465 if (!wf) 2466 return false; 2467 2468 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2469 power_domain_set->wakerefs[domain] = wf; 2470 #endif 2471 power_domain_set->mask |= BIT_ULL(domain); 2472 2473 return true; 2474 } 2475 2476 void 2477 intel_display_power_put_mask_in_set(struct drm_i915_private *i915, 2478 struct intel_display_power_domain_set *power_domain_set, 2479 u64 mask) 2480 { 2481 enum intel_display_power_domain domain; 2482 2483 drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); 2484 2485 for_each_power_domain(domain, mask) { 2486 intel_wakeref_t __maybe_unused wf = -1; 2487 2488 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) 2489 wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); 2490 #endif 2491 intel_display_power_put(i915, domain, wf); 2492 power_domain_set->mask &= ~BIT_ULL(domain); 2493 } 2494 } 2495 2496 #define I830_PIPES_POWER_DOMAINS ( \ 2497 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2498 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2499 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2500 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2501 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2502 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2503 BIT_ULL(POWER_DOMAIN_INIT)) 2504 2505 #define VLV_DISPLAY_POWER_DOMAINS ( \ 2506 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2507 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2508 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2509 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2510 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2511 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2512 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2513 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2514 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2515 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2516 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2517 BIT_ULL(POWER_DOMAIN_VGA) | \ 2518 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2519 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2520 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2521 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2522 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2523 BIT_ULL(POWER_DOMAIN_INIT)) 2524 2525 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2526 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2527 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2528 BIT_ULL(POWER_DOMAIN_PORT_CRT) | \ 2529 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2530 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2531 BIT_ULL(POWER_DOMAIN_INIT)) 2532 2533 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \ 2534 
BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2535 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2536 BIT_ULL(POWER_DOMAIN_INIT)) 2537 2538 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \ 2539 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2540 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2541 BIT_ULL(POWER_DOMAIN_INIT)) 2542 2543 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \ 2544 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2545 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2546 BIT_ULL(POWER_DOMAIN_INIT)) 2547 2548 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \ 2549 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2550 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2551 BIT_ULL(POWER_DOMAIN_INIT)) 2552 2553 #define CHV_DISPLAY_POWER_DOMAINS ( \ 2554 BIT_ULL(POWER_DOMAIN_DISPLAY_CORE) | \ 2555 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 2556 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2557 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2558 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2559 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2560 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2561 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2562 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2563 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2564 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2565 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2566 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2567 BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ 2568 BIT_ULL(POWER_DOMAIN_VGA) | \ 2569 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2570 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2571 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2572 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2573 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2574 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2575 BIT_ULL(POWER_DOMAIN_INIT)) 2576 2577 #define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \ 2578 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2579 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2580 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2581 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2582 BIT_ULL(POWER_DOMAIN_INIT)) 2583 2584 #define CHV_DPIO_CMN_D_POWER_DOMAINS ( \ 2585 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2586 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2587 BIT_ULL(POWER_DOMAIN_INIT)) 2588 2589 #define HSW_DISPLAY_POWER_DOMAINS ( \ 2590 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2591 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2592 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 2593 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2594 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2595 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2596 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2597 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2598 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2599 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2600 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2601 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2602 BIT_ULL(POWER_DOMAIN_VGA) | \ 2603 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2604 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2605 BIT_ULL(POWER_DOMAIN_INIT)) 2606 2607 #define BDW_DISPLAY_POWER_DOMAINS ( \ 2608 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2609 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2610 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2611 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2612 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2613 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2614 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2615 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2616 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2617 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2618 BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \ 2619 BIT_ULL(POWER_DOMAIN_VGA) | \ 2620 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2621 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2622 BIT_ULL(POWER_DOMAIN_INIT)) 2623 2624 #define 
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2625 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2626 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2627 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2628 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2629 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2630 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2631 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2632 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2633 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2634 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2635 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2636 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2637 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2638 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2639 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2640 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2641 BIT_ULL(POWER_DOMAIN_VGA) | \ 2642 BIT_ULL(POWER_DOMAIN_INIT)) 2643 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \ 2644 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \ 2645 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \ 2646 BIT_ULL(POWER_DOMAIN_INIT)) 2647 #define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2648 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \ 2649 BIT_ULL(POWER_DOMAIN_INIT)) 2650 #define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2651 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \ 2652 BIT_ULL(POWER_DOMAIN_INIT)) 2653 #define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \ 2654 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \ 2655 BIT_ULL(POWER_DOMAIN_INIT)) 2656 #define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2657 SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2658 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2659 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2660 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2661 BIT_ULL(POWER_DOMAIN_INIT)) 2662 2663 #define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2664 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2665 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2666 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2667 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2668 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2669 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2670 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2671 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2672 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2673 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2674 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2675 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2676 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2677 BIT_ULL(POWER_DOMAIN_VGA) | \ 2678 BIT_ULL(POWER_DOMAIN_INIT)) 2679 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2680 BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2681 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2682 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2683 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2684 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2685 BIT_ULL(POWER_DOMAIN_INIT)) 2686 #define BXT_DPIO_CMN_A_POWER_DOMAINS ( \ 2687 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2688 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2689 BIT_ULL(POWER_DOMAIN_INIT)) 2690 #define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \ 2691 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2692 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2693 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2694 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2695 BIT_ULL(POWER_DOMAIN_INIT)) 2696 2697 #define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \ 2698 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2699 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2700 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2701 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2702 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2703 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2704 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2705 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2706 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2707 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2708 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 
2709 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2710 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2711 BIT_ULL(POWER_DOMAIN_VGA) | \ 2712 BIT_ULL(POWER_DOMAIN_INIT)) 2713 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \ 2714 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2715 #define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \ 2716 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2717 #define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \ 2718 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2719 #define GLK_DPIO_CMN_A_POWER_DOMAINS ( \ 2720 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \ 2721 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2722 BIT_ULL(POWER_DOMAIN_INIT)) 2723 #define GLK_DPIO_CMN_B_POWER_DOMAINS ( \ 2724 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2725 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2726 BIT_ULL(POWER_DOMAIN_INIT)) 2727 #define GLK_DPIO_CMN_C_POWER_DOMAINS ( \ 2728 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2729 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2730 BIT_ULL(POWER_DOMAIN_INIT)) 2731 #define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \ 2732 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2733 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2734 BIT_ULL(POWER_DOMAIN_INIT)) 2735 #define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \ 2736 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2737 BIT_ULL(POWER_DOMAIN_INIT)) 2738 #define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \ 2739 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2740 BIT_ULL(POWER_DOMAIN_INIT)) 2741 #define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2742 GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ 2743 BIT_ULL(POWER_DOMAIN_GT_IRQ) | \ 2744 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2745 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2746 BIT_ULL(POWER_DOMAIN_GMBUS) | \ 2747 BIT_ULL(POWER_DOMAIN_INIT)) 2748 2749 /* 2750 * ICL PW_0/PG_0 domains (HW/DMC control): 2751 * - PCI 2752 * - clocks except port PLL 2753 * - central power except FBC 2754 * - shared functions except pipe interrupts, pipe MBUS, DBUF registers 2755 * ICL PW_1/PG_1 domains (HW/DMC control): 2756 * - DBUF function 2757 * - PIPE_A and its planes, except VGA 2758 * - transcoder EDP + PSR 2759 * - transcoder DSI 2760 * - DDI_A 2761 * - FBC 2762 */ 2763 #define ICL_PW_4_POWER_DOMAINS ( \ 2764 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2765 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2766 BIT_ULL(POWER_DOMAIN_INIT)) 2767 /* VDSC/joining */ 2768 #define ICL_PW_3_POWER_DOMAINS ( \ 2769 ICL_PW_4_POWER_DOMAINS | \ 2770 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2771 BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \ 2772 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2773 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2774 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2775 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \ 2776 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 2777 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ 2778 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ 2779 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ 2780 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2781 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2782 BIT_ULL(POWER_DOMAIN_AUX_D) | \ 2783 BIT_ULL(POWER_DOMAIN_AUX_E) | \ 2784 BIT_ULL(POWER_DOMAIN_AUX_F) | \ 2785 BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ 2786 BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ 2787 BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 2788 BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ 2789 BIT_ULL(POWER_DOMAIN_VGA) | \ 2790 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2791 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2792 BIT_ULL(POWER_DOMAIN_INIT)) 2793 /* 2794 * - transcoder WD 2795 * - KVMR (HW control) 2796 */ 2797 #define ICL_PW_2_POWER_DOMAINS ( \ 2798 ICL_PW_3_POWER_DOMAINS | \ 2799 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2800 BIT_ULL(POWER_DOMAIN_INIT)) 2801 /* 2802 * - KVMR (HW control) 2803 */ 2804 #define 
ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2805 ICL_PW_2_POWER_DOMAINS | \ 2806 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2807 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2808 BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ 2809 BIT_ULL(POWER_DOMAIN_INIT)) 2810 2811 #define ICL_DDI_IO_A_POWER_DOMAINS ( \ 2812 BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO)) 2813 #define ICL_DDI_IO_B_POWER_DOMAINS ( \ 2814 BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO)) 2815 #define ICL_DDI_IO_C_POWER_DOMAINS ( \ 2816 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO)) 2817 #define ICL_DDI_IO_D_POWER_DOMAINS ( \ 2818 BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) 2819 #define ICL_DDI_IO_E_POWER_DOMAINS ( \ 2820 BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) 2821 #define ICL_DDI_IO_F_POWER_DOMAINS ( \ 2822 BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) 2823 2824 #define ICL_AUX_A_IO_POWER_DOMAINS ( \ 2825 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2826 BIT_ULL(POWER_DOMAIN_AUX_A)) 2827 #define ICL_AUX_B_IO_POWER_DOMAINS ( \ 2828 BIT_ULL(POWER_DOMAIN_AUX_B)) 2829 #define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ 2830 BIT_ULL(POWER_DOMAIN_AUX_C)) 2831 #define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ 2832 BIT_ULL(POWER_DOMAIN_AUX_D)) 2833 #define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ 2834 BIT_ULL(POWER_DOMAIN_AUX_E)) 2835 #define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ 2836 BIT_ULL(POWER_DOMAIN_AUX_F)) 2837 #define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ 2838 BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) 2839 #define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ 2840 BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) 2841 #define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ 2842 BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) 2843 #define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ 2844 BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) 2845 2846 #define TGL_PW_5_POWER_DOMAINS ( \ 2847 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 2848 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 2849 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 2850 BIT_ULL(POWER_DOMAIN_INIT)) 2851 2852 #define TGL_PW_4_POWER_DOMAINS ( \ 2853 TGL_PW_5_POWER_DOMAINS | \ 2854 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2855 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2856 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2857 BIT_ULL(POWER_DOMAIN_INIT)) 2858 2859 #define TGL_PW_3_POWER_DOMAINS ( \ 2860 TGL_PW_4_POWER_DOMAINS | \ 2861 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2862 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2863 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2864 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2865 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2866 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 2867 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 2868 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC5) | \ 2869 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC6) | \ 2870 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2871 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2872 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2873 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2874 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2875 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2876 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2877 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2878 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2879 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2880 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2881 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2882 BIT_ULL(POWER_DOMAIN_VGA) | \ 2883 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2884 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2885 BIT_ULL(POWER_DOMAIN_INIT)) 2886 2887 #define TGL_PW_2_POWER_DOMAINS ( \ 2888 TGL_PW_3_POWER_DOMAINS | \ 2889 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 2890 BIT_ULL(POWER_DOMAIN_INIT)) 2891 2892 #define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2893 TGL_PW_3_POWER_DOMAINS | \ 2894 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2895 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2896 
BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2897 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 2898 BIT_ULL(POWER_DOMAIN_INIT)) 2899 2900 #define TGL_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 2901 #define TGL_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 2902 #define TGL_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 2903 #define TGL_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 2904 #define TGL_DDI_IO_TC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC5) 2905 #define TGL_DDI_IO_TC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC6) 2906 2907 #define TGL_AUX_A_IO_POWER_DOMAINS ( \ 2908 BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ 2909 BIT_ULL(POWER_DOMAIN_AUX_A)) 2910 #define TGL_AUX_B_IO_POWER_DOMAINS ( \ 2911 BIT_ULL(POWER_DOMAIN_AUX_B)) 2912 #define TGL_AUX_C_IO_POWER_DOMAINS ( \ 2913 BIT_ULL(POWER_DOMAIN_AUX_C)) 2914 2915 #define TGL_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 2916 #define TGL_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 2917 #define TGL_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 2918 #define TGL_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 2919 #define TGL_AUX_IO_USBC5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC5) 2920 #define TGL_AUX_IO_USBC6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC6) 2921 2922 #define TGL_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 2923 #define TGL_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 2924 #define TGL_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 2925 #define TGL_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 2926 #define TGL_AUX_IO_TBT5_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT5) 2927 #define TGL_AUX_IO_TBT6_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT6) 2928 2929 #define TGL_TC_COLD_OFF_POWER_DOMAINS ( \ 2930 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2931 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2932 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 2933 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 2934 BIT_ULL(POWER_DOMAIN_AUX_USBC5) | \ 2935 BIT_ULL(POWER_DOMAIN_AUX_USBC6) | \ 2936 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 2937 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 2938 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 2939 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 2940 BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ 2941 BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ 2942 BIT_ULL(POWER_DOMAIN_TC_COLD_OFF)) 2943 2944 #define RKL_PW_4_POWER_DOMAINS ( \ 2945 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 2946 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 2947 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 2948 BIT_ULL(POWER_DOMAIN_INIT)) 2949 2950 #define RKL_PW_3_POWER_DOMAINS ( \ 2951 RKL_PW_4_POWER_DOMAINS | \ 2952 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2953 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 2954 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 2955 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 2956 BIT_ULL(POWER_DOMAIN_VGA) | \ 2957 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2958 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 2959 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 2960 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 2961 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 2962 BIT_ULL(POWER_DOMAIN_INIT)) 2963 2964 /* 2965 * There is no PW_2/PG_2 on RKL. 
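 * (The power well definitions above accordingly jump from PG_1 straight
 * to RKL_PW_3_POWER_DOMAINS.)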
2966 * 2967 * RKL PW_1/PG_1 domains (under HW/DMC control): 2968 * - DBUF function (note: registers are in PW0) 2969 * - PIPE_A and its planes and VDSC/joining, except VGA 2970 * - transcoder A 2971 * - DDI_A and DDI_B 2972 * - FBC 2973 * 2974 * RKL PW_0/PG_0 domains (under HW/DMC control): 2975 * - PCI 2976 * - clocks except port PLL 2977 * - shared functions: 2978 * * interrupts except pipe interrupts 2979 * * MBus except PIPE_MBUS_DBOX_CTL 2980 * * DBUF registers 2981 * - central power except FBC 2982 * - top-level GTC (DDI-level GTC is in the well associated with the DDI) 2983 */ 2984 2985 #define RKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 2986 RKL_PW_3_POWER_DOMAINS | \ 2987 BIT_ULL(POWER_DOMAIN_MODESET) | \ 2988 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 2989 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 2990 BIT_ULL(POWER_DOMAIN_INIT)) 2991 2992 /* 2993 * From DG1 onwards, Audio MMIO/VERBS lies in the PG0 power well. 2994 */ 2995 #define DG1_PW_3_POWER_DOMAINS ( \ 2996 TGL_PW_4_POWER_DOMAINS | \ 2997 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 2998 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 2999 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3000 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3001 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3002 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3003 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3004 BIT_ULL(POWER_DOMAIN_VGA) | \ 3005 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3006 BIT_ULL(POWER_DOMAIN_INIT)) 3007 3008 #define DG1_PW_2_POWER_DOMAINS ( \ 3009 DG1_PW_3_POWER_DOMAINS | \ 3010 BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) | \ 3011 BIT_ULL(POWER_DOMAIN_INIT)) 3012 3013 #define DG1_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3014 DG1_PW_3_POWER_DOMAINS | \ 3015 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3016 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3017 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3018 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3019 BIT_ULL(POWER_DOMAIN_INIT)) 3020 3021 /* 3022 * XE_LPD Power Domains 3023 * 3024 * Previous platforms required that PG(n-1) be enabled before PG(n). That 3025 * dependency chain turns into a dependency tree on XE_LPD: 3026 * 3027 * PG0 3028 * | 3029 * --PG1-- 3030 * / \ 3031 * PGA --PG2-- 3032 * / | \ 3033 * PGB PGC PGD 3034 * 3035 * Power wells must be enabled from top to bottom and disabled from bottom 3036 * to top. This allows pipes to be power gated independently.
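 *
 * For example, powering up pipe C requires PG1 and PG2 to be enabled
 * before PGC, while PGA, PGB and PGD can stay powered down; powering
 * down must then release PGC before PG2, and PG2 before PG1.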
3037 */ 3038 3039 #define XELPD_PW_D_POWER_DOMAINS ( \ 3040 BIT_ULL(POWER_DOMAIN_PIPE_D) | \ 3041 BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \ 3042 BIT_ULL(POWER_DOMAIN_TRANSCODER_D) | \ 3043 BIT_ULL(POWER_DOMAIN_INIT)) 3044 3045 #define XELPD_PW_C_POWER_DOMAINS ( \ 3046 BIT_ULL(POWER_DOMAIN_PIPE_C) | \ 3047 BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \ 3048 BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \ 3049 BIT_ULL(POWER_DOMAIN_INIT)) 3050 3051 #define XELPD_PW_B_POWER_DOMAINS ( \ 3052 BIT_ULL(POWER_DOMAIN_PIPE_B) | \ 3053 BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ 3054 BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ 3055 BIT_ULL(POWER_DOMAIN_INIT)) 3056 3057 #define XELPD_PW_A_POWER_DOMAINS ( \ 3058 BIT_ULL(POWER_DOMAIN_PIPE_A) | \ 3059 BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ 3060 BIT_ULL(POWER_DOMAIN_INIT)) 3061 3062 #define XELPD_PW_2_POWER_DOMAINS ( \ 3063 XELPD_PW_B_POWER_DOMAINS | \ 3064 XELPD_PW_C_POWER_DOMAINS | \ 3065 XELPD_PW_D_POWER_DOMAINS | \ 3066 BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) | \ 3067 BIT_ULL(POWER_DOMAIN_VGA) | \ 3068 BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \ 3069 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) | \ 3070 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_E_XELPD) | \ 3071 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) | \ 3072 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) | \ 3073 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC3) | \ 3074 BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC4) | \ 3075 BIT_ULL(POWER_DOMAIN_AUX_C) | \ 3076 BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) | \ 3077 BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) | \ 3078 BIT_ULL(POWER_DOMAIN_AUX_USBC1) | \ 3079 BIT_ULL(POWER_DOMAIN_AUX_USBC2) | \ 3080 BIT_ULL(POWER_DOMAIN_AUX_USBC3) | \ 3081 BIT_ULL(POWER_DOMAIN_AUX_USBC4) | \ 3082 BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ 3083 BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ 3084 BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ 3085 BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ 3086 BIT_ULL(POWER_DOMAIN_INIT)) 3087 3088 /* 3089 * XELPD PW_1/PG_1 domains (under HW/DMC control): 3090 * - DBUF function (registers are in PW0) 3091 * - Transcoder A 3092 * - DDI_A and DDI_B 3093 * 3094 * XELPD PW_0/PG_0 domains (under HW/DMC control): 3095 * - PCI 3096 * - Clocks except port PLL 3097 * - Shared functions: 3098 * * interrupts except pipe interrupts 3099 * * MBus except PIPE_MBUS_DBOX_CTL 3100 * * DBUF registers 3101 * - Central power except FBC 3102 * - Top-level GTC (DDI-level GTC is in the well associated with the DDI) 3103 */ 3104 3105 #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS ( \ 3106 XELPD_PW_2_POWER_DOMAINS | \ 3107 BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) | \ 3108 BIT_ULL(POWER_DOMAIN_MODESET) | \ 3109 BIT_ULL(POWER_DOMAIN_AUX_A) | \ 3110 BIT_ULL(POWER_DOMAIN_AUX_B) | \ 3111 BIT_ULL(POWER_DOMAIN_INIT)) 3112 3113 #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) 3114 #define XELPD_AUX_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_E_XELPD) 3115 #define XELPD_AUX_IO_USBC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC1) 3116 #define XELPD_AUX_IO_USBC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC2) 3117 #define XELPD_AUX_IO_USBC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC3) 3118 #define XELPD_AUX_IO_USBC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_USBC4) 3119 3120 #define XELPD_AUX_IO_TBT1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT1) 3121 #define XELPD_AUX_IO_TBT2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT2) 3122 #define XELPD_AUX_IO_TBT3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT3) 3123 #define XELPD_AUX_IO_TBT4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_TBT4) 3124 3125 #define XELPD_DDI_IO_D_XELPD_POWER_DOMAINS
BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_D_XELPD) 3126 #define XELPD_DDI_IO_E_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_E_XELPD) 3127 #define XELPD_DDI_IO_TC1_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC1) 3128 #define XELPD_DDI_IO_TC2_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC2) 3129 #define XELPD_DDI_IO_TC3_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC3) 3130 #define XELPD_DDI_IO_TC4_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_PORT_DDI_IO_TC4) 3131 3132 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { 3133 .sync_hw = i9xx_power_well_sync_hw_noop, 3134 .enable = i9xx_always_on_power_well_noop, 3135 .disable = i9xx_always_on_power_well_noop, 3136 .is_enabled = i9xx_always_on_power_well_enabled, 3137 }; 3138 3139 static const struct i915_power_well_ops chv_pipe_power_well_ops = { 3140 .sync_hw = chv_pipe_power_well_sync_hw, 3141 .enable = chv_pipe_power_well_enable, 3142 .disable = chv_pipe_power_well_disable, 3143 .is_enabled = chv_pipe_power_well_enabled, 3144 }; 3145 3146 static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = { 3147 .sync_hw = i9xx_power_well_sync_hw_noop, 3148 .enable = chv_dpio_cmn_power_well_enable, 3149 .disable = chv_dpio_cmn_power_well_disable, 3150 .is_enabled = vlv_power_well_enabled, 3151 }; 3152 3153 static const struct i915_power_well_desc i9xx_always_on_power_well[] = { 3154 { 3155 .name = "always-on", 3156 .always_on = true, 3157 .domains = POWER_DOMAIN_MASK, 3158 .ops = &i9xx_always_on_power_well_ops, 3159 .id = DISP_PW_ID_NONE, 3160 }, 3161 }; 3162 3163 static const struct i915_power_well_ops i830_pipes_power_well_ops = { 3164 .sync_hw = i830_pipes_power_well_sync_hw, 3165 .enable = i830_pipes_power_well_enable, 3166 .disable = i830_pipes_power_well_disable, 3167 .is_enabled = i830_pipes_power_well_enabled, 3168 }; 3169 3170 static const struct i915_power_well_desc i830_power_wells[] = { 3171 { 3172 .name = "always-on", 3173 .always_on = true, 3174 .domains = POWER_DOMAIN_MASK, 3175 .ops = &i9xx_always_on_power_well_ops, 3176 .id = DISP_PW_ID_NONE, 3177 }, 3178 { 3179 .name = "pipes", 3180 .domains = I830_PIPES_POWER_DOMAINS, 3181 .ops = &i830_pipes_power_well_ops, 3182 .id = DISP_PW_ID_NONE, 3183 }, 3184 }; 3185 3186 static const struct i915_power_well_ops hsw_power_well_ops = { 3187 .sync_hw = hsw_power_well_sync_hw, 3188 .enable = hsw_power_well_enable, 3189 .disable = hsw_power_well_disable, 3190 .is_enabled = hsw_power_well_enabled, 3191 }; 3192 3193 static const struct i915_power_well_ops gen9_dc_off_power_well_ops = { 3194 .sync_hw = i9xx_power_well_sync_hw_noop, 3195 .enable = gen9_dc_off_power_well_enable, 3196 .disable = gen9_dc_off_power_well_disable, 3197 .is_enabled = gen9_dc_off_power_well_enabled, 3198 }; 3199 3200 static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = { 3201 .sync_hw = i9xx_power_well_sync_hw_noop, 3202 .enable = bxt_dpio_cmn_power_well_enable, 3203 .disable = bxt_dpio_cmn_power_well_disable, 3204 .is_enabled = bxt_dpio_cmn_power_well_enabled, 3205 }; 3206 3207 static const struct i915_power_well_regs hsw_power_well_regs = { 3208 .bios = HSW_PWR_WELL_CTL1, 3209 .driver = HSW_PWR_WELL_CTL2, 3210 .kvmr = HSW_PWR_WELL_CTL3, 3211 .debug = HSW_PWR_WELL_CTL4, 3212 }; 3213 3214 static const struct i915_power_well_desc hsw_power_wells[] = { 3215 { 3216 .name = "always-on", 3217 .always_on = true, 3218 .domains = POWER_DOMAIN_MASK, 3219 .ops = &i9xx_always_on_power_well_ops, 3220 .id = DISP_PW_ID_NONE, 3221 }, 3222 { 3223 .name = "display", 3224 .domains = 
HSW_DISPLAY_POWER_DOMAINS, 3225 .ops = &hsw_power_well_ops, 3226 .id = HSW_DISP_PW_GLOBAL, 3227 { 3228 .hsw.regs = &hsw_power_well_regs, 3229 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3230 .hsw.has_vga = true, 3231 }, 3232 }, 3233 }; 3234 3235 static const struct i915_power_well_desc bdw_power_wells[] = { 3236 { 3237 .name = "always-on", 3238 .always_on = true, 3239 .domains = POWER_DOMAIN_MASK, 3240 .ops = &i9xx_always_on_power_well_ops, 3241 .id = DISP_PW_ID_NONE, 3242 }, 3243 { 3244 .name = "display", 3245 .domains = BDW_DISPLAY_POWER_DOMAINS, 3246 .ops = &hsw_power_well_ops, 3247 .id = HSW_DISP_PW_GLOBAL, 3248 { 3249 .hsw.regs = &hsw_power_well_regs, 3250 .hsw.idx = HSW_PW_CTL_IDX_GLOBAL, 3251 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3252 .hsw.has_vga = true, 3253 }, 3254 }, 3255 }; 3256 3257 static const struct i915_power_well_ops vlv_display_power_well_ops = { 3258 .sync_hw = i9xx_power_well_sync_hw_noop, 3259 .enable = vlv_display_power_well_enable, 3260 .disable = vlv_display_power_well_disable, 3261 .is_enabled = vlv_power_well_enabled, 3262 }; 3263 3264 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = { 3265 .sync_hw = i9xx_power_well_sync_hw_noop, 3266 .enable = vlv_dpio_cmn_power_well_enable, 3267 .disable = vlv_dpio_cmn_power_well_disable, 3268 .is_enabled = vlv_power_well_enabled, 3269 }; 3270 3271 static const struct i915_power_well_ops vlv_dpio_power_well_ops = { 3272 .sync_hw = i9xx_power_well_sync_hw_noop, 3273 .enable = vlv_power_well_enable, 3274 .disable = vlv_power_well_disable, 3275 .is_enabled = vlv_power_well_enabled, 3276 }; 3277 3278 static const struct i915_power_well_desc vlv_power_wells[] = { 3279 { 3280 .name = "always-on", 3281 .always_on = true, 3282 .domains = POWER_DOMAIN_MASK, 3283 .ops = &i9xx_always_on_power_well_ops, 3284 .id = DISP_PW_ID_NONE, 3285 }, 3286 { 3287 .name = "display", 3288 .domains = VLV_DISPLAY_POWER_DOMAINS, 3289 .ops = &vlv_display_power_well_ops, 3290 .id = VLV_DISP_PW_DISP2D, 3291 { 3292 .vlv.idx = PUNIT_PWGT_IDX_DISP2D, 3293 }, 3294 }, 3295 { 3296 .name = "dpio-tx-b-01", 3297 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3298 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3299 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3300 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3301 .ops = &vlv_dpio_power_well_ops, 3302 .id = DISP_PW_ID_NONE, 3303 { 3304 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01, 3305 }, 3306 }, 3307 { 3308 .name = "dpio-tx-b-23", 3309 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3310 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3311 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3312 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3313 .ops = &vlv_dpio_power_well_ops, 3314 .id = DISP_PW_ID_NONE, 3315 { 3316 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23, 3317 }, 3318 }, 3319 { 3320 .name = "dpio-tx-c-01", 3321 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3322 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3323 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3324 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3325 .ops = &vlv_dpio_power_well_ops, 3326 .id = DISP_PW_ID_NONE, 3327 { 3328 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01, 3329 }, 3330 }, 3331 { 3332 .name = "dpio-tx-c-23", 3333 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS | 3334 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS | 3335 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS | 3336 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS, 3337 .ops = &vlv_dpio_power_well_ops, 3338 .id = DISP_PW_ID_NONE, 3339 { 3340 .vlv.idx = PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23, 3341 }, 3342 }, 3343 { 3344 .name = "dpio-common", 3345 .domains = 
VLV_DPIO_CMN_BC_POWER_DOMAINS, 3346 .ops = &vlv_dpio_cmn_power_well_ops, 3347 .id = VLV_DISP_PW_DPIO_CMN_BC, 3348 { 3349 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3350 }, 3351 }, 3352 }; 3353 3354 static const struct i915_power_well_desc chv_power_wells[] = { 3355 { 3356 .name = "always-on", 3357 .always_on = true, 3358 .domains = POWER_DOMAIN_MASK, 3359 .ops = &i9xx_always_on_power_well_ops, 3360 .id = DISP_PW_ID_NONE, 3361 }, 3362 { 3363 .name = "display", 3364 /* 3365 * Pipe A power well is the new disp2d well. Pipe B and C 3366 * power wells don't actually exist. Pipe A power well is 3367 * required for any pipe to work. 3368 */ 3369 .domains = CHV_DISPLAY_POWER_DOMAINS, 3370 .ops = &chv_pipe_power_well_ops, 3371 .id = DISP_PW_ID_NONE, 3372 }, 3373 { 3374 .name = "dpio-common-bc", 3375 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS, 3376 .ops = &chv_dpio_cmn_power_well_ops, 3377 .id = VLV_DISP_PW_DPIO_CMN_BC, 3378 { 3379 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_BC, 3380 }, 3381 }, 3382 { 3383 .name = "dpio-common-d", 3384 .domains = CHV_DPIO_CMN_D_POWER_DOMAINS, 3385 .ops = &chv_dpio_cmn_power_well_ops, 3386 .id = CHV_DISP_PW_DPIO_CMN_D, 3387 { 3388 .vlv.idx = PUNIT_PWGT_IDX_DPIO_CMN_D, 3389 }, 3390 }, 3391 }; 3392 3393 bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 3394 enum i915_power_well_id power_well_id) 3395 { 3396 struct i915_power_well *power_well; 3397 bool ret; 3398 3399 power_well = lookup_power_well(dev_priv, power_well_id); 3400 ret = power_well->desc->ops->is_enabled(dev_priv, power_well); 3401 3402 return ret; 3403 } 3404 3405 static const struct i915_power_well_desc skl_power_wells[] = { 3406 { 3407 .name = "always-on", 3408 .always_on = true, 3409 .domains = POWER_DOMAIN_MASK, 3410 .ops = &i9xx_always_on_power_well_ops, 3411 .id = DISP_PW_ID_NONE, 3412 }, 3413 { 3414 .name = "power well 1", 3415 /* Handled by the DMC firmware */ 3416 .always_on = true, 3417 .domains = 0, 3418 .ops = &hsw_power_well_ops, 3419 .id = SKL_DISP_PW_1, 3420 { 3421 .hsw.regs = &hsw_power_well_regs, 3422 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3423 .hsw.has_fuses = true, 3424 }, 3425 }, 3426 { 3427 .name = "MISC IO power well", 3428 /* Handled by the DMC firmware */ 3429 .always_on = true, 3430 .domains = 0, 3431 .ops = &hsw_power_well_ops, 3432 .id = SKL_DISP_PW_MISC_IO, 3433 { 3434 .hsw.regs = &hsw_power_well_regs, 3435 .hsw.idx = SKL_PW_CTL_IDX_MISC_IO, 3436 }, 3437 }, 3438 { 3439 .name = "DC off", 3440 .domains = SKL_DISPLAY_DC_OFF_POWER_DOMAINS, 3441 .ops = &gen9_dc_off_power_well_ops, 3442 .id = SKL_DISP_DC_OFF, 3443 }, 3444 { 3445 .name = "power well 2", 3446 .domains = SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3447 .ops = &hsw_power_well_ops, 3448 .id = SKL_DISP_PW_2, 3449 { 3450 .hsw.regs = &hsw_power_well_regs, 3451 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3452 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3453 .hsw.has_vga = true, 3454 .hsw.has_fuses = true, 3455 }, 3456 }, 3457 { 3458 .name = "DDI A/E IO power well", 3459 .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS, 3460 .ops = &hsw_power_well_ops, 3461 .id = DISP_PW_ID_NONE, 3462 { 3463 .hsw.regs = &hsw_power_well_regs, 3464 .hsw.idx = SKL_PW_CTL_IDX_DDI_A_E, 3465 }, 3466 }, 3467 { 3468 .name = "DDI B IO power well", 3469 .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3470 .ops = &hsw_power_well_ops, 3471 .id = DISP_PW_ID_NONE, 3472 { 3473 .hsw.regs = &hsw_power_well_regs, 3474 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3475 }, 3476 }, 3477 { 3478 .name = "DDI C IO power well", 3479 .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3480 .ops = 
&hsw_power_well_ops, 3481 .id = DISP_PW_ID_NONE, 3482 { 3483 .hsw.regs = &hsw_power_well_regs, 3484 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3485 }, 3486 }, 3487 { 3488 .name = "DDI D IO power well", 3489 .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS, 3490 .ops = &hsw_power_well_ops, 3491 .id = DISP_PW_ID_NONE, 3492 { 3493 .hsw.regs = &hsw_power_well_regs, 3494 .hsw.idx = SKL_PW_CTL_IDX_DDI_D, 3495 }, 3496 }, 3497 }; 3498 3499 static const struct i915_power_well_desc bxt_power_wells[] = { 3500 { 3501 .name = "always-on", 3502 .always_on = true, 3503 .domains = POWER_DOMAIN_MASK, 3504 .ops = &i9xx_always_on_power_well_ops, 3505 .id = DISP_PW_ID_NONE, 3506 }, 3507 { 3508 .name = "power well 1", 3509 /* Handled by the DMC firmware */ 3510 .always_on = true, 3511 .domains = 0, 3512 .ops = &hsw_power_well_ops, 3513 .id = SKL_DISP_PW_1, 3514 { 3515 .hsw.regs = &hsw_power_well_regs, 3516 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3517 .hsw.has_fuses = true, 3518 }, 3519 }, 3520 { 3521 .name = "DC off", 3522 .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, 3523 .ops = &gen9_dc_off_power_well_ops, 3524 .id = SKL_DISP_DC_OFF, 3525 }, 3526 { 3527 .name = "power well 2", 3528 .domains = BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3529 .ops = &hsw_power_well_ops, 3530 .id = SKL_DISP_PW_2, 3531 { 3532 .hsw.regs = &hsw_power_well_regs, 3533 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3534 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3535 .hsw.has_vga = true, 3536 .hsw.has_fuses = true, 3537 }, 3538 }, 3539 { 3540 .name = "dpio-common-a", 3541 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS, 3542 .ops = &bxt_dpio_cmn_power_well_ops, 3543 .id = BXT_DISP_PW_DPIO_CMN_A, 3544 { 3545 .bxt.phy = DPIO_PHY1, 3546 }, 3547 }, 3548 { 3549 .name = "dpio-common-bc", 3550 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS, 3551 .ops = &bxt_dpio_cmn_power_well_ops, 3552 .id = VLV_DISP_PW_DPIO_CMN_BC, 3553 { 3554 .bxt.phy = DPIO_PHY0, 3555 }, 3556 }, 3557 }; 3558 3559 static const struct i915_power_well_desc glk_power_wells[] = { 3560 { 3561 .name = "always-on", 3562 .always_on = true, 3563 .domains = POWER_DOMAIN_MASK, 3564 .ops = &i9xx_always_on_power_well_ops, 3565 .id = DISP_PW_ID_NONE, 3566 }, 3567 { 3568 .name = "power well 1", 3569 /* Handled by the DMC firmware */ 3570 .always_on = true, 3571 .domains = 0, 3572 .ops = &hsw_power_well_ops, 3573 .id = SKL_DISP_PW_1, 3574 { 3575 .hsw.regs = &hsw_power_well_regs, 3576 .hsw.idx = SKL_PW_CTL_IDX_PW_1, 3577 .hsw.has_fuses = true, 3578 }, 3579 }, 3580 { 3581 .name = "DC off", 3582 .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, 3583 .ops = &gen9_dc_off_power_well_ops, 3584 .id = SKL_DISP_DC_OFF, 3585 }, 3586 { 3587 .name = "power well 2", 3588 .domains = GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS, 3589 .ops = &hsw_power_well_ops, 3590 .id = SKL_DISP_PW_2, 3591 { 3592 .hsw.regs = &hsw_power_well_regs, 3593 .hsw.idx = SKL_PW_CTL_IDX_PW_2, 3594 .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C), 3595 .hsw.has_vga = true, 3596 .hsw.has_fuses = true, 3597 }, 3598 }, 3599 { 3600 .name = "dpio-common-a", 3601 .domains = GLK_DPIO_CMN_A_POWER_DOMAINS, 3602 .ops = &bxt_dpio_cmn_power_well_ops, 3603 .id = BXT_DISP_PW_DPIO_CMN_A, 3604 { 3605 .bxt.phy = DPIO_PHY1, 3606 }, 3607 }, 3608 { 3609 .name = "dpio-common-b", 3610 .domains = GLK_DPIO_CMN_B_POWER_DOMAINS, 3611 .ops = &bxt_dpio_cmn_power_well_ops, 3612 .id = VLV_DISP_PW_DPIO_CMN_BC, 3613 { 3614 .bxt.phy = DPIO_PHY0, 3615 }, 3616 }, 3617 { 3618 .name = "dpio-common-c", 3619 .domains = GLK_DPIO_CMN_C_POWER_DOMAINS, 3620 .ops = &bxt_dpio_cmn_power_well_ops, 3621 .id = GLK_DISP_PW_DPIO_CMN_C, 3622 
{ 3623 .bxt.phy = DPIO_PHY2, 3624 }, 3625 }, 3626 { 3627 .name = "AUX A", 3628 .domains = GLK_DISPLAY_AUX_A_POWER_DOMAINS, 3629 .ops = &hsw_power_well_ops, 3630 .id = DISP_PW_ID_NONE, 3631 { 3632 .hsw.regs = &hsw_power_well_regs, 3633 .hsw.idx = GLK_PW_CTL_IDX_AUX_A, 3634 }, 3635 }, 3636 { 3637 .name = "AUX B", 3638 .domains = GLK_DISPLAY_AUX_B_POWER_DOMAINS, 3639 .ops = &hsw_power_well_ops, 3640 .id = DISP_PW_ID_NONE, 3641 { 3642 .hsw.regs = &hsw_power_well_regs, 3643 .hsw.idx = GLK_PW_CTL_IDX_AUX_B, 3644 }, 3645 }, 3646 { 3647 .name = "AUX C", 3648 .domains = GLK_DISPLAY_AUX_C_POWER_DOMAINS, 3649 .ops = &hsw_power_well_ops, 3650 .id = DISP_PW_ID_NONE, 3651 { 3652 .hsw.regs = &hsw_power_well_regs, 3653 .hsw.idx = GLK_PW_CTL_IDX_AUX_C, 3654 }, 3655 }, 3656 { 3657 .name = "DDI A IO power well", 3658 .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS, 3659 .ops = &hsw_power_well_ops, 3660 .id = DISP_PW_ID_NONE, 3661 { 3662 .hsw.regs = &hsw_power_well_regs, 3663 .hsw.idx = GLK_PW_CTL_IDX_DDI_A, 3664 }, 3665 }, 3666 { 3667 .name = "DDI B IO power well", 3668 .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS, 3669 .ops = &hsw_power_well_ops, 3670 .id = DISP_PW_ID_NONE, 3671 { 3672 .hsw.regs = &hsw_power_well_regs, 3673 .hsw.idx = SKL_PW_CTL_IDX_DDI_B, 3674 }, 3675 }, 3676 { 3677 .name = "DDI C IO power well", 3678 .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS, 3679 .ops = &hsw_power_well_ops, 3680 .id = DISP_PW_ID_NONE, 3681 { 3682 .hsw.regs = &hsw_power_well_regs, 3683 .hsw.idx = SKL_PW_CTL_IDX_DDI_C, 3684 }, 3685 }, 3686 }; 3687 3688 static const struct i915_power_well_ops icl_aux_power_well_ops = { 3689 .sync_hw = hsw_power_well_sync_hw, 3690 .enable = icl_aux_power_well_enable, 3691 .disable = icl_aux_power_well_disable, 3692 .is_enabled = hsw_power_well_enabled, 3693 }; 3694 3695 static const struct i915_power_well_regs icl_aux_power_well_regs = { 3696 .bios = ICL_PWR_WELL_CTL_AUX1, 3697 .driver = ICL_PWR_WELL_CTL_AUX2, 3698 .debug = ICL_PWR_WELL_CTL_AUX4, 3699 }; 3700 3701 static const struct i915_power_well_regs icl_ddi_power_well_regs = { 3702 .bios = ICL_PWR_WELL_CTL_DDI1, 3703 .driver = ICL_PWR_WELL_CTL_DDI2, 3704 .debug = ICL_PWR_WELL_CTL_DDI4, 3705 }; 3706 3707 static const struct i915_power_well_desc icl_power_wells[] = { 3708 { 3709 .name = "always-on", 3710 .always_on = true, 3711 .domains = POWER_DOMAIN_MASK, 3712 .ops = &i9xx_always_on_power_well_ops, 3713 .id = DISP_PW_ID_NONE, 3714 }, 3715 { 3716 .name = "power well 1", 3717 /* Handled by the DMC firmware */ 3718 .always_on = true, 3719 .domains = 0, 3720 .ops = &hsw_power_well_ops, 3721 .id = SKL_DISP_PW_1, 3722 { 3723 .hsw.regs = &hsw_power_well_regs, 3724 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 3725 .hsw.has_fuses = true, 3726 }, 3727 }, 3728 { 3729 .name = "DC off", 3730 .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, 3731 .ops = &gen9_dc_off_power_well_ops, 3732 .id = SKL_DISP_DC_OFF, 3733 }, 3734 { 3735 .name = "power well 2", 3736 .domains = ICL_PW_2_POWER_DOMAINS, 3737 .ops = &hsw_power_well_ops, 3738 .id = SKL_DISP_PW_2, 3739 { 3740 .hsw.regs = &hsw_power_well_regs, 3741 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 3742 .hsw.has_fuses = true, 3743 }, 3744 }, 3745 { 3746 .name = "power well 3", 3747 .domains = ICL_PW_3_POWER_DOMAINS, 3748 .ops = &hsw_power_well_ops, 3749 .id = ICL_DISP_PW_3, 3750 { 3751 .hsw.regs = &hsw_power_well_regs, 3752 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 3753 .hsw.irq_pipe_mask = BIT(PIPE_B), 3754 .hsw.has_vga = true, 3755 .hsw.has_fuses = true, 3756 }, 3757 }, 3758 { 3759 .name = "DDI A IO", 3760 .domains = 
ICL_DDI_IO_A_POWER_DOMAINS, 3761 .ops = &hsw_power_well_ops, 3762 .id = DISP_PW_ID_NONE, 3763 { 3764 .hsw.regs = &icl_ddi_power_well_regs, 3765 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 3766 }, 3767 }, 3768 { 3769 .name = "DDI B IO", 3770 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 3771 .ops = &hsw_power_well_ops, 3772 .id = DISP_PW_ID_NONE, 3773 { 3774 .hsw.regs = &icl_ddi_power_well_regs, 3775 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 3776 }, 3777 }, 3778 { 3779 .name = "DDI C IO", 3780 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 3781 .ops = &hsw_power_well_ops, 3782 .id = DISP_PW_ID_NONE, 3783 { 3784 .hsw.regs = &icl_ddi_power_well_regs, 3785 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 3786 }, 3787 }, 3788 { 3789 .name = "DDI D IO", 3790 .domains = ICL_DDI_IO_D_POWER_DOMAINS, 3791 .ops = &hsw_power_well_ops, 3792 .id = DISP_PW_ID_NONE, 3793 { 3794 .hsw.regs = &icl_ddi_power_well_regs, 3795 .hsw.idx = ICL_PW_CTL_IDX_DDI_D, 3796 }, 3797 }, 3798 { 3799 .name = "DDI E IO", 3800 .domains = ICL_DDI_IO_E_POWER_DOMAINS, 3801 .ops = &hsw_power_well_ops, 3802 .id = DISP_PW_ID_NONE, 3803 { 3804 .hsw.regs = &icl_ddi_power_well_regs, 3805 .hsw.idx = ICL_PW_CTL_IDX_DDI_E, 3806 }, 3807 }, 3808 { 3809 .name = "DDI F IO", 3810 .domains = ICL_DDI_IO_F_POWER_DOMAINS, 3811 .ops = &hsw_power_well_ops, 3812 .id = DISP_PW_ID_NONE, 3813 { 3814 .hsw.regs = &icl_ddi_power_well_regs, 3815 .hsw.idx = ICL_PW_CTL_IDX_DDI_F, 3816 }, 3817 }, 3818 { 3819 .name = "AUX A", 3820 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 3821 .ops = &icl_aux_power_well_ops, 3822 .id = DISP_PW_ID_NONE, 3823 { 3824 .hsw.regs = &icl_aux_power_well_regs, 3825 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 3826 }, 3827 }, 3828 { 3829 .name = "AUX B", 3830 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 3831 .ops = &icl_aux_power_well_ops, 3832 .id = DISP_PW_ID_NONE, 3833 { 3834 .hsw.regs = &icl_aux_power_well_regs, 3835 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 3836 }, 3837 }, 3838 { 3839 .name = "AUX C TC1", 3840 .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, 3841 .ops = &icl_aux_power_well_ops, 3842 .id = DISP_PW_ID_NONE, 3843 { 3844 .hsw.regs = &icl_aux_power_well_regs, 3845 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 3846 .hsw.is_tc_tbt = false, 3847 }, 3848 }, 3849 { 3850 .name = "AUX D TC2", 3851 .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, 3852 .ops = &icl_aux_power_well_ops, 3853 .id = DISP_PW_ID_NONE, 3854 { 3855 .hsw.regs = &icl_aux_power_well_regs, 3856 .hsw.idx = ICL_PW_CTL_IDX_AUX_D, 3857 .hsw.is_tc_tbt = false, 3858 }, 3859 }, 3860 { 3861 .name = "AUX E TC3", 3862 .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, 3863 .ops = &icl_aux_power_well_ops, 3864 .id = DISP_PW_ID_NONE, 3865 { 3866 .hsw.regs = &icl_aux_power_well_regs, 3867 .hsw.idx = ICL_PW_CTL_IDX_AUX_E, 3868 .hsw.is_tc_tbt = false, 3869 }, 3870 }, 3871 { 3872 .name = "AUX F TC4", 3873 .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, 3874 .ops = &icl_aux_power_well_ops, 3875 .id = DISP_PW_ID_NONE, 3876 { 3877 .hsw.regs = &icl_aux_power_well_regs, 3878 .hsw.idx = ICL_PW_CTL_IDX_AUX_F, 3879 .hsw.is_tc_tbt = false, 3880 }, 3881 }, 3882 { 3883 .name = "AUX C TBT1", 3884 .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, 3885 .ops = &icl_aux_power_well_ops, 3886 .id = DISP_PW_ID_NONE, 3887 { 3888 .hsw.regs = &icl_aux_power_well_regs, 3889 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1, 3890 .hsw.is_tc_tbt = true, 3891 }, 3892 }, 3893 { 3894 .name = "AUX D TBT2", 3895 .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, 3896 .ops = &icl_aux_power_well_ops, 3897 .id = DISP_PW_ID_NONE, 3898 { 3899 .hsw.regs = &icl_aux_power_well_regs, 3900 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2, 3901 .hsw.is_tc_tbt = true, 3902 
}, 3903 }, 3904 { 3905 .name = "AUX E TBT3", 3906 .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, 3907 .ops = &icl_aux_power_well_ops, 3908 .id = DISP_PW_ID_NONE, 3909 { 3910 .hsw.regs = &icl_aux_power_well_regs, 3911 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3, 3912 .hsw.is_tc_tbt = true, 3913 }, 3914 }, 3915 { 3916 .name = "AUX F TBT4", 3917 .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, 3918 .ops = &icl_aux_power_well_ops, 3919 .id = DISP_PW_ID_NONE, 3920 { 3921 .hsw.regs = &icl_aux_power_well_regs, 3922 .hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4, 3923 .hsw.is_tc_tbt = true, 3924 }, 3925 }, 3926 { 3927 .name = "power well 4", 3928 .domains = ICL_PW_4_POWER_DOMAINS, 3929 .ops = &hsw_power_well_ops, 3930 .id = DISP_PW_ID_NONE, 3931 { 3932 .hsw.regs = &hsw_power_well_regs, 3933 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 3934 .hsw.has_fuses = true, 3935 .hsw.irq_pipe_mask = BIT(PIPE_C), 3936 }, 3937 }, 3938 }; 3939 3940 static void 3941 tgl_tc_cold_request(struct drm_i915_private *i915, bool block) 3942 { 3943 u8 tries = 0; 3944 int ret; 3945 3946 while (1) { 3947 u32 low_val; 3948 u32 high_val = 0; 3949 3950 if (block) 3951 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ; 3952 else 3953 low_val = TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ; 3954 3955 /* 3956 * The spec states that we should time out the request after 3957 * 200us, but the function below will time out after 500us. 3958 */ 3959 ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, 3960 &high_val); 3961 if (ret == 0) { 3962 if (block && 3963 (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) 3964 ret = -EIO; 3965 else 3966 break; 3967 } 3968 3969 if (++tries == 3) 3970 break; 3971 3972 drm_msleep(1); 3973 } 3974 3975 if (ret) 3976 drm_err(&i915->drm, "TC cold %sblock failed\n", 3977 block ? "" : "un"); 3978 else 3979 drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", 3980 block ? "" : "un"); 3981 } 3982 
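/*
 * The "TC cold off" power well does not map to a real HW power well:
 * "enabling" it sends a PCODE request that blocks TC cold entry, and
 * "disabling" it sends the corresponding unblock request.
 */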
"" : "un"); 3981 } 3982 3983 static void 3984 tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915, 3985 struct i915_power_well *power_well) 3986 { 3987 tgl_tc_cold_request(i915, true); 3988 } 3989 3990 static void 3991 tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915, 3992 struct i915_power_well *power_well) 3993 { 3994 tgl_tc_cold_request(i915, false); 3995 } 3996 3997 static void 3998 tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915, 3999 struct i915_power_well *power_well) 4000 { 4001 if (power_well->count > 0) 4002 tgl_tc_cold_off_power_well_enable(i915, power_well); 4003 else 4004 tgl_tc_cold_off_power_well_disable(i915, power_well); 4005 } 4006 4007 static bool 4008 tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv, 4009 struct i915_power_well *power_well) 4010 { 4011 /* 4012 * Not the correctly implementation but there is no way to just read it 4013 * from PCODE, so returning count to avoid state mismatch errors 4014 */ 4015 return power_well->count; 4016 } 4017 4018 static const struct i915_power_well_ops tgl_tc_cold_off_ops = { 4019 .sync_hw = tgl_tc_cold_off_power_well_sync_hw, 4020 .enable = tgl_tc_cold_off_power_well_enable, 4021 .disable = tgl_tc_cold_off_power_well_disable, 4022 .is_enabled = tgl_tc_cold_off_power_well_is_enabled, 4023 }; 4024 4025 static const struct i915_power_well_desc tgl_power_wells[] = { 4026 { 4027 .name = "always-on", 4028 .always_on = true, 4029 .domains = POWER_DOMAIN_MASK, 4030 .ops = &i9xx_always_on_power_well_ops, 4031 .id = DISP_PW_ID_NONE, 4032 }, 4033 { 4034 .name = "power well 1", 4035 /* Handled by the DMC firmware */ 4036 .always_on = true, 4037 .domains = 0, 4038 .ops = &hsw_power_well_ops, 4039 .id = SKL_DISP_PW_1, 4040 { 4041 .hsw.regs = &hsw_power_well_regs, 4042 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4043 .hsw.has_fuses = true, 4044 }, 4045 }, 4046 { 4047 .name = "DC off", 4048 .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, 4049 .ops = &gen9_dc_off_power_well_ops, 4050 .id = SKL_DISP_DC_OFF, 4051 }, 4052 { 4053 .name = "power well 2", 4054 .domains = TGL_PW_2_POWER_DOMAINS, 4055 .ops = &hsw_power_well_ops, 4056 .id = SKL_DISP_PW_2, 4057 { 4058 .hsw.regs = &hsw_power_well_regs, 4059 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4060 .hsw.has_fuses = true, 4061 }, 4062 }, 4063 { 4064 .name = "power well 3", 4065 .domains = TGL_PW_3_POWER_DOMAINS, 4066 .ops = &hsw_power_well_ops, 4067 .id = ICL_DISP_PW_3, 4068 { 4069 .hsw.regs = &hsw_power_well_regs, 4070 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4071 .hsw.irq_pipe_mask = BIT(PIPE_B), 4072 .hsw.has_vga = true, 4073 .hsw.has_fuses = true, 4074 }, 4075 }, 4076 { 4077 .name = "DDI A IO", 4078 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4079 .ops = &hsw_power_well_ops, 4080 .id = DISP_PW_ID_NONE, 4081 { 4082 .hsw.regs = &icl_ddi_power_well_regs, 4083 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4084 } 4085 }, 4086 { 4087 .name = "DDI B IO", 4088 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4089 .ops = &hsw_power_well_ops, 4090 .id = DISP_PW_ID_NONE, 4091 { 4092 .hsw.regs = &icl_ddi_power_well_regs, 4093 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4094 } 4095 }, 4096 { 4097 .name = "DDI C IO", 4098 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4099 .ops = &hsw_power_well_ops, 4100 .id = DISP_PW_ID_NONE, 4101 { 4102 .hsw.regs = &icl_ddi_power_well_regs, 4103 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4104 } 4105 }, 4106 { 4107 .name = "DDI IO TC1", 4108 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4109 .ops = &hsw_power_well_ops, 4110 .id = DISP_PW_ID_NONE, 4111 { 4112 .hsw.regs = &icl_ddi_power_well_regs, 4113 
.hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4114 }, 4115 }, 4116 { 4117 .name = "DDI IO TC2", 4118 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4119 .ops = &hsw_power_well_ops, 4120 .id = DISP_PW_ID_NONE, 4121 { 4122 .hsw.regs = &icl_ddi_power_well_regs, 4123 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4124 }, 4125 }, 4126 { 4127 .name = "DDI IO TC3", 4128 .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, 4129 .ops = &hsw_power_well_ops, 4130 .id = DISP_PW_ID_NONE, 4131 { 4132 .hsw.regs = &icl_ddi_power_well_regs, 4133 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4134 }, 4135 }, 4136 { 4137 .name = "DDI IO TC4", 4138 .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, 4139 .ops = &hsw_power_well_ops, 4140 .id = DISP_PW_ID_NONE, 4141 { 4142 .hsw.regs = &icl_ddi_power_well_regs, 4143 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4144 }, 4145 }, 4146 { 4147 .name = "DDI IO TC5", 4148 .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, 4149 .ops = &hsw_power_well_ops, 4150 .id = DISP_PW_ID_NONE, 4151 { 4152 .hsw.regs = &icl_ddi_power_well_regs, 4153 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC5, 4154 }, 4155 }, 4156 { 4157 .name = "DDI IO TC6", 4158 .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, 4159 .ops = &hsw_power_well_ops, 4160 .id = DISP_PW_ID_NONE, 4161 { 4162 .hsw.regs = &icl_ddi_power_well_regs, 4163 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC6, 4164 }, 4165 }, 4166 { 4167 .name = "TC cold off", 4168 .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, 4169 .ops = &tgl_tc_cold_off_ops, 4170 .id = TGL_DISP_PW_TC_COLD_OFF, 4171 }, 4172 { 4173 .name = "AUX A", 4174 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4175 .ops = &icl_aux_power_well_ops, 4176 .id = DISP_PW_ID_NONE, 4177 { 4178 .hsw.regs = &icl_aux_power_well_regs, 4179 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4180 }, 4181 }, 4182 { 4183 .name = "AUX B", 4184 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4185 .ops = &icl_aux_power_well_ops, 4186 .id = DISP_PW_ID_NONE, 4187 { 4188 .hsw.regs = &icl_aux_power_well_regs, 4189 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4190 }, 4191 }, 4192 { 4193 .name = "AUX C", 4194 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4195 .ops = &icl_aux_power_well_ops, 4196 .id = DISP_PW_ID_NONE, 4197 { 4198 .hsw.regs = &icl_aux_power_well_regs, 4199 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4200 }, 4201 }, 4202 { 4203 .name = "AUX USBC1", 4204 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4205 .ops = &icl_aux_power_well_ops, 4206 .id = DISP_PW_ID_NONE, 4207 { 4208 .hsw.regs = &icl_aux_power_well_regs, 4209 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4210 .hsw.is_tc_tbt = false, 4211 }, 4212 }, 4213 { 4214 .name = "AUX USBC2", 4215 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4216 .ops = &icl_aux_power_well_ops, 4217 .id = DISP_PW_ID_NONE, 4218 { 4219 .hsw.regs = &icl_aux_power_well_regs, 4220 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4221 .hsw.is_tc_tbt = false, 4222 }, 4223 }, 4224 { 4225 .name = "AUX USBC3", 4226 .domains = TGL_AUX_IO_USBC3_POWER_DOMAINS, 4227 .ops = &icl_aux_power_well_ops, 4228 .id = DISP_PW_ID_NONE, 4229 { 4230 .hsw.regs = &icl_aux_power_well_regs, 4231 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4232 .hsw.is_tc_tbt = false, 4233 }, 4234 }, 4235 { 4236 .name = "AUX USBC4", 4237 .domains = TGL_AUX_IO_USBC4_POWER_DOMAINS, 4238 .ops = &icl_aux_power_well_ops, 4239 .id = DISP_PW_ID_NONE, 4240 { 4241 .hsw.regs = &icl_aux_power_well_regs, 4242 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4243 .hsw.is_tc_tbt = false, 4244 }, 4245 }, 4246 { 4247 .name = "AUX USBC5", 4248 .domains = TGL_AUX_IO_USBC5_POWER_DOMAINS, 4249 .ops = &icl_aux_power_well_ops, 4250 .id = DISP_PW_ID_NONE, 4251 { 4252 .hsw.regs = &icl_aux_power_well_regs, 4253 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC5, 4254 .hsw.is_tc_tbt = 
false, 4255 }, 4256 }, 4257 { 4258 .name = "AUX USBC6", 4259 .domains = TGL_AUX_IO_USBC6_POWER_DOMAINS, 4260 .ops = &icl_aux_power_well_ops, 4261 .id = DISP_PW_ID_NONE, 4262 { 4263 .hsw.regs = &icl_aux_power_well_regs, 4264 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC6, 4265 .hsw.is_tc_tbt = false, 4266 }, 4267 }, 4268 { 4269 .name = "AUX TBT1", 4270 .domains = TGL_AUX_IO_TBT1_POWER_DOMAINS, 4271 .ops = &icl_aux_power_well_ops, 4272 .id = DISP_PW_ID_NONE, 4273 { 4274 .hsw.regs = &icl_aux_power_well_regs, 4275 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4276 .hsw.is_tc_tbt = true, 4277 }, 4278 }, 4279 { 4280 .name = "AUX TBT2", 4281 .domains = TGL_AUX_IO_TBT2_POWER_DOMAINS, 4282 .ops = &icl_aux_power_well_ops, 4283 .id = DISP_PW_ID_NONE, 4284 { 4285 .hsw.regs = &icl_aux_power_well_regs, 4286 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4287 .hsw.is_tc_tbt = true, 4288 }, 4289 }, 4290 { 4291 .name = "AUX TBT3", 4292 .domains = TGL_AUX_IO_TBT3_POWER_DOMAINS, 4293 .ops = &icl_aux_power_well_ops, 4294 .id = DISP_PW_ID_NONE, 4295 { 4296 .hsw.regs = &icl_aux_power_well_regs, 4297 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4298 .hsw.is_tc_tbt = true, 4299 }, 4300 }, 4301 { 4302 .name = "AUX TBT4", 4303 .domains = TGL_AUX_IO_TBT4_POWER_DOMAINS, 4304 .ops = &icl_aux_power_well_ops, 4305 .id = DISP_PW_ID_NONE, 4306 { 4307 .hsw.regs = &icl_aux_power_well_regs, 4308 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4309 .hsw.is_tc_tbt = true, 4310 }, 4311 }, 4312 { 4313 .name = "AUX TBT5", 4314 .domains = TGL_AUX_IO_TBT5_POWER_DOMAINS, 4315 .ops = &icl_aux_power_well_ops, 4316 .id = DISP_PW_ID_NONE, 4317 { 4318 .hsw.regs = &icl_aux_power_well_regs, 4319 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT5, 4320 .hsw.is_tc_tbt = true, 4321 }, 4322 }, 4323 { 4324 .name = "AUX TBT6", 4325 .domains = TGL_AUX_IO_TBT6_POWER_DOMAINS, 4326 .ops = &icl_aux_power_well_ops, 4327 .id = DISP_PW_ID_NONE, 4328 { 4329 .hsw.regs = &icl_aux_power_well_regs, 4330 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT6, 4331 .hsw.is_tc_tbt = true, 4332 }, 4333 }, 4334 { 4335 .name = "power well 4", 4336 .domains = TGL_PW_4_POWER_DOMAINS, 4337 .ops = &hsw_power_well_ops, 4338 .id = DISP_PW_ID_NONE, 4339 { 4340 .hsw.regs = &hsw_power_well_regs, 4341 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4342 .hsw.has_fuses = true, 4343 .hsw.irq_pipe_mask = BIT(PIPE_C), 4344 } 4345 }, 4346 { 4347 .name = "power well 5", 4348 .domains = TGL_PW_5_POWER_DOMAINS, 4349 .ops = &hsw_power_well_ops, 4350 .id = DISP_PW_ID_NONE, 4351 { 4352 .hsw.regs = &hsw_power_well_regs, 4353 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4354 .hsw.has_fuses = true, 4355 .hsw.irq_pipe_mask = BIT(PIPE_D), 4356 }, 4357 }, 4358 }; 4359 4360 static const struct i915_power_well_desc rkl_power_wells[] = { 4361 { 4362 .name = "always-on", 4363 .always_on = true, 4364 .domains = POWER_DOMAIN_MASK, 4365 .ops = &i9xx_always_on_power_well_ops, 4366 .id = DISP_PW_ID_NONE, 4367 }, 4368 { 4369 .name = "power well 1", 4370 /* Handled by the DMC firmware */ 4371 .always_on = true, 4372 .domains = 0, 4373 .ops = &hsw_power_well_ops, 4374 .id = SKL_DISP_PW_1, 4375 { 4376 .hsw.regs = &hsw_power_well_regs, 4377 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4378 .hsw.has_fuses = true, 4379 }, 4380 }, 4381 { 4382 .name = "DC off", 4383 .domains = RKL_DISPLAY_DC_OFF_POWER_DOMAINS, 4384 .ops = &gen9_dc_off_power_well_ops, 4385 .id = SKL_DISP_DC_OFF, 4386 }, 4387 { 4388 .name = "power well 3", 4389 .domains = RKL_PW_3_POWER_DOMAINS, 4390 .ops = &hsw_power_well_ops, 4391 .id = ICL_DISP_PW_3, 4392 { 4393 .hsw.regs = &hsw_power_well_regs, 4394 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4395 .hsw.irq_pipe_mask = BIT(PIPE_B), 
4396 .hsw.has_vga = true, 4397 .hsw.has_fuses = true, 4398 }, 4399 }, 4400 { 4401 .name = "power well 4", 4402 .domains = RKL_PW_4_POWER_DOMAINS, 4403 .ops = &hsw_power_well_ops, 4404 .id = DISP_PW_ID_NONE, 4405 { 4406 .hsw.regs = &hsw_power_well_regs, 4407 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4408 .hsw.has_fuses = true, 4409 .hsw.irq_pipe_mask = BIT(PIPE_C), 4410 } 4411 }, 4412 { 4413 .name = "DDI A IO", 4414 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4415 .ops = &hsw_power_well_ops, 4416 .id = DISP_PW_ID_NONE, 4417 { 4418 .hsw.regs = &icl_ddi_power_well_regs, 4419 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4420 } 4421 }, 4422 { 4423 .name = "DDI B IO", 4424 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4425 .ops = &hsw_power_well_ops, 4426 .id = DISP_PW_ID_NONE, 4427 { 4428 .hsw.regs = &icl_ddi_power_well_regs, 4429 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4430 } 4431 }, 4432 { 4433 .name = "DDI IO TC1", 4434 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4435 .ops = &hsw_power_well_ops, 4436 .id = DISP_PW_ID_NONE, 4437 { 4438 .hsw.regs = &icl_ddi_power_well_regs, 4439 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4440 }, 4441 }, 4442 { 4443 .name = "DDI IO TC2", 4444 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4445 .ops = &hsw_power_well_ops, 4446 .id = DISP_PW_ID_NONE, 4447 { 4448 .hsw.regs = &icl_ddi_power_well_regs, 4449 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4450 }, 4451 }, 4452 { 4453 .name = "AUX A", 4454 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4455 .ops = &icl_aux_power_well_ops, 4456 .id = DISP_PW_ID_NONE, 4457 { 4458 .hsw.regs = &icl_aux_power_well_regs, 4459 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4460 }, 4461 }, 4462 { 4463 .name = "AUX B", 4464 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4465 .ops = &icl_aux_power_well_ops, 4466 .id = DISP_PW_ID_NONE, 4467 { 4468 .hsw.regs = &icl_aux_power_well_regs, 4469 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4470 }, 4471 }, 4472 { 4473 .name = "AUX USBC1", 4474 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4475 .ops = &icl_aux_power_well_ops, 4476 .id = DISP_PW_ID_NONE, 4477 { 4478 .hsw.regs = &icl_aux_power_well_regs, 4479 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4480 }, 4481 }, 4482 { 4483 .name = "AUX USBC2", 4484 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4485 .ops = &icl_aux_power_well_ops, 4486 .id = DISP_PW_ID_NONE, 4487 { 4488 .hsw.regs = &icl_aux_power_well_regs, 4489 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4490 }, 4491 }, 4492 }; 4493 4494 static const struct i915_power_well_desc dg1_power_wells[] = { 4495 { 4496 .name = "always-on", 4497 .always_on = true, 4498 .domains = POWER_DOMAIN_MASK, 4499 .ops = &i9xx_always_on_power_well_ops, 4500 .id = DISP_PW_ID_NONE, 4501 }, 4502 { 4503 .name = "power well 1", 4504 /* Handled by the DMC firmware */ 4505 .always_on = true, 4506 .domains = 0, 4507 .ops = &hsw_power_well_ops, 4508 .id = SKL_DISP_PW_1, 4509 { 4510 .hsw.regs = &hsw_power_well_regs, 4511 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4512 .hsw.has_fuses = true, 4513 }, 4514 }, 4515 { 4516 .name = "DC off", 4517 .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS, 4518 .ops = &gen9_dc_off_power_well_ops, 4519 .id = SKL_DISP_DC_OFF, 4520 }, 4521 { 4522 .name = "power well 2", 4523 .domains = DG1_PW_2_POWER_DOMAINS, 4524 .ops = &hsw_power_well_ops, 4525 .id = SKL_DISP_PW_2, 4526 { 4527 .hsw.regs = &hsw_power_well_regs, 4528 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4529 .hsw.has_fuses = true, 4530 }, 4531 }, 4532 { 4533 .name = "power well 3", 4534 .domains = DG1_PW_3_POWER_DOMAINS, 4535 .ops = &hsw_power_well_ops, 4536 .id = ICL_DISP_PW_3, 4537 { 4538 .hsw.regs = &hsw_power_well_regs, 4539 .hsw.idx = ICL_PW_CTL_IDX_PW_3, 4540 .hsw.irq_pipe_mask = 
BIT(PIPE_B), 4541 .hsw.has_vga = true, 4542 .hsw.has_fuses = true, 4543 }, 4544 }, 4545 { 4546 .name = "DDI A IO", 4547 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4548 .ops = &hsw_power_well_ops, 4549 .id = DISP_PW_ID_NONE, 4550 { 4551 .hsw.regs = &icl_ddi_power_well_regs, 4552 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4553 } 4554 }, 4555 { 4556 .name = "DDI B IO", 4557 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4558 .ops = &hsw_power_well_ops, 4559 .id = DISP_PW_ID_NONE, 4560 { 4561 .hsw.regs = &icl_ddi_power_well_regs, 4562 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4563 } 4564 }, 4565 { 4566 .name = "DDI IO TC1", 4567 .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, 4568 .ops = &hsw_power_well_ops, 4569 .id = DISP_PW_ID_NONE, 4570 { 4571 .hsw.regs = &icl_ddi_power_well_regs, 4572 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4573 }, 4574 }, 4575 { 4576 .name = "DDI IO TC2", 4577 .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, 4578 .ops = &hsw_power_well_ops, 4579 .id = DISP_PW_ID_NONE, 4580 { 4581 .hsw.regs = &icl_ddi_power_well_regs, 4582 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4583 }, 4584 }, 4585 { 4586 .name = "AUX A", 4587 .domains = TGL_AUX_A_IO_POWER_DOMAINS, 4588 .ops = &icl_aux_power_well_ops, 4589 .id = DISP_PW_ID_NONE, 4590 { 4591 .hsw.regs = &icl_aux_power_well_regs, 4592 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4593 }, 4594 }, 4595 { 4596 .name = "AUX B", 4597 .domains = TGL_AUX_B_IO_POWER_DOMAINS, 4598 .ops = &icl_aux_power_well_ops, 4599 .id = DISP_PW_ID_NONE, 4600 { 4601 .hsw.regs = &icl_aux_power_well_regs, 4602 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4603 }, 4604 }, 4605 { 4606 .name = "AUX USBC1", 4607 .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS, 4608 .ops = &icl_aux_power_well_ops, 4609 .id = DISP_PW_ID_NONE, 4610 { 4611 .hsw.regs = &icl_aux_power_well_regs, 4612 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4613 .hsw.is_tc_tbt = false, 4614 }, 4615 }, 4616 { 4617 .name = "AUX USBC2", 4618 .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS, 4619 .ops = &icl_aux_power_well_ops, 4620 .id = DISP_PW_ID_NONE, 4621 { 4622 .hsw.regs = &icl_aux_power_well_regs, 4623 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4624 .hsw.is_tc_tbt = false, 4625 }, 4626 }, 4627 { 4628 .name = "power well 4", 4629 .domains = TGL_PW_4_POWER_DOMAINS, 4630 .ops = &hsw_power_well_ops, 4631 .id = DISP_PW_ID_NONE, 4632 { 4633 .hsw.regs = &hsw_power_well_regs, 4634 .hsw.idx = ICL_PW_CTL_IDX_PW_4, 4635 .hsw.has_fuses = true, 4636 .hsw.irq_pipe_mask = BIT(PIPE_C), 4637 } 4638 }, 4639 { 4640 .name = "power well 5", 4641 .domains = TGL_PW_5_POWER_DOMAINS, 4642 .ops = &hsw_power_well_ops, 4643 .id = DISP_PW_ID_NONE, 4644 { 4645 .hsw.regs = &hsw_power_well_regs, 4646 .hsw.idx = TGL_PW_CTL_IDX_PW_5, 4647 .hsw.has_fuses = true, 4648 .hsw.irq_pipe_mask = BIT(PIPE_D), 4649 }, 4650 }, 4651 }; 4652 4653 static const struct i915_power_well_desc xelpd_power_wells[] = { 4654 { 4655 .name = "always-on", 4656 .always_on = true, 4657 .domains = POWER_DOMAIN_MASK, 4658 .ops = &i9xx_always_on_power_well_ops, 4659 .id = DISP_PW_ID_NONE, 4660 }, 4661 { 4662 .name = "power well 1", 4663 /* Handled by the DMC firmware */ 4664 .always_on = true, 4665 .domains = 0, 4666 .ops = &hsw_power_well_ops, 4667 .id = SKL_DISP_PW_1, 4668 { 4669 .hsw.regs = &hsw_power_well_regs, 4670 .hsw.idx = ICL_PW_CTL_IDX_PW_1, 4671 .hsw.has_fuses = true, 4672 }, 4673 }, 4674 { 4675 .name = "DC off", 4676 .domains = XELPD_DISPLAY_DC_OFF_POWER_DOMAINS, 4677 .ops = &gen9_dc_off_power_well_ops, 4678 .id = SKL_DISP_DC_OFF, 4679 }, 4680 { 4681 .name = "power well 2", 4682 .domains = XELPD_PW_2_POWER_DOMAINS, 4683 .ops = &hsw_power_well_ops, 4684 .id = 
SKL_DISP_PW_2, 4685 { 4686 .hsw.regs = &hsw_power_well_regs, 4687 .hsw.idx = ICL_PW_CTL_IDX_PW_2, 4688 .hsw.has_vga = true, 4689 .hsw.has_fuses = true, 4690 }, 4691 }, 4692 { 4693 .name = "power well A", 4694 .domains = XELPD_PW_A_POWER_DOMAINS, 4695 .ops = &hsw_power_well_ops, 4696 .id = DISP_PW_ID_NONE, 4697 { 4698 .hsw.regs = &hsw_power_well_regs, 4699 .hsw.idx = XELPD_PW_CTL_IDX_PW_A, 4700 .hsw.irq_pipe_mask = BIT(PIPE_A), 4701 .hsw.has_fuses = true, 4702 }, 4703 }, 4704 { 4705 .name = "power well B", 4706 .domains = XELPD_PW_B_POWER_DOMAINS, 4707 .ops = &hsw_power_well_ops, 4708 .id = DISP_PW_ID_NONE, 4709 { 4710 .hsw.regs = &hsw_power_well_regs, 4711 .hsw.idx = XELPD_PW_CTL_IDX_PW_B, 4712 .hsw.irq_pipe_mask = BIT(PIPE_B), 4713 .hsw.has_fuses = true, 4714 }, 4715 }, 4716 { 4717 .name = "power well C", 4718 .domains = XELPD_PW_C_POWER_DOMAINS, 4719 .ops = &hsw_power_well_ops, 4720 .id = DISP_PW_ID_NONE, 4721 { 4722 .hsw.regs = &hsw_power_well_regs, 4723 .hsw.idx = XELPD_PW_CTL_IDX_PW_C, 4724 .hsw.irq_pipe_mask = BIT(PIPE_C), 4725 .hsw.has_fuses = true, 4726 }, 4727 }, 4728 { 4729 .name = "power well D", 4730 .domains = XELPD_PW_D_POWER_DOMAINS, 4731 .ops = &hsw_power_well_ops, 4732 .id = DISP_PW_ID_NONE, 4733 { 4734 .hsw.regs = &hsw_power_well_regs, 4735 .hsw.idx = XELPD_PW_CTL_IDX_PW_D, 4736 .hsw.irq_pipe_mask = BIT(PIPE_D), 4737 .hsw.has_fuses = true, 4738 }, 4739 }, 4740 { 4741 .name = "DDI A IO", 4742 .domains = ICL_DDI_IO_A_POWER_DOMAINS, 4743 .ops = &hsw_power_well_ops, 4744 .id = DISP_PW_ID_NONE, 4745 { 4746 .hsw.regs = &icl_ddi_power_well_regs, 4747 .hsw.idx = ICL_PW_CTL_IDX_DDI_A, 4748 } 4749 }, 4750 { 4751 .name = "DDI B IO", 4752 .domains = ICL_DDI_IO_B_POWER_DOMAINS, 4753 .ops = &hsw_power_well_ops, 4754 .id = DISP_PW_ID_NONE, 4755 { 4756 .hsw.regs = &icl_ddi_power_well_regs, 4757 .hsw.idx = ICL_PW_CTL_IDX_DDI_B, 4758 } 4759 }, 4760 { 4761 .name = "DDI C IO", 4762 .domains = ICL_DDI_IO_C_POWER_DOMAINS, 4763 .ops = &hsw_power_well_ops, 4764 .id = DISP_PW_ID_NONE, 4765 { 4766 .hsw.regs = &icl_ddi_power_well_regs, 4767 .hsw.idx = ICL_PW_CTL_IDX_DDI_C, 4768 } 4769 }, 4770 { 4771 .name = "DDI IO D_XELPD", 4772 .domains = XELPD_DDI_IO_D_XELPD_POWER_DOMAINS, 4773 .ops = &hsw_power_well_ops, 4774 .id = DISP_PW_ID_NONE, 4775 { 4776 .hsw.regs = &icl_ddi_power_well_regs, 4777 .hsw.idx = XELPD_PW_CTL_IDX_DDI_D, 4778 } 4779 }, 4780 { 4781 .name = "DDI IO E_XELPD", 4782 .domains = XELPD_DDI_IO_E_XELPD_POWER_DOMAINS, 4783 .ops = &hsw_power_well_ops, 4784 .id = DISP_PW_ID_NONE, 4785 { 4786 .hsw.regs = &icl_ddi_power_well_regs, 4787 .hsw.idx = XELPD_PW_CTL_IDX_DDI_E, 4788 } 4789 }, 4790 { 4791 .name = "DDI IO TC1", 4792 .domains = XELPD_DDI_IO_TC1_POWER_DOMAINS, 4793 .ops = &hsw_power_well_ops, 4794 .id = DISP_PW_ID_NONE, 4795 { 4796 .hsw.regs = &icl_ddi_power_well_regs, 4797 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1, 4798 } 4799 }, 4800 { 4801 .name = "DDI IO TC2", 4802 .domains = XELPD_DDI_IO_TC2_POWER_DOMAINS, 4803 .ops = &hsw_power_well_ops, 4804 .id = DISP_PW_ID_NONE, 4805 { 4806 .hsw.regs = &icl_ddi_power_well_regs, 4807 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2, 4808 } 4809 }, 4810 { 4811 .name = "DDI IO TC3", 4812 .domains = XELPD_DDI_IO_TC3_POWER_DOMAINS, 4813 .ops = &hsw_power_well_ops, 4814 .id = DISP_PW_ID_NONE, 4815 { 4816 .hsw.regs = &icl_ddi_power_well_regs, 4817 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC3, 4818 } 4819 }, 4820 { 4821 .name = "DDI IO TC4", 4822 .domains = XELPD_DDI_IO_TC4_POWER_DOMAINS, 4823 .ops = &hsw_power_well_ops, 4824 .id = DISP_PW_ID_NONE, 4825 { 4826 .hsw.regs = 
&icl_ddi_power_well_regs, 4827 .hsw.idx = TGL_PW_CTL_IDX_DDI_TC4, 4828 } 4829 }, 4830 { 4831 .name = "AUX A", 4832 .domains = ICL_AUX_A_IO_POWER_DOMAINS, 4833 .ops = &icl_aux_power_well_ops, 4834 .id = DISP_PW_ID_NONE, 4835 { 4836 .hsw.regs = &icl_aux_power_well_regs, 4837 .hsw.idx = ICL_PW_CTL_IDX_AUX_A, 4838 .hsw.fixed_enable_delay = 600, 4839 }, 4840 }, 4841 { 4842 .name = "AUX B", 4843 .domains = ICL_AUX_B_IO_POWER_DOMAINS, 4844 .ops = &icl_aux_power_well_ops, 4845 .id = DISP_PW_ID_NONE, 4846 { 4847 .hsw.regs = &icl_aux_power_well_regs, 4848 .hsw.idx = ICL_PW_CTL_IDX_AUX_B, 4849 .hsw.fixed_enable_delay = 600, 4850 }, 4851 }, 4852 { 4853 .name = "AUX C", 4854 .domains = TGL_AUX_C_IO_POWER_DOMAINS, 4855 .ops = &icl_aux_power_well_ops, 4856 .id = DISP_PW_ID_NONE, 4857 { 4858 .hsw.regs = &icl_aux_power_well_regs, 4859 .hsw.idx = ICL_PW_CTL_IDX_AUX_C, 4860 .hsw.fixed_enable_delay = 600, 4861 }, 4862 }, 4863 { 4864 .name = "AUX D_XELPD", 4865 .domains = XELPD_AUX_IO_D_XELPD_POWER_DOMAINS, 4866 .ops = &icl_aux_power_well_ops, 4867 .id = DISP_PW_ID_NONE, 4868 { 4869 .hsw.regs = &icl_aux_power_well_regs, 4870 .hsw.idx = XELPD_PW_CTL_IDX_AUX_D, 4871 .hsw.fixed_enable_delay = 600, 4872 }, 4873 }, 4874 { 4875 .name = "AUX E_XELPD", 4876 .domains = XELPD_AUX_IO_E_XELPD_POWER_DOMAINS, 4877 .ops = &icl_aux_power_well_ops, 4878 .id = DISP_PW_ID_NONE, 4879 { 4880 .hsw.regs = &icl_aux_power_well_regs, 4881 .hsw.idx = XELPD_PW_CTL_IDX_AUX_E, 4882 }, 4883 }, 4884 { 4885 .name = "AUX USBC1", 4886 .domains = XELPD_AUX_IO_USBC1_POWER_DOMAINS, 4887 .ops = &icl_aux_power_well_ops, 4888 .id = DISP_PW_ID_NONE, 4889 { 4890 .hsw.regs = &icl_aux_power_well_regs, 4891 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1, 4892 .hsw.fixed_enable_delay = 600, 4893 }, 4894 }, 4895 { 4896 .name = "AUX USBC2", 4897 .domains = XELPD_AUX_IO_USBC2_POWER_DOMAINS, 4898 .ops = &icl_aux_power_well_ops, 4899 .id = DISP_PW_ID_NONE, 4900 { 4901 .hsw.regs = &icl_aux_power_well_regs, 4902 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2, 4903 }, 4904 }, 4905 { 4906 .name = "AUX USBC3", 4907 .domains = XELPD_AUX_IO_USBC3_POWER_DOMAINS, 4908 .ops = &icl_aux_power_well_ops, 4909 .id = DISP_PW_ID_NONE, 4910 { 4911 .hsw.regs = &icl_aux_power_well_regs, 4912 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC3, 4913 }, 4914 }, 4915 { 4916 .name = "AUX USBC4", 4917 .domains = XELPD_AUX_IO_USBC4_POWER_DOMAINS, 4918 .ops = &icl_aux_power_well_ops, 4919 .id = DISP_PW_ID_NONE, 4920 { 4921 .hsw.regs = &icl_aux_power_well_regs, 4922 .hsw.idx = TGL_PW_CTL_IDX_AUX_TC4, 4923 }, 4924 }, 4925 { 4926 .name = "AUX TBT1", 4927 .domains = XELPD_AUX_IO_TBT1_POWER_DOMAINS, 4928 .ops = &icl_aux_power_well_ops, 4929 .id = DISP_PW_ID_NONE, 4930 { 4931 .hsw.regs = &icl_aux_power_well_regs, 4932 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT1, 4933 .hsw.is_tc_tbt = true, 4934 }, 4935 }, 4936 { 4937 .name = "AUX TBT2", 4938 .domains = XELPD_AUX_IO_TBT2_POWER_DOMAINS, 4939 .ops = &icl_aux_power_well_ops, 4940 .id = DISP_PW_ID_NONE, 4941 { 4942 .hsw.regs = &icl_aux_power_well_regs, 4943 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT2, 4944 .hsw.is_tc_tbt = true, 4945 }, 4946 }, 4947 { 4948 .name = "AUX TBT3", 4949 .domains = XELPD_AUX_IO_TBT3_POWER_DOMAINS, 4950 .ops = &icl_aux_power_well_ops, 4951 .id = DISP_PW_ID_NONE, 4952 { 4953 .hsw.regs = &icl_aux_power_well_regs, 4954 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT3, 4955 .hsw.is_tc_tbt = true, 4956 }, 4957 }, 4958 { 4959 .name = "AUX TBT4", 4960 .domains = XELPD_AUX_IO_TBT4_POWER_DOMAINS, 4961 .ops = &icl_aux_power_well_ops, 4962 .id = DISP_PW_ID_NONE, 4963 { 4964 .hsw.regs = 
&icl_aux_power_well_regs, 4965 .hsw.idx = TGL_PW_CTL_IDX_AUX_TBT4, 4966 .hsw.is_tc_tbt = true, 4967 }, 4968 }, 4969 }; 4970 4971 static int 4972 sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, 4973 int disable_power_well) 4974 { 4975 if (disable_power_well >= 0) 4976 return !!disable_power_well; 4977 4978 return 1; 4979 } 4980 4981 static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, 4982 int enable_dc) 4983 { 4984 u32 mask; 4985 int requested_dc; 4986 int max_dc; 4987 4988 if (!HAS_DISPLAY(dev_priv)) 4989 return 0; 4990 4991 if (IS_DG1(dev_priv)) 4992 max_dc = 3; 4993 else if (DISPLAY_VER(dev_priv) >= 12) 4994 max_dc = 4; 4995 else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) 4996 max_dc = 1; 4997 else if (DISPLAY_VER(dev_priv) >= 9) 4998 max_dc = 2; 4999 else 5000 max_dc = 0; 5001 5002 /* 5003 * DC9 has a separate HW flow from the rest of the DC states, 5004 * not depending on the DMC firmware. It's needed by system 5005 * suspend/resume, so allow it unconditionally. 5006 */ 5007 mask = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || 5008 DISPLAY_VER(dev_priv) >= 11 ? 5009 DC_STATE_EN_DC9 : 0; 5010 5011 if (!dev_priv->params.disable_power_well) 5012 max_dc = 0; 5013 5014 if (enable_dc >= 0 && enable_dc <= max_dc) { 5015 requested_dc = enable_dc; 5016 } else if (enable_dc == -1) { 5017 requested_dc = max_dc; 5018 } else if (enable_dc > max_dc && enable_dc <= 4) { 5019 drm_dbg_kms(&dev_priv->drm, 5020 "Adjusting requested max DC state (%d->%d)\n", 5021 enable_dc, max_dc); 5022 requested_dc = max_dc; 5023 } else { 5024 drm_err(&dev_priv->drm, 5025 "Unexpected value for enable_dc (%d)\n", enable_dc); 5026 requested_dc = max_dc; 5027 } 5028 5029 switch (requested_dc) { 5030 case 4: 5031 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; 5032 break; 5033 case 3: 5034 mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; 5035 break; 5036 case 2: 5037 mask |= DC_STATE_EN_UPTO_DC6; 5038 break; 5039 case 1: 5040 mask |= DC_STATE_EN_UPTO_DC5; 5041 break; 5042 } 5043 5044 drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask); 5045 5046 return mask; 5047 } 5048 5049 static int 5050 __set_power_wells(struct i915_power_domains *power_domains, 5051 const struct i915_power_well_desc *power_well_descs, 5052 int power_well_descs_sz, u64 skip_mask) 5053 { 5054 struct drm_i915_private *i915 = container_of(power_domains, 5055 struct drm_i915_private, 5056 power_domains); 5057 u64 power_well_ids = 0; 5058 int power_well_count = 0; 5059 int i, plt_idx = 0; 5060 5061 for (i = 0; i < power_well_descs_sz; i++) 5062 if (!(BIT_ULL(power_well_descs[i].id) & skip_mask)) 5063 power_well_count++; 5064 5065 power_domains->power_well_count = power_well_count; 5066 power_domains->power_wells = 5067 kcalloc(power_well_count, 5068 sizeof(*power_domains->power_wells), 5069 GFP_KERNEL); 5070 if (!power_domains->power_wells) 5071 return -ENOMEM; 5072 5073 for (i = 0; i < power_well_descs_sz; i++) { 5074 enum i915_power_well_id id = power_well_descs[i].id; 5075 5076 if (BIT_ULL(id) & skip_mask) 5077 continue; 5078 5079 power_domains->power_wells[plt_idx++].desc = 5080 &power_well_descs[i]; 5081 5082 if (id == DISP_PW_ID_NONE) 5083 continue; 5084 5085 drm_WARN_ON(&i915->drm, id >= sizeof(power_well_ids) * 8); 5086 drm_WARN_ON(&i915->drm, power_well_ids & BIT_ULL(id)); 5087 power_well_ids |= BIT_ULL(id); 5088 } 5089 5090 return 0; 5091 } 5092 5093 #define set_power_wells_mask(power_domains, __power_well_descs, skip_mask) \ 5094 __set_power_wells(power_domains, 
__power_well_descs, \ 5095 ARRAY_SIZE(__power_well_descs), skip_mask) 5096 5097 #define set_power_wells(power_domains, __power_well_descs) \ 5098 set_power_wells_mask(power_domains, __power_well_descs, 0) 5099 5100 /** 5101 * intel_power_domains_init - initializes the power domain structures 5102 * @dev_priv: i915 device instance 5103 * 5104 * Initializes the power domain structures for @dev_priv depending upon the 5105 * supported platform. 5106 */ 5107 int intel_power_domains_init(struct drm_i915_private *dev_priv) 5108 { 5109 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5110 int err; 5111 5112 dev_priv->params.disable_power_well = 5113 sanitize_disable_power_well_option(dev_priv, 5114 dev_priv->params.disable_power_well); 5115 dev_priv->dmc.allowed_dc_mask = 5116 get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc); 5117 5118 dev_priv->dmc.target_dc_state = 5119 sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); 5120 5121 BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); 5122 5123 rw_init(&power_domains->lock, "ipdl"); 5124 5125 INIT_DELAYED_WORK(&power_domains->async_put_work, 5126 intel_display_power_put_async_work); 5127 5128 /* 5129 * The enabling order will be from lower to higher indexed wells, 5130 * the disabling order is reversed. 5131 */ 5132 if (!HAS_DISPLAY(dev_priv)) { 5133 power_domains->power_well_count = 0; 5134 err = 0; 5135 } else if (DISPLAY_VER(dev_priv) >= 13) { 5136 err = set_power_wells(power_domains, xelpd_power_wells); 5137 } else if (IS_DG1(dev_priv)) { 5138 err = set_power_wells(power_domains, dg1_power_wells); 5139 } else if (IS_ALDERLAKE_S(dev_priv)) { 5140 err = set_power_wells_mask(power_domains, tgl_power_wells, 5141 BIT_ULL(TGL_DISP_PW_TC_COLD_OFF)); 5142 } else if (IS_ROCKETLAKE(dev_priv)) { 5143 err = set_power_wells(power_domains, rkl_power_wells); 5144 } else if (DISPLAY_VER(dev_priv) == 12) { 5145 err = set_power_wells(power_domains, tgl_power_wells); 5146 } else if (DISPLAY_VER(dev_priv) == 11) { 5147 err = set_power_wells(power_domains, icl_power_wells); 5148 } else if (IS_GEMINILAKE(dev_priv)) { 5149 err = set_power_wells(power_domains, glk_power_wells); 5150 } else if (IS_BROXTON(dev_priv)) { 5151 err = set_power_wells(power_domains, bxt_power_wells); 5152 } else if (DISPLAY_VER(dev_priv) == 9) { 5153 err = set_power_wells(power_domains, skl_power_wells); 5154 } else if (IS_CHERRYVIEW(dev_priv)) { 5155 err = set_power_wells(power_domains, chv_power_wells); 5156 } else if (IS_BROADWELL(dev_priv)) { 5157 err = set_power_wells(power_domains, bdw_power_wells); 5158 } else if (IS_HASWELL(dev_priv)) { 5159 err = set_power_wells(power_domains, hsw_power_wells); 5160 } else if (IS_VALLEYVIEW(dev_priv)) { 5161 err = set_power_wells(power_domains, vlv_power_wells); 5162 } else if (IS_I830(dev_priv)) { 5163 err = set_power_wells(power_domains, i830_power_wells); 5164 } else { 5165 err = set_power_wells(power_domains, i9xx_always_on_power_well); 5166 } 5167 5168 return err; 5169 } 5170 5171 /** 5172 * intel_power_domains_cleanup - clean up power domains resources 5173 * @dev_priv: i915 device instance 5174 * 5175 * Release any resources acquired by intel_power_domains_init() 5176 */ 5177 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv) 5178 { 5179 kfree(dev_priv->power_domains.power_wells); 5180 } 5181 5182 static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) 5183 { 5184 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5185 struct i915_power_well *power_well; 5186 5187 
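/*
 * Sync each well's software request state with the hardware and cache
 * the current on/off state in hw_enabled.
 */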
mutex_lock(&power_domains->lock); 5188 for_each_power_well(dev_priv, power_well) { 5189 power_well->desc->ops->sync_hw(dev_priv, power_well); 5190 power_well->hw_enabled = 5191 power_well->desc->ops->is_enabled(dev_priv, power_well); 5192 } 5193 mutex_unlock(&power_domains->lock); 5194 } 5195 5196 static void gen9_dbuf_slice_set(struct drm_i915_private *dev_priv, 5197 enum dbuf_slice slice, bool enable) 5198 { 5199 i915_reg_t reg = DBUF_CTL_S(slice); 5200 bool state; 5201 5202 intel_de_rmw(dev_priv, reg, DBUF_POWER_REQUEST, 5203 enable ? DBUF_POWER_REQUEST : 0); 5204 intel_de_posting_read(dev_priv, reg); 5205 udelay(10); 5206 5207 state = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE; 5208 drm_WARN(&dev_priv->drm, enable != state, 5209 "DBuf slice %d power %s timeout!\n", 5210 slice, enabledisable(enable)); 5211 } 5212 5213 void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv, 5214 u8 req_slices) 5215 { 5216 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5217 u8 slice_mask = INTEL_INFO(dev_priv)->dbuf.slice_mask; 5218 enum dbuf_slice slice; 5219 5220 drm_WARN(&dev_priv->drm, req_slices & ~slice_mask, 5221 "Invalid set of dbuf slices (0x%x) requested (total dbuf slices 0x%x)\n", 5222 req_slices, slice_mask); 5223 5224 drm_dbg_kms(&dev_priv->drm, "Updating dbuf slices to 0x%x\n", 5225 req_slices); 5226 5227 /* 5228 * This might run in parallel with gen9_dc_off_power_well_enable() 5229 * being called from intel_dp_detect(), for instance. Without the lock, 5230 * gen9_assert_dbuf_enabled() could preempt us and see the registers 5231 * already updated while dev_priv->dbuf.enabled_slices is not, 5232 * triggering the assertion because of the race. 5233 */ 5234 mutex_lock(&power_domains->lock); 5235 5236 for_each_dbuf_slice(dev_priv, slice) 5237 gen9_dbuf_slice_set(dev_priv, slice, req_slices & BIT(slice)); 5238 5239 dev_priv->dbuf.enabled_slices = req_slices; 5240 5241 mutex_unlock(&power_domains->lock); 5242 } 5243 5244 static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) 5245 { 5246 dev_priv->dbuf.enabled_slices = 5247 intel_enabled_dbuf_slices_mask(dev_priv); 5248 5249 /* 5250 * Just power up at least one slice; we will 5251 * figure out later which slices we have and what we need. 5252 */ 5253 gen9_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | 5254 dev_priv->dbuf.enabled_slices); 5255 } 5256 5257 static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) 5258 { 5259 gen9_dbuf_slices_update(dev_priv, 0); 5260 } 5261 5262 static void gen12_dbuf_slices_config(struct drm_i915_private *dev_priv) 5263 { 5264 enum dbuf_slice slice; 5265 5266 if (IS_ALDERLAKE_P(dev_priv)) 5267 return; 5268 5269 for_each_dbuf_slice(dev_priv, slice) 5270 intel_de_rmw(dev_priv, DBUF_CTL_S(slice), 5271 DBUF_TRACKER_STATE_SERVICE_MASK, 5272 DBUF_TRACKER_STATE_SERVICE(8)); 5273 } 5274 5275 static void icl_mbus_init(struct drm_i915_private *dev_priv) 5276 { 5277 unsigned long abox_regs = INTEL_INFO(dev_priv)->abox_mask; 5278 u32 mask, val, i; 5279 5280 if (IS_ALDERLAKE_P(dev_priv)) 5281 return; 5282 5283 mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK | 5284 MBUS_ABOX_BT_CREDIT_POOL2_MASK | 5285 MBUS_ABOX_B_CREDIT_MASK | 5286 MBUS_ABOX_BW_CREDIT_MASK; 5287 val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 5288 MBUS_ABOX_BT_CREDIT_POOL2(16) | 5289 MBUS_ABOX_B_CREDIT(1) | 5290 MBUS_ABOX_BW_CREDIT(1); 5291 5292 /* 5293 * gen12 platforms that use abox1 and abox2 for pixel data reads still 5294 * expect us to program the abox_ctl0 register as well, even though 5295 * we don't have to program other instance-0 registers like BW_BUDDY. 
5296 */ 5297 if (DISPLAY_VER(dev_priv) == 12) 5298 abox_regs |= BIT(0); 5299 5300 for_each_set_bit(i, &abox_regs, sizeof(abox_regs)) 5301 intel_de_rmw(dev_priv, MBUS_ABOX_CTL(i), mask, val); 5302 } 5303 5304 static void hsw_assert_cdclk(struct drm_i915_private *dev_priv) 5305 { 5306 u32 val = intel_de_read(dev_priv, LCPLL_CTL); 5307 5308 /* 5309 * The LCPLL register should be turned on by the BIOS. For now 5310 * let's just check its state and print errors in case 5311 * something is wrong. Don't even try to turn it on. 5312 */ 5313 5314 if (val & LCPLL_CD_SOURCE_FCLK) 5315 drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n"); 5316 5317 if (val & LCPLL_PLL_DISABLE) 5318 drm_err(&dev_priv->drm, "LCPLL is disabled\n"); 5319 5320 if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC) 5321 drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n"); 5322 } 5323 5324 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 5325 { 5326 struct drm_device *dev = &dev_priv->drm; 5327 struct intel_crtc *crtc; 5328 5329 for_each_intel_crtc(dev, crtc) 5330 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 5331 pipe_name(crtc->pipe)); 5332 5333 I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2), 5334 "Display power well on\n"); 5335 I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE, 5336 "SPLL enabled\n"); 5337 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, 5338 "WRPLL1 enabled\n"); 5339 I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, 5340 "WRPLL2 enabled\n"); 5341 I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON, 5342 "Panel power on\n"); 5343 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 5344 "CPU PWM1 enabled\n"); 5345 if (IS_HASWELL(dev_priv)) 5346 I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 5347 "CPU PWM2 enabled\n"); 5348 I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 5349 "PCH PWM1 enabled\n"); 5350 I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 5351 "Utility pin enabled\n"); 5352 I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE, 5353 "PCH GTC enabled\n"); 5354 5355 /* 5356 * In theory we can still leave IRQs enabled, as long as only the HPD 5357 * interrupts remain enabled. We used to check for that, but since it's 5358 * gen-specific and since we only disable LCPLL after we fully disable 5359 * the interrupts, the check below should be enough. 5360 */ 5361 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 5362 } 5363 5364 static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) 5365 { 5366 if (IS_HASWELL(dev_priv)) 5367 return intel_de_read(dev_priv, D_COMP_HSW); 5368 else 5369 return intel_de_read(dev_priv, D_COMP_BDW); 5370 } 5371 5372 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) 5373 { 5374 if (IS_HASWELL(dev_priv)) { 5375 if (sandybridge_pcode_write(dev_priv, 5376 GEN6_PCODE_WRITE_D_COMP, val)) 5377 drm_dbg_kms(&dev_priv->drm, 5378 "Failed to write to D_COMP\n"); 5379 } else { 5380 intel_de_write(dev_priv, D_COMP_BDW, val); 5381 intel_de_posting_read(dev_priv, D_COMP_BDW); 5382 } 5383 } 5384 5385 /* 5386 * This function implements pieces of two sequences from BSpec: 5387 * - Sequence for display software to disable LCPLL 5388 * - Sequence for display software to allow package C8+ 5389 * The steps implemented here are just the steps that actually touch the LCPLL 5390 * register. 
Callers should take care of disabling all the display engine 5391 * functions, doing the mode unset, fixing interrupts, etc. 5392 */ 5393 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 5394 bool switch_to_fclk, bool allow_power_down) 5395 { 5396 u32 val; 5397 5398 assert_can_disable_lcpll(dev_priv); 5399 5400 val = intel_de_read(dev_priv, LCPLL_CTL); 5401 5402 if (switch_to_fclk) { 5403 val |= LCPLL_CD_SOURCE_FCLK; 5404 intel_de_write(dev_priv, LCPLL_CTL, val); 5405 5406 if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) & 5407 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 5408 drm_err(&dev_priv->drm, "Switching to FCLK failed\n"); 5409 5410 val = intel_de_read(dev_priv, LCPLL_CTL); 5411 } 5412 5413 val |= LCPLL_PLL_DISABLE; 5414 intel_de_write(dev_priv, LCPLL_CTL, val); 5415 intel_de_posting_read(dev_priv, LCPLL_CTL); 5416 5417 if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1)) 5418 drm_err(&dev_priv->drm, "LCPLL still locked\n"); 5419 5420 val = hsw_read_dcomp(dev_priv); 5421 val |= D_COMP_COMP_DISABLE; 5422 hsw_write_dcomp(dev_priv, val); 5423 ndelay(100); 5424 5425 if (wait_for((hsw_read_dcomp(dev_priv) & 5426 D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) 5427 drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n"); 5428 5429 if (allow_power_down) { 5430 val = intel_de_read(dev_priv, LCPLL_CTL); 5431 val |= LCPLL_POWER_DOWN_ALLOW; 5432 intel_de_write(dev_priv, LCPLL_CTL, val); 5433 intel_de_posting_read(dev_priv, LCPLL_CTL); 5434 } 5435 } 5436 5437 /* 5438 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 5439 * source. 5440 */ 5441 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 5442 { 5443 u32 val; 5444 5445 val = intel_de_read(dev_priv, LCPLL_CTL); 5446 5447 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 5448 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 5449 return; 5450 5451 /* 5452 * Make sure we're not in the PC8 state before disabling PC8, otherwise 5453 * we'll hang the machine. To keep us out of PC8, just grab a forcewake reference. 5454 */ 5455 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); 5456 5457 if (val & LCPLL_POWER_DOWN_ALLOW) { 5458 val &= ~LCPLL_POWER_DOWN_ALLOW; 5459 intel_de_write(dev_priv, LCPLL_CTL, val); 5460 intel_de_posting_read(dev_priv, LCPLL_CTL); 5461 } 5462 5463 val = hsw_read_dcomp(dev_priv); 5464 val |= D_COMP_COMP_FORCE; 5465 val &= ~D_COMP_COMP_DISABLE; 5466 hsw_write_dcomp(dev_priv, val); 5467 5468 val = intel_de_read(dev_priv, LCPLL_CTL); 5469 val &= ~LCPLL_PLL_DISABLE; 5470 intel_de_write(dev_priv, LCPLL_CTL, val); 5471 5472 if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5)) 5473 drm_err(&dev_priv->drm, "LCPLL not locked yet\n"); 5474 5475 if (val & LCPLL_CD_SOURCE_FCLK) { 5476 val = intel_de_read(dev_priv, LCPLL_CTL); 5477 val &= ~LCPLL_CD_SOURCE_FCLK; 5478 intel_de_write(dev_priv, LCPLL_CTL, val); 5479 5480 if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) & 5481 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 5482 drm_err(&dev_priv->drm, 5483 "Switching back to LCPLL failed\n"); 5484 } 5485 5486 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); 5487 5488 intel_update_cdclk(dev_priv); 5489 intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); 5490 } 5491 5492 /* 5493 * Package states C8 and deeper are really deep PC states that can only be 5494 * reached when all the devices on the system allow it, so even if the graphics 5495 * device allows PC8+, it doesn't mean the system will actually get to these 5496 * states. 
Our driver only allows PC8+ when going into runtime PM. 5497 * 5498 * The requirements for PC8+ are that all the outputs are disabled, the power 5499 * well is disabled and most interrupts are disabled, and these are also 5500 * requirements for runtime PM. When these conditions are met, we manually do 5501 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 5502 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard 5503 * hang the machine. 5504 * 5505 * When we really reach PC8 or deeper states (not just when we allow it) we lose 5506 * the state of some registers, so when we come back from PC8+ we need to 5507 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 5508 * need to take care of the registers kept by RC6. Notice that this happens even 5509 * if we don't put the device in PCI D3 state (which is what currently happens 5510 * because of the runtime PM support). 5511 * 5512 * For more, read "Display Sequences for Package C8" in the hardware 5513 * documentation. 5514 */ 5515 static void hsw_enable_pc8(struct drm_i915_private *dev_priv) 5516 { 5517 u32 val; 5518 5519 drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n"); 5520 5521 if (HAS_PCH_LPT_LP(dev_priv)) { 5522 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5523 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 5524 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5525 } 5526 5527 lpt_disable_clkout_dp(dev_priv); 5528 hsw_disable_lcpll(dev_priv, true, true); 5529 } 5530 5531 static void hsw_disable_pc8(struct drm_i915_private *dev_priv) 5532 { 5533 u32 val; 5534 5535 drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n"); 5536 5537 hsw_restore_lcpll(dev_priv); 5538 intel_init_pch_refclk(dev_priv); 5539 5540 if (HAS_PCH_LPT_LP(dev_priv)) { 5541 val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D); 5542 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 5543 intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val); 5544 } 5545 } 5546 5547 static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv, 5548 bool enable) 5549 { 5550 i915_reg_t reg; 5551 u32 reset_bits, val; 5552 5553 if (IS_IVYBRIDGE(dev_priv)) { 5554 reg = GEN7_MSG_CTL; 5555 reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK; 5556 } else { 5557 reg = HSW_NDE_RSTWRN_OPT; 5558 reset_bits = RESET_PCH_HANDSHAKE_ENABLE; 5559 } 5560 5561 val = intel_de_read(dev_priv, reg); 5562 5563 if (enable) 5564 val |= reset_bits; 5565 else 5566 val &= ~reset_bits; 5567 5568 intel_de_write(dev_priv, reg, val); 5569 } 5570 5571 static void skl_display_core_init(struct drm_i915_private *dev_priv, 5572 bool resume) 5573 { 5574 struct i915_power_domains *power_domains = &dev_priv->power_domains; 5575 struct i915_power_well *well; 5576 5577 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 5578 5579 /* enable PCH reset handshake */ 5580 intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv)); 5581 5582 if (!HAS_DISPLAY(dev_priv)) 5583 return; 5584 5585 /* enable PG1 and Misc I/O */ 5586 mutex_lock(&power_domains->lock); 5587 5588 well = lookup_power_well(dev_priv, SKL_DISP_PW_1); 5589 intel_power_well_enable(dev_priv, well); 5590 5591 well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO); 5592 intel_power_well_enable(dev_priv, well); 5593 5594 mutex_unlock(&power_domains->lock); 5595 5596 intel_cdclk_init_hw(dev_priv); 5597 5598 gen9_dbuf_enable(dev_priv); 5599 5600 if (resume && intel_dmc_has_payload(dev_priv)) 5601 intel_dmc_load_program(dev_priv); 5602 } 5603 
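/*
 * Undo skl_display_core_init(): disable the DC states and DBUF, uninit
 * CDCLK and finally drop the driver's power well 1 request. The MISC IO
 * well is deliberately left enabled, see the comment in the body below.
 */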
static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
				      bool enable)
{
	i915_reg_t reg;
	u32 reset_bits, val;

	if (IS_IVYBRIDGE(dev_priv)) {
		reg = GEN7_MSG_CTL;
		reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
	} else {
		reg = HSW_NDE_RSTWRN_OPT;
		reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
	}

	val = intel_de_read(dev_priv, reg);

	if (enable)
		val |= reset_bits;
	else
		val &= ~reset_bits;

	intel_de_write(dev_priv, reg, val);
}

static void skl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* enable PCH reset handshake */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* enable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_MISC_IO);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}

static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/* disable PG1 and Misc I/O */
	mutex_lock(&power_domains->lock);

	/*
	 * BSpec says to keep the MISC IO power well enabled here, only
	 * remove our request for power well 1.
	 * Note that even though the driver's request is removed, power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}
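/*
 * bxt_display_core_init() below is the BXT/GLK variant of the sequence
 * above: there is no PCH, so the reset handshake must be disabled rather
 * than enabled, and no SKL_DISP_PW_MISC_IO well is requested.
 */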
static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/*
	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
	 * or else the reset will hang because there is no PCH to respond.
	 * Move the handshake programming to initialization sequence.
	 * Previously was left up to BIOS.
	 */
	intel_pch_reset_handshake(dev_priv, false);

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* Enable PG1 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	intel_cdclk_init_hw(dev_priv);

	gen9_dbuf_enable(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);
}

static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	gen9_dbuf_disable(dev_priv);

	intel_cdclk_uninit_hw(dev_priv);

	/* The spec doesn't call for removing the reset handshake flag */

	/*
	 * Disable PW1 (PG1).
	 * Note that even though the driver's request is removed, power well 1
	 * may stay enabled after this due to DMC's own request on it.
	 */
	mutex_lock(&power_domains->lock);

	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);

	mutex_unlock(&power_domains->lock);

	usleep_range(10, 30); /* 10 us delay per Bspec */
}

struct buddy_page_mask {
	u32 page_mask;
	u8 type;
	u8 num_channels;
};

static const struct buddy_page_mask tgl_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0xF },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0xF },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1C },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1F },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1E },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 },
	{ .num_channels = 4, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x38 },
	{}
};

static const struct buddy_page_mask wa_1409767108_buddy_page_masks[] = {
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR4,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_DDR5,   .page_mask = 0x1 },
	{ .num_channels = 1, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x1 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR4,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_DDR5,   .page_mask = 0x3 },
	{ .num_channels = 2, .type = INTEL_DRAM_LPDDR5, .page_mask = 0x3 },
	{}
};
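/*
 * Example lookup (illustrative): a 2-channel DDR4 system selects
 * page_mask 0x1F from tgl_buddy_page_masks[], or 0x3 from
 * wa_1409767108_buddy_page_masks[] on steppings that need the workaround.
 * The empty terminating entry ({}) has page_mask == 0, which is how
 * tgl_bw_buddy_init() below detects an unknown memory configuration.
 */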
static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
{
	enum intel_dram_type type = dev_priv->dram_info.type;
	u8 num_channels = dev_priv->dram_info.num_channels;
	const struct buddy_page_mask *table;
	unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
	int config, i;

	/* BW_BUDDY registers are not used on dGPUs beyond DG1 */
	if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
		return;

	if (IS_ALDERLAKE_S(dev_priv) ||
	    IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
	    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
		/* Wa_1409767108:tgl,dg1,adl-s */
		table = wa_1409767108_buddy_page_masks;
	else
		table = tgl_buddy_page_masks;

	for (config = 0; table[config].page_mask != 0; config++)
		if (table[config].num_channels == num_channels &&
		    table[config].type == type)
			break;

	if (table[config].page_mask == 0) {
		drm_dbg(&dev_priv->drm,
			"Unknown memory configuration; disabling address buddy logic.\n");
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask))
			intel_de_write(dev_priv, BW_BUDDY_CTL(i),
				       BW_BUDDY_DISABLE);
	} else {
		for_each_set_bit(i, &abox_mask, sizeof(abox_mask)) {
			intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
				       table[config].page_mask);

			/* Wa_22010178259:tgl,dg1,rkl,adl-s */
			if (DISPLAY_VER(dev_priv) == 12)
				intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
					     BW_BUDDY_TLB_REQ_TIMER_MASK,
					     BW_BUDDY_TLB_REQ_TIMER(0x8));
		}
	}
}
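/*
 * icl_display_core_init() below implements the numbered display
 * initialization sequence for DISPLAY_VER >= 11 platforms; the matching
 * icl_display_core_uninit() further down walks the same steps in reverse.
 */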
static void icl_display_core_init(struct drm_i915_private *dev_priv,
				  bool resume)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;
	u32 val;

	gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);

	/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_JSP &&
	    INTEL_PCH_TYPE(dev_priv) < PCH_DG1)
		intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D, 0,
			     PCH_DPMGUNIT_CLOCK_GATE_DISABLE);

	/* 1. Enable PCH reset handshake. */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	if (!HAS_DISPLAY(dev_priv))
		return;

	/* 2. Initialize all combo phys */
	intel_combo_phy_init(dev_priv);

	/*
	 * 3. Enable Power Well 1 (PG1).
	 *    The AUX IO power wells will be enabled on demand.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_enable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 4. Enable CDCLK. */
	intel_cdclk_init_hw(dev_priv);

	if (DISPLAY_VER(dev_priv) >= 12)
		gen12_dbuf_slices_config(dev_priv);

	/* 5. Enable DBUF. */
	gen9_dbuf_enable(dev_priv);

	/* 6. Setup MBUS. */
	icl_mbus_init(dev_priv);

	/* 7. Program arbiter BW_BUDDY registers */
	if (DISPLAY_VER(dev_priv) >= 12)
		tgl_bw_buddy_init(dev_priv);

	/* 8. Ensure PHYs have completed calibration and adaptation */
	if (IS_DG2(dev_priv))
		intel_snps_phy_wait_for_calibration(dev_priv);

	if (resume && intel_dmc_has_payload(dev_priv))
		intel_dmc_load_program(dev_priv);

	/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
	if (DISPLAY_VER(dev_priv) >= 12) {
		val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
		      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
		intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0,
				 val);
	}

	/* Wa_14011503030:xelpd */
	if (DISPLAY_VER(dev_priv) >= 13)
		intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}

static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *well;

	if (!HAS_DISPLAY(dev_priv))
		return;

	gen9_disable_dc_states(dev_priv);

	/* 1. Disable all display engine functions -> already done */

	/* 2. Disable DBUF */
	gen9_dbuf_disable(dev_priv);

	/* 3. Disable CD clock */
	intel_cdclk_uninit_hw(dev_priv);

	/*
	 * 4. Disable Power Well 1 (PG1).
	 *    The AUX IO power wells are toggled on demand, so they are already
	 *    disabled at this point.
	 */
	mutex_lock(&power_domains->lock);
	well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
	intel_power_well_disable(dev_priv, well);
	mutex_unlock(&power_domains->lock);

	/* 5. Uninitialize the combo PHYs. */
	intel_combo_phy_uninit(dev_priv);
}
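/*
 * Lane status decoding used by chv_phy_control_init() below: each
 * DPLL_PORT*_READY_MASK field has one bit per lane. A value of 0xf (all
 * lanes ready) clears the mask so no power down override is needed;
 * anything else enables the override and records the current power down
 * state of the lanes.
 */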
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn_bc =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *cmn_d =
		lookup_power_well(dev_priv, CHV_DISP_PW_DPIO_CMN_D);

	/*
	 * DISPLAY_PHY_CONTROL can get corrupted if read. As a
	 * workaround never ever read DISPLAY_PHY_CONTROL, and
	 * instead maintain a shadow copy ourselves. Use the actual
	 * power well state and lane status to reconstruct the
	 * expected initial value.
	 */
	dev_priv->chv_phy_control =
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY0) |
		PHY_LDO_SEQ_DELAY(PHY_LDO_DELAY_600NS, DPIO_PHY1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH0) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY0, DPIO_CH1) |
		PHY_CH_POWER_MODE(PHY_CH_DEEP_PSR, DPIO_PHY1, DPIO_CH0);

	/*
	 * If all lanes are disabled we leave the override disabled
	 * with all power down bits cleared to match the state we
	 * would use after disabling the port. Otherwise enable the
	 * override and set the lane powerdown bits according to the
	 * current lane status.
	 */
	if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
		u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
		unsigned int mask;

		mask = status & DPLL_PORTB_READY_MASK;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH0);

		mask = (status & DPLL_PORTC_READY_MASK) >> 4;
		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);

		dev_priv->chv_phy_assert[DPIO_PHY0] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY0] = true;
	}

	if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
		u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
		unsigned int mask;

		mask = status & DPLL_PORTD_READY_MASK;

		if (mask == 0xf)
			mask = 0x0;
		else
			dev_priv->chv_phy_control |=
				PHY_CH_POWER_DOWN_OVRD_EN(DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |=
			PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);

		dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);

		dev_priv->chv_phy_assert[DPIO_PHY1] = false;
	} else {
		dev_priv->chv_phy_assert[DPIO_PHY1] = true;
	}

	drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
		    dev_priv->chv_phy_control);

	/* Defer application of initial phy_control to enabling the powerwell */
}
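/*
 * Note that vlv_cmnlane_wa() below drives the power well ->enable/->disable
 * hooks directly instead of going through intel_power_well_get()/put(), so
 * no reference counts are taken; it runs from intel_power_domains_init_hw()
 * before any display power references exist.
 */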
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
	struct i915_power_well *cmn =
		lookup_power_well(dev_priv, VLV_DISP_PW_DPIO_CMN_BC);
	struct i915_power_well *disp2d =
		lookup_power_well(dev_priv, VLV_DISP_PW_DISP2D);

	/* If the display might already be active, skip this */
	if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
	    disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
	    intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
		return;

	drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");

	/* cmnlane needs DPLL registers */
	disp2d->desc->ops->enable(dev_priv, disp2d);

	/*
	 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
	 * Need to assert and de-assert PHY SB reset by gating the
	 * common lane power, then un-gating it.
	 * Simply ungating isn't enough to reset the PHY enough to get
	 * ports and lanes running.
	 */
	cmn->desc->ops->disable(dev_priv, cmn);
}

static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
{
	bool ret;

	vlv_punit_get(dev_priv);
	ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
	vlv_punit_put(dev_priv);

	return ret;
}

static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
	drm_WARN(&dev_priv->drm,
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
		 "VED not power gated\n");
}

static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{
#ifdef notyet
	static const struct pci_device_id isp_ids[] = {
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
		{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
		{}
	};

	drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
		 !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
		 "ISP not power gated\n");
#endif
}

static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);

/**
 * intel_power_domains_init_hw - initialize hardware power domain state
 * @i915: i915 device instance
 * @resume: Called from resume code paths or not
 *
 * This function initializes the hardware power domain state and enables all
 * power wells belonging to the INIT power domain. Power wells in other
 * domains (and not in the INIT domain) are referenced or disabled by
 * intel_modeset_readout_hw_state(). After that the reference count of each
 * power well must match its HW enabled state, see
 * intel_power_domains_verify_state().
 *
 * It will return with power domains disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_driver_remove().
 */
void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	power_domains->initializing = true;

	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_init(i915, resume);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_init(i915, resume);
	} else if (DISPLAY_VER(i915) == 9) {
		skl_display_core_init(i915, resume);
	} else if (IS_CHERRYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		chv_phy_control_init(i915);
		mutex_unlock(&power_domains->lock);
		assert_isp_power_gated(i915);
	} else if (IS_VALLEYVIEW(i915)) {
		mutex_lock(&power_domains->lock);
		vlv_cmnlane_wa(i915);
		mutex_unlock(&power_domains->lock);
		assert_ved_power_gated(i915);
		assert_isp_power_gated(i915);
	} else if (IS_BROADWELL(i915) || IS_HASWELL(i915)) {
		hsw_assert_cdclk(i915);
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	} else if (IS_IVYBRIDGE(i915)) {
		intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
	}

	/*
	 * Keep all power wells enabled for any dependent HW access during
	 * initialization and to make sure we keep BIOS enabled display HW
	 * resources powered until display HW readout is complete. We drop
	 * this reference in intel_power_domains_enable().
	 */
	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	/*
	 * Take an extra reference if the user disallowed disabling the power
	 * wells (disable_power_well=0), keeping them enabled for good.
	 */
	if (!i915->params.disable_power_well) {
		drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
		i915->power_domains.disable_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_sync_hw(i915);

	power_domains->initializing = false;
}
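/*
 * Typical driver load/unload ordering around the function above
 * (illustrative sketch based on the kernel-doc, not an exhaustive list of
 * the steps in between):
 *
 *	intel_power_domains_init_hw(i915, false);
 *	... display HW state readout takes its own power references ...
 *	intel_power_domains_enable(i915);
 *	...
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_driver_remove(i915);
 */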
/**
 * intel_power_domains_driver_remove - deinitialize hw power domain state
 * @i915: i915 device instance
 *
 * De-initializes the display power domain HW state. It also ensures that the
 * device stays powered up so that the driver can be reloaded.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and must be paired with
 * intel_power_domains_init_hw().
 */
void intel_power_domains_driver_remove(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	/* Drop the extra reference taken when power well toggling was disallowed. */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work_sync(i915);

	intel_power_domains_verify_state(i915);

	/* Keep the power well enabled, but cancel its rpm wakeref. */
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

/**
 * intel_power_domains_enable - enable toggling of display power wells
 * @i915: i915 device instance
 *
 * Enable the on-demand enabling/disabling of the display power wells. Note
 * that power wells not belonging to POWER_DOMAIN_INIT are allowed to be
 * toggled only at specific points of the display modeset sequence, thus they
 * are not affected by the intel_power_domains_enable()/disable() calls. The
 * purpose of these functions is to keep the rest of the power wells enabled
 * until the end of display HW readout (which will acquire the power
 * references reflecting the current HW state).
 */
void intel_power_domains_enable(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&i915->power_domains.init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
	intel_power_domains_verify_state(i915);
}

/**
 * intel_power_domains_disable - disable toggling of display power wells
 * @i915: i915 device instance
 *
 * Disable the on-demand enabling/disabling of the display power wells. See
 * intel_power_domains_enable() for which power wells this call controls.
 */
void intel_power_domains_disable(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
	power_domains->init_wakeref =
		intel_display_power_get(i915, POWER_DOMAIN_INIT);

	intel_power_domains_verify_state(i915);
}
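/*
 * System suspend/resume pairing for the entry points below (illustrative
 * sketch based on the kernel-doc of each function):
 *
 *	intel_power_domains_disable(i915);
 *	intel_power_domains_suspend(i915, suspend_mode);
 *	... system sleep ...
 *	intel_power_domains_resume(i915);
 *	intel_power_domains_enable(i915);
 */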
/**
 * intel_power_domains_suspend - suspend power domain state
 * @i915: i915 device instance
 * @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
 *
 * This function prepares the hardware power domain state before entering
 * system suspend.
 *
 * It must be called with power domains already disabled (after a call to
 * intel_power_domains_disable()) and paired with intel_power_domains_resume().
 */
void intel_power_domains_suspend(struct drm_i915_private *i915,
				 enum i915_drm_suspend_mode suspend_mode)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	intel_wakeref_t wakeref __maybe_unused =
		fetch_and_zero(&power_domains->init_wakeref);

	intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);

	/*
	 * In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
	 * support don't manually deinit the power domains. This also means the
	 * DMC firmware will stay active, it will power down any HW
	 * resources as required and also enable deeper system power states
	 * that would be blocked if the firmware was inactive.
	 */
	if (!(i915->dmc.allowed_dc_mask & DC_STATE_EN_DC9) &&
	    suspend_mode == I915_DRM_SUSPEND_IDLE &&
	    intel_dmc_has_payload(i915)) {
		intel_display_power_flush_work(i915);
		intel_power_domains_verify_state(i915);
		return;
	}

	/*
	 * Even if power well support was disabled we still want to disable
	 * power wells if power domains must be deinitialized for suspend.
	 */
	if (!i915->params.disable_power_well)
		intel_display_power_put(i915, POWER_DOMAIN_INIT,
					fetch_and_zero(&i915->power_domains.disable_wakeref));

	intel_display_power_flush_work(i915);
	intel_power_domains_verify_state(i915);

	if (DISPLAY_VER(i915) >= 11)
		icl_display_core_uninit(i915);
	else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
		bxt_display_core_uninit(i915);
	else if (DISPLAY_VER(i915) == 9)
		skl_display_core_uninit(i915);

	power_domains->display_core_suspended = true;
}

/**
 * intel_power_domains_resume - resume power domain state
 * @i915: i915 device instance
 *
 * This function resumes the hardware power domain state during system resume.
 *
 * It will return with power domain support disabled (to be enabled later by
 * intel_power_domains_enable()) and must be paired with
 * intel_power_domains_suspend().
 */
void intel_power_domains_resume(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;

	if (power_domains->display_core_suspended) {
		intel_power_domains_init_hw(i915, true);
		power_domains->display_core_suspended = false;
	} else {
		drm_WARN_ON(&i915->drm, power_domains->init_wakeref);
		power_domains->init_wakeref =
			intel_display_power_get(i915, POWER_DOMAIN_INIT);
	}

	intel_power_domains_verify_state(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;

	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;

		drm_dbg(&i915->drm, "%-25s %d\n",
			power_well->desc->name, power_well->count);

		for_each_power_domain(domain, power_well->desc->domains)
			drm_dbg(&i915->drm, " %-23s %d\n",
				intel_display_power_domain_str(domain),
				power_domains->domain_use_count[domain]);
	}
}

/**
 * intel_power_domains_verify_state - verify the HW/SW state for all power wells
 * @i915: i915 device instance
 *
 * Verify if the reference count of each power well matches its HW enabled
 * state and the total refcount of the domains it belongs to. This must be
 * called after modeset HW state sanitization, which is responsible for
 * acquiring reference counts for any power wells in use and disabling the
 * ones left on by BIOS but not required by any active output.
 */
static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
	struct i915_power_domains *power_domains = &i915->power_domains;
	struct i915_power_well *power_well;
	bool dump_domain_info;

	mutex_lock(&power_domains->lock);

	verify_async_put_domains_state(power_domains);

	dump_domain_info = false;
	for_each_power_well(i915, power_well) {
		enum intel_display_power_domain domain;
		int domains_count;
		bool enabled;

		enabled = power_well->desc->ops->is_enabled(i915, power_well);
		if ((power_well->count || power_well->desc->always_on) !=
		    enabled)
			drm_err(&i915->drm,
				"power well %s state mismatch (refcount %d/enabled %d)",
				power_well->desc->name,
				power_well->count, enabled);

		domains_count = 0;
		for_each_power_domain(domain, power_well->desc->domains)
			domains_count += power_domains->domain_use_count[domain];

		if (power_well->count != domains_count) {
			drm_err(&i915->drm,
				"power well %s refcount/domain refcount mismatch "
				"(refcount %d/domains refcount %d)\n",
				power_well->desc->name, power_well->count,
				domains_count);
			dump_domain_info = true;
		}
	}

	if (dump_domain_info) {
		static bool dumped;

		if (!dumped) {
			intel_power_domains_dump_info(i915);
			dumped = true;
		}
	}

	mutex_unlock(&power_domains->lock);
}

#else

static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}

#endif
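/*
 * The remaining helpers pair up as follows: _suspend_late()/_resume_early()
 * only toggle DC9/PC8 (plus the Wa_14010685332 chicken bit), while
 * _suspend()/_resume() also deinit/reinit the display core where applicable.
 */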
void intel_display_power_suspend_late(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
			     SBCLK_RUN_REFCLK_DIS);
}

void intel_display_power_resume_early(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
	    IS_BROXTON(i915)) {
		gen9_sanitize_dc_state(i915);
		bxt_disable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}

	/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
	if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
		intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
}

void intel_display_power_suspend(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		icl_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_display_core_uninit(i915);
		bxt_enable_dc9(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_enable_pc8(i915);
	}
}

void intel_display_power_resume(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 11) {
		bxt_disable_dc9(i915);
		icl_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915)) {
			if (i915->dmc.allowed_dc_mask &
			    DC_STATE_EN_UPTO_DC6)
				skl_enable_dc6(i915);
			else if (i915->dmc.allowed_dc_mask &
				 DC_STATE_EN_UPTO_DC5)
				gen9_enable_dc5(i915);
		}
	} else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
		bxt_disable_dc9(i915);
		bxt_display_core_init(i915, true);
		if (intel_dmc_has_payload(i915) &&
		    (i915->dmc.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
			gen9_enable_dc5(i915);
	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
		hsw_disable_pc8(i915);
	}
}