/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/module.h>
#include <machine/clock.h>

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, going as low as 0V while in this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled; as soon as a new workload arrives, the GPU wakes up automatically
 * as well.
 *
 * There are different RC6 modes available on Intel GPUs, which differ from
 * each other in the latency required to enter and leave RC6 and in the
 * voltage consumed by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
 * RC6pp is the deepest RC6. Hardware support for them varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
 * one which brings the most power savings; deeper states save more power,
 * but require a higher latency to switch to and wake up from.
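 *
 * (Illustrative example, not part of the original comment: a platform that
 * only supports plain RC6 would pass INTEL_RC6_ENABLE alone, while passing
 * INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE would additionally allow deep RC6.)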
49 */ 50 #define INTEL_RC6_ENABLE (1<<0) 51 #define INTEL_RC6p_ENABLE (1<<1) 52 #define INTEL_RC6pp_ENABLE (1<<2) 53 54 static void gen9_init_clock_gating(struct drm_device *dev) 55 { 56 struct drm_i915_private *dev_priv = dev->dev_private; 57 58 /* WaEnableLbsSlaRetryTimerDecrement:skl */ 59 I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | 60 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 61 } 62 63 static void skl_init_clock_gating(struct drm_device *dev) 64 { 65 struct drm_i915_private *dev_priv = dev->dev_private; 66 67 gen9_init_clock_gating(dev); 68 69 if (INTEL_REVID(dev) <= SKL_REVID_B0) { 70 /* 71 * WaDisableSDEUnitClockGating:skl 72 * WaSetGAPSunitClckGateDisable:skl 73 */ 74 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 75 GEN8_GAPSUNIT_CLOCK_GATE_DISABLE | 76 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 77 78 /* WaDisableVFUnitClockGating:skl */ 79 I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) | 80 GEN6_VFUNIT_CLOCK_GATE_DISABLE); 81 } 82 83 if (INTEL_REVID(dev) <= SKL_REVID_D0) { 84 /* WaDisableHDCInvalidation:skl */ 85 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | 86 BDW_DISABLE_HDC_INVALIDATION); 87 88 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ 89 I915_WRITE(FF_SLICE_CS_CHICKEN2, 90 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); 91 } 92 93 if (INTEL_REVID(dev) <= SKL_REVID_E0) 94 /* WaDisableLSQCROPERFforOCL:skl */ 95 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 96 GEN8_LQSC_RO_PERF_DIS); 97 } 98 99 static void bxt_init_clock_gating(struct drm_device *dev) 100 { 101 struct drm_i915_private *dev_priv = dev->dev_private; 102 103 gen9_init_clock_gating(dev); 104 105 /* 106 * FIXME: 107 * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only. 108 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. 109 */ 110 /* WaDisableSDEUnitClockGating:bxt */ 111 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 112 GEN8_SDEUNIT_CLOCK_GATE_DISABLE | 113 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); 114 115 /* FIXME: apply on A0 only */ 116 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 117 } 118 119 static void i915_pineview_get_mem_freq(struct drm_device *dev) 120 { 121 struct drm_i915_private *dev_priv = dev->dev_private; 122 u32 tmp; 123 124 tmp = I915_READ(CLKCFG); 125 126 switch (tmp & CLKCFG_FSB_MASK) { 127 case CLKCFG_FSB_533: 128 dev_priv->fsb_freq = 533; /* 133*4 */ 129 break; 130 case CLKCFG_FSB_800: 131 dev_priv->fsb_freq = 800; /* 200*4 */ 132 break; 133 case CLKCFG_FSB_667: 134 dev_priv->fsb_freq = 667; /* 167*4 */ 135 break; 136 case CLKCFG_FSB_400: 137 dev_priv->fsb_freq = 400; /* 100*4 */ 138 break; 139 } 140 141 switch (tmp & CLKCFG_MEM_MASK) { 142 case CLKCFG_MEM_533: 143 dev_priv->mem_freq = 533; 144 break; 145 case CLKCFG_MEM_667: 146 dev_priv->mem_freq = 667; 147 break; 148 case CLKCFG_MEM_800: 149 dev_priv->mem_freq = 800; 150 break; 151 } 152 153 /* detect pineview DDR3 setting */ 154 tmp = I915_READ(CSHRDDR3CTL); 155 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 
1 : 0; 156 } 157 158 static void i915_ironlake_get_mem_freq(struct drm_device *dev) 159 { 160 struct drm_i915_private *dev_priv = dev->dev_private; 161 u16 ddrpll, csipll; 162 163 ddrpll = I915_READ16(DDRMPLL1); 164 csipll = I915_READ16(CSIPLL0); 165 166 switch (ddrpll & 0xff) { 167 case 0xc: 168 dev_priv->mem_freq = 800; 169 break; 170 case 0x10: 171 dev_priv->mem_freq = 1066; 172 break; 173 case 0x14: 174 dev_priv->mem_freq = 1333; 175 break; 176 case 0x18: 177 dev_priv->mem_freq = 1600; 178 break; 179 default: 180 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n", 181 ddrpll & 0xff); 182 dev_priv->mem_freq = 0; 183 break; 184 } 185 186 dev_priv->ips.r_t = dev_priv->mem_freq; 187 188 switch (csipll & 0x3ff) { 189 case 0x00c: 190 dev_priv->fsb_freq = 3200; 191 break; 192 case 0x00e: 193 dev_priv->fsb_freq = 3733; 194 break; 195 case 0x010: 196 dev_priv->fsb_freq = 4266; 197 break; 198 case 0x012: 199 dev_priv->fsb_freq = 4800; 200 break; 201 case 0x014: 202 dev_priv->fsb_freq = 5333; 203 break; 204 case 0x016: 205 dev_priv->fsb_freq = 5866; 206 break; 207 case 0x018: 208 dev_priv->fsb_freq = 6400; 209 break; 210 default: 211 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n", 212 csipll & 0x3ff); 213 dev_priv->fsb_freq = 0; 214 break; 215 } 216 217 if (dev_priv->fsb_freq == 3200) { 218 dev_priv->ips.c_m = 0; 219 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { 220 dev_priv->ips.c_m = 1; 221 } else { 222 dev_priv->ips.c_m = 2; 223 } 224 } 225 226 static const struct cxsr_latency cxsr_latency_table[] = { 227 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 228 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 229 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 230 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ 231 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ 232 233 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 234 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 235 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 236 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ 237 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ 238 239 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 240 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 241 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 242 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ 243 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ 244 245 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 246 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ 247 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 248 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ 249 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ 250 251 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 252 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 253 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 254 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ 255 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ 256 257 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 258 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 259 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 260 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ 261 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ 
262 }; 263 264 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, 265 int is_ddr3, 266 int fsb, 267 int mem) 268 { 269 const struct cxsr_latency *latency; 270 int i; 271 272 if (fsb == 0 || mem == 0) 273 return NULL; 274 275 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { 276 latency = &cxsr_latency_table[i]; 277 if (is_desktop == latency->is_desktop && 278 is_ddr3 == latency->is_ddr3 && 279 fsb == latency->fsb_freq && mem == latency->mem_freq) 280 return latency; 281 } 282 283 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 284 285 return NULL; 286 } 287 288 static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) 289 { 290 u32 val; 291 292 mutex_lock(&dev_priv->rps.hw_lock); 293 294 val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); 295 if (enable) 296 val &= ~FORCE_DDR_HIGH_FREQ; 297 else 298 val |= FORCE_DDR_HIGH_FREQ; 299 val &= ~FORCE_DDR_LOW_FREQ; 300 val |= FORCE_DDR_FREQ_REQ_ACK; 301 vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); 302 303 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & 304 FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) 305 DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); 306 307 mutex_unlock(&dev_priv->rps.hw_lock); 308 } 309 310 static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) 311 { 312 u32 val; 313 314 mutex_lock(&dev_priv->rps.hw_lock); 315 316 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 317 if (enable) 318 val |= DSP_MAXFIFO_PM5_ENABLE; 319 else 320 val &= ~DSP_MAXFIFO_PM5_ENABLE; 321 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); 322 323 mutex_unlock(&dev_priv->rps.hw_lock); 324 } 325 326 #define FW_WM(value, plane) \ 327 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) 328 329 void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) 330 { 331 struct drm_device *dev = dev_priv->dev; 332 u32 val; 333 334 if (IS_VALLEYVIEW(dev)) { 335 I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); 336 if (IS_CHERRYVIEW(dev)) 337 chv_set_memory_pm5(dev_priv, enable); 338 } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { 339 I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); 340 } else if (IS_PINEVIEW(dev)) { 341 val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN; 342 val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0; 343 I915_WRITE(DSPFW3, val); 344 } else if (IS_I945G(dev) || IS_I945GM(dev)) { 345 val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : 346 _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); 347 I915_WRITE(FW_BLC_SELF, val); 348 } else if (IS_I915GM(dev)) { 349 val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) : 350 _MASKED_BIT_DISABLE(INSTPM_SELF_EN); 351 I915_WRITE(INSTPM, val); 352 } else { 353 return; 354 } 355 356 DRM_DEBUG_KMS("memory self-refresh is %s\n", 357 enable ? "enabled" : "disabled"); 358 } 359 360 361 /* 362 * Latency for FIFO fetches is dependent on several factors: 363 * - memory configuration (speed, channels) 364 * - chipset 365 * - current MCH state 366 * It can be fairly high in some situations, so here we assume a fairly 367 * pessimal value. It's a tradeoff between extra memory fetches (if we 368 * set this value too high, the FIFO will fetch frequently to stay full) 369 * and power consumption (set it too low to save power and we might see 370 * FIFO underruns and display "flicker"). 371 * 372 * A value of 5us seems to be a good balance; safe for very low end 373 * platforms but not overly aggressive on lower latency configs. 
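 *
 * (Illustrative arithmetic, with made-up numbers: plugging 5000 ns into the
 * intel_calculate_wm() formula below, a 100,000 kHz pixel clock at 4 bytes
 * per pixel needs (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes of data in
 * flight during the latency window, i.e. roughly 32 cachelines of 64 bytes.)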
374 */ 375 static const int pessimal_latency_ns = 5000; 376 377 #define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ 378 ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) 379 380 static int vlv_get_fifo_size(struct drm_device *dev, 381 enum i915_pipe pipe, int plane) 382 { 383 struct drm_i915_private *dev_priv = dev->dev_private; 384 int sprite0_start, sprite1_start, size; 385 386 switch (pipe) { 387 uint32_t dsparb, dsparb2, dsparb3; 388 case PIPE_A: 389 dsparb = I915_READ(DSPARB); 390 dsparb2 = I915_READ(DSPARB2); 391 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); 392 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); 393 break; 394 case PIPE_B: 395 dsparb = I915_READ(DSPARB); 396 dsparb2 = I915_READ(DSPARB2); 397 sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); 398 sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); 399 break; 400 case PIPE_C: 401 dsparb2 = I915_READ(DSPARB2); 402 dsparb3 = I915_READ(DSPARB3); 403 sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); 404 sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); 405 break; 406 default: 407 return 0; 408 } 409 410 switch (plane) { 411 case 0: 412 size = sprite0_start; 413 break; 414 case 1: 415 size = sprite1_start - sprite0_start; 416 break; 417 case 2: 418 size = 512 - 1 - sprite1_start; 419 break; 420 default: 421 return 0; 422 } 423 424 DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n", 425 pipe_name(pipe), plane == 0 ? "primary" : "sprite", 426 plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1), 427 size); 428 429 return size; 430 } 431 432 static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 433 { 434 struct drm_i915_private *dev_priv = dev->dev_private; 435 uint32_t dsparb = I915_READ(DSPARB); 436 int size; 437 438 size = dsparb & 0x7f; 439 if (plane) 440 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; 441 442 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 443 plane ? "B" : "A", size); 444 445 return size; 446 } 447 448 static int i830_get_fifo_size(struct drm_device *dev, int plane) 449 { 450 struct drm_i915_private *dev_priv = dev->dev_private; 451 uint32_t dsparb = I915_READ(DSPARB); 452 int size; 453 454 size = dsparb & 0x1ff; 455 if (plane) 456 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; 457 size >>= 1; /* Convert to cachelines */ 458 459 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 460 plane ? "B" : "A", size); 461 462 return size; 463 } 464 465 static int i845_get_fifo_size(struct drm_device *dev, int plane) 466 { 467 struct drm_i915_private *dev_priv = dev->dev_private; 468 uint32_t dsparb = I915_READ(DSPARB); 469 int size; 470 471 size = dsparb & 0x7f; 472 size >>= 2; /* Convert to cachelines */ 473 474 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 475 plane ? 
"B" : "A", 476 size); 477 478 return size; 479 } 480 481 /* Pineview has different values for various configs */ 482 static const struct intel_watermark_params pineview_display_wm = { 483 .fifo_size = PINEVIEW_DISPLAY_FIFO, 484 .max_wm = PINEVIEW_MAX_WM, 485 .default_wm = PINEVIEW_DFT_WM, 486 .guard_size = PINEVIEW_GUARD_WM, 487 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 488 }; 489 static const struct intel_watermark_params pineview_display_hplloff_wm = { 490 .fifo_size = PINEVIEW_DISPLAY_FIFO, 491 .max_wm = PINEVIEW_MAX_WM, 492 .default_wm = PINEVIEW_DFT_HPLLOFF_WM, 493 .guard_size = PINEVIEW_GUARD_WM, 494 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 495 }; 496 static const struct intel_watermark_params pineview_cursor_wm = { 497 .fifo_size = PINEVIEW_CURSOR_FIFO, 498 .max_wm = PINEVIEW_CURSOR_MAX_WM, 499 .default_wm = PINEVIEW_CURSOR_DFT_WM, 500 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 501 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 502 }; 503 static const struct intel_watermark_params pineview_cursor_hplloff_wm = { 504 .fifo_size = PINEVIEW_CURSOR_FIFO, 505 .max_wm = PINEVIEW_CURSOR_MAX_WM, 506 .default_wm = PINEVIEW_CURSOR_DFT_WM, 507 .guard_size = PINEVIEW_CURSOR_GUARD_WM, 508 .cacheline_size = PINEVIEW_FIFO_LINE_SIZE, 509 }; 510 static const struct intel_watermark_params g4x_wm_info = { 511 .fifo_size = G4X_FIFO_SIZE, 512 .max_wm = G4X_MAX_WM, 513 .default_wm = G4X_MAX_WM, 514 .guard_size = 2, 515 .cacheline_size = G4X_FIFO_LINE_SIZE, 516 }; 517 static const struct intel_watermark_params g4x_cursor_wm_info = { 518 .fifo_size = I965_CURSOR_FIFO, 519 .max_wm = I965_CURSOR_MAX_WM, 520 .default_wm = I965_CURSOR_DFT_WM, 521 .guard_size = 2, 522 .cacheline_size = G4X_FIFO_LINE_SIZE, 523 }; 524 static const struct intel_watermark_params valleyview_wm_info = { 525 .fifo_size = VALLEYVIEW_FIFO_SIZE, 526 .max_wm = VALLEYVIEW_MAX_WM, 527 .default_wm = VALLEYVIEW_MAX_WM, 528 .guard_size = 2, 529 .cacheline_size = G4X_FIFO_LINE_SIZE, 530 }; 531 static const struct intel_watermark_params valleyview_cursor_wm_info = { 532 .fifo_size = I965_CURSOR_FIFO, 533 .max_wm = VALLEYVIEW_CURSOR_MAX_WM, 534 .default_wm = I965_CURSOR_DFT_WM, 535 .guard_size = 2, 536 .cacheline_size = G4X_FIFO_LINE_SIZE, 537 }; 538 static const struct intel_watermark_params i965_cursor_wm_info = { 539 .fifo_size = I965_CURSOR_FIFO, 540 .max_wm = I965_CURSOR_MAX_WM, 541 .default_wm = I965_CURSOR_DFT_WM, 542 .guard_size = 2, 543 .cacheline_size = I915_FIFO_LINE_SIZE, 544 }; 545 static const struct intel_watermark_params i945_wm_info = { 546 .fifo_size = I945_FIFO_SIZE, 547 .max_wm = I915_MAX_WM, 548 .default_wm = 1, 549 .guard_size = 2, 550 .cacheline_size = I915_FIFO_LINE_SIZE, 551 }; 552 static const struct intel_watermark_params i915_wm_info = { 553 .fifo_size = I915_FIFO_SIZE, 554 .max_wm = I915_MAX_WM, 555 .default_wm = 1, 556 .guard_size = 2, 557 .cacheline_size = I915_FIFO_LINE_SIZE, 558 }; 559 static const struct intel_watermark_params i830_a_wm_info = { 560 .fifo_size = I855GM_FIFO_SIZE, 561 .max_wm = I915_MAX_WM, 562 .default_wm = 1, 563 .guard_size = 2, 564 .cacheline_size = I830_FIFO_LINE_SIZE, 565 }; 566 static const struct intel_watermark_params i830_bc_wm_info = { 567 .fifo_size = I855GM_FIFO_SIZE, 568 .max_wm = I915_MAX_WM/2, 569 .default_wm = 1, 570 .guard_size = 2, 571 .cacheline_size = I830_FIFO_LINE_SIZE, 572 }; 573 static const struct intel_watermark_params i845_wm_info = { 574 .fifo_size = I830_FIFO_SIZE, 575 .max_wm = I915_MAX_WM, 576 .default_wm = 1, 577 .guard_size = 2, 578 .cacheline_size = 
I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the display FIFO, in cachelines
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point. If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
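	 *
	 * (Illustrative, with made-up numbers: if entries_required plus the
	 * guard nearly fills the FIFO, wm_size can come out as small as 1-7;
	 * the check below then raises it so the returned watermark never
	 * drops below the burst size of 8.)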
635 */ 636 if (wm_size <= 8) 637 wm_size = 8; 638 639 return wm_size; 640 } 641 642 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) 643 { 644 struct drm_crtc *crtc, *enabled = NULL; 645 646 for_each_crtc(dev, crtc) { 647 if (intel_crtc_active(crtc)) { 648 if (enabled) 649 return NULL; 650 enabled = crtc; 651 } 652 } 653 654 return enabled; 655 } 656 657 static void pineview_update_wm(struct drm_crtc *unused_crtc) 658 { 659 struct drm_device *dev = unused_crtc->dev; 660 struct drm_i915_private *dev_priv = dev->dev_private; 661 struct drm_crtc *crtc; 662 const struct cxsr_latency *latency; 663 u32 reg; 664 unsigned long wm; 665 666 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, 667 dev_priv->fsb_freq, dev_priv->mem_freq); 668 if (!latency) { 669 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 670 intel_set_memory_cxsr(dev_priv, false); 671 return; 672 } 673 674 crtc = single_enabled_crtc(dev); 675 if (crtc) { 676 const struct drm_display_mode *adjusted_mode; 677 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; 678 int clock; 679 680 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 681 clock = adjusted_mode->crtc_clock; 682 683 /* Display SR */ 684 wm = intel_calculate_wm(clock, &pineview_display_wm, 685 pineview_display_wm.fifo_size, 686 pixel_size, latency->display_sr); 687 reg = I915_READ(DSPFW1); 688 reg &= ~DSPFW_SR_MASK; 689 reg |= FW_WM(wm, SR); 690 I915_WRITE(DSPFW1, reg); 691 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); 692 693 /* cursor SR */ 694 wm = intel_calculate_wm(clock, &pineview_cursor_wm, 695 pineview_display_wm.fifo_size, 696 pixel_size, latency->cursor_sr); 697 reg = I915_READ(DSPFW3); 698 reg &= ~DSPFW_CURSOR_SR_MASK; 699 reg |= FW_WM(wm, CURSOR_SR); 700 I915_WRITE(DSPFW3, reg); 701 702 /* Display HPLL off SR */ 703 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, 704 pineview_display_hplloff_wm.fifo_size, 705 pixel_size, latency->display_hpll_disable); 706 reg = I915_READ(DSPFW3); 707 reg &= ~DSPFW_HPLL_SR_MASK; 708 reg |= FW_WM(wm, HPLL_SR); 709 I915_WRITE(DSPFW3, reg); 710 711 /* cursor HPLL off SR */ 712 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, 713 pineview_display_hplloff_wm.fifo_size, 714 pixel_size, latency->cursor_hpll_disable); 715 reg = I915_READ(DSPFW3); 716 reg &= ~DSPFW_HPLL_CURSOR_MASK; 717 reg |= FW_WM(wm, HPLL_CURSOR); 718 I915_WRITE(DSPFW3, reg); 719 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); 720 721 intel_set_memory_cxsr(dev_priv, true); 722 } else { 723 intel_set_memory_cxsr(dev_priv, false); 724 } 725 } 726 727 static bool g4x_compute_wm0(struct drm_device *dev, 728 int plane, 729 const struct intel_watermark_params *display, 730 int display_latency_ns, 731 const struct intel_watermark_params *cursor, 732 int cursor_latency_ns, 733 int *plane_wm, 734 int *cursor_wm) 735 { 736 struct drm_crtc *crtc; 737 const struct drm_display_mode *adjusted_mode; 738 int htotal, hdisplay, clock, pixel_size; 739 int line_time_us, line_count; 740 int entries, tlb_miss; 741 742 crtc = intel_get_crtc_for_plane(dev, plane); 743 if (!intel_crtc_active(crtc)) { 744 *cursor_wm = cursor->guard_size; 745 *plane_wm = display->guard_size; 746 return false; 747 } 748 749 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 750 clock = adjusted_mode->crtc_clock; 751 htotal = adjusted_mode->crtc_htotal; 752 hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; 753 pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; 754 755 /* Use the small 
buffer method to calculate plane watermark */ 756 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 757 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; 758 if (tlb_miss > 0) 759 entries += tlb_miss; 760 entries = DIV_ROUND_UP(entries, display->cacheline_size); 761 *plane_wm = entries + display->guard_size; 762 if (*plane_wm > (int)display->max_wm) 763 *plane_wm = display->max_wm; 764 765 /* Use the large buffer method to calculate cursor watermark */ 766 line_time_us = max(htotal * 1000 / clock, 1); 767 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; 768 entries = line_count * crtc->cursor->state->crtc_w * pixel_size; 769 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; 770 if (tlb_miss > 0) 771 entries += tlb_miss; 772 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 773 *cursor_wm = entries + cursor->guard_size; 774 if (*cursor_wm > (int)cursor->max_wm) 775 *cursor_wm = (int)cursor->max_wm; 776 777 return true; 778 } 779 780 /* 781 * Check the wm result. 782 * 783 * If any calculated watermark values is larger than the maximum value that 784 * can be programmed into the associated watermark register, that watermark 785 * must be disabled. 786 */ 787 static bool g4x_check_srwm(struct drm_device *dev, 788 int display_wm, int cursor_wm, 789 const struct intel_watermark_params *display, 790 const struct intel_watermark_params *cursor) 791 { 792 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", 793 display_wm, cursor_wm); 794 795 if (display_wm > display->max_wm) { 796 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", 797 display_wm, display->max_wm); 798 return false; 799 } 800 801 if (cursor_wm > cursor->max_wm) { 802 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", 803 cursor_wm, cursor->max_wm); 804 return false; 805 } 806 807 if (!(display_wm || cursor_wm)) { 808 DRM_DEBUG_KMS("SR latency is 0, disabling\n"); 809 return false; 810 } 811 812 return true; 813 } 814 815 static bool g4x_compute_srwm(struct drm_device *dev, 816 int plane, 817 int latency_ns, 818 const struct intel_watermark_params *display, 819 const struct intel_watermark_params *cursor, 820 int *display_wm, int *cursor_wm) 821 { 822 struct drm_crtc *crtc; 823 const struct drm_display_mode *adjusted_mode; 824 int hdisplay, htotal, pixel_size, clock; 825 unsigned long line_time_us; 826 int line_count, line_size; 827 int small, large; 828 int entries; 829 830 if (!latency_ns) { 831 *display_wm = *cursor_wm = 0; 832 return false; 833 } 834 835 crtc = intel_get_crtc_for_plane(dev, plane); 836 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 837 clock = adjusted_mode->crtc_clock; 838 htotal = adjusted_mode->crtc_htotal; 839 hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; 840 pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; 841 842 line_time_us = max(htotal * 1000 / clock, 1); 843 line_count = (latency_ns / line_time_us + 1000) / 1000; 844 line_size = hdisplay * pixel_size; 845 846 /* Use the minimum of the small and large buffer method for primary */ 847 small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 848 large = line_count * line_size; 849 850 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); 851 *display_wm = entries + display->guard_size; 852 853 /* calculate the self-refresh watermark for display cursor */ 854 entries = line_count * pixel_size * crtc->cursor->state->crtc_w; 855 entries = DIV_ROUND_UP(entries, cursor->cacheline_size); 856 
*cursor_wm = entries + cursor->guard_size; 857 858 return g4x_check_srwm(dev, 859 *display_wm, *cursor_wm, 860 display, cursor); 861 } 862 863 #define FW_WM_VLV(value, plane) \ 864 (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) 865 866 static void vlv_write_wm_values(struct intel_crtc *crtc, 867 const struct vlv_wm_values *wm) 868 { 869 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 870 enum i915_pipe pipe = crtc->pipe; 871 872 I915_WRITE(VLV_DDL(pipe), 873 (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) | 874 (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) | 875 (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) | 876 (wm->ddl[pipe].primary << DDL_PLANE_SHIFT)); 877 878 I915_WRITE(DSPFW1, 879 FW_WM(wm->sr.plane, SR) | 880 FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) | 881 FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) | 882 FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA)); 883 I915_WRITE(DSPFW2, 884 FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) | 885 FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) | 886 FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA)); 887 I915_WRITE(DSPFW3, 888 FW_WM(wm->sr.cursor, CURSOR_SR)); 889 890 if (IS_CHERRYVIEW(dev_priv)) { 891 I915_WRITE(DSPFW7_CHV, 892 FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | 893 FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); 894 I915_WRITE(DSPFW8_CHV, 895 FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) | 896 FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE)); 897 I915_WRITE(DSPFW9_CHV, 898 FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) | 899 FW_WM(wm->pipe[PIPE_C].cursor, CURSORC)); 900 I915_WRITE(DSPHOWM, 901 FW_WM(wm->sr.plane >> 9, SR_HI) | 902 FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) | 903 FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) | 904 FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) | 905 FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | 906 FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | 907 FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | 908 FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | 909 FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | 910 FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); 911 } else { 912 I915_WRITE(DSPFW7, 913 FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | 914 FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); 915 I915_WRITE(DSPHOWM, 916 FW_WM(wm->sr.plane >> 9, SR_HI) | 917 FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | 918 FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | 919 FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | 920 FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | 921 FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | 922 FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); 923 } 924 925 POSTING_READ(DSPFW1); 926 927 dev_priv->wm.vlv = *wm; 928 } 929 930 #undef FW_WM_VLV 931 932 static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc, 933 struct drm_plane *plane) 934 { 935 struct drm_device *dev = crtc->dev; 936 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 937 int entries, prec_mult, drain_latency, pixel_size; 938 int clock = intel_crtc->config->base.adjusted_mode.crtc_clock; 939 const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64; 940 941 /* 942 * FIXME the plane might have an fb 943 * but be invisible (eg. 
due to clipping) 944 */ 945 if (!intel_crtc->active || !plane->state->fb) 946 return 0; 947 948 if (WARN(clock == 0, "Pixel clock is zero!\n")) 949 return 0; 950 951 pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0); 952 953 if (WARN(pixel_size == 0, "Pixel size is zero!\n")) 954 return 0; 955 956 entries = DIV_ROUND_UP(clock, 1000) * pixel_size; 957 958 prec_mult = high_precision; 959 drain_latency = 64 * prec_mult * 4 / entries; 960 961 if (drain_latency > DRAIN_LATENCY_MASK) { 962 prec_mult /= 2; 963 drain_latency = 64 * prec_mult * 4 / entries; 964 } 965 966 if (drain_latency > DRAIN_LATENCY_MASK) 967 drain_latency = DRAIN_LATENCY_MASK; 968 969 return drain_latency | (prec_mult == high_precision ? 970 DDL_PRECISION_HIGH : DDL_PRECISION_LOW); 971 } 972 973 static int vlv_compute_wm(struct intel_crtc *crtc, 974 struct intel_plane *plane, 975 int fifo_size) 976 { 977 int clock, entries, pixel_size; 978 979 /* 980 * FIXME the plane might have an fb 981 * but be invisible (eg. due to clipping) 982 */ 983 if (!crtc->active || !plane->base.state->fb) 984 return 0; 985 986 pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0); 987 clock = crtc->config->base.adjusted_mode.crtc_clock; 988 989 entries = DIV_ROUND_UP(clock, 1000) * pixel_size; 990 991 /* 992 * Set up the watermark such that we don't start issuing memory 993 * requests until we are within PND's max deadline value (256us). 994 * Idea being to be idle as long as possible while still taking 995 * advatange of PND's deadline scheduling. The limit of 8 996 * cachelines (used when the FIFO will anyway drain in less time 997 * than 256us) should match what we would be done if trickle 998 * feed were enabled. 999 */ 1000 return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8); 1001 } 1002 1003 static bool vlv_compute_sr_wm(struct drm_device *dev, 1004 struct vlv_wm_values *wm) 1005 { 1006 struct drm_i915_private *dev_priv = to_i915(dev); 1007 struct drm_crtc *crtc; 1008 enum i915_pipe pipe = INVALID_PIPE; 1009 int num_planes = 0; 1010 int fifo_size = 0; 1011 struct intel_plane *plane; 1012 1013 wm->sr.cursor = wm->sr.plane = 0; 1014 1015 crtc = single_enabled_crtc(dev); 1016 /* maxfifo not supported on pipe C */ 1017 if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) { 1018 pipe = to_intel_crtc(crtc)->pipe; 1019 num_planes = !!wm->pipe[pipe].primary + 1020 !!wm->pipe[pipe].sprite[0] + 1021 !!wm->pipe[pipe].sprite[1]; 1022 fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; 1023 } 1024 1025 if (fifo_size == 0 || num_planes > 1) 1026 return false; 1027 1028 wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc), 1029 to_intel_plane(crtc->cursor), 0x3f); 1030 1031 list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) { 1032 if (plane->base.type == DRM_PLANE_TYPE_CURSOR) 1033 continue; 1034 1035 if (plane->pipe != pipe) 1036 continue; 1037 1038 wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc), 1039 plane, fifo_size); 1040 if (wm->sr.plane != 0) 1041 break; 1042 } 1043 1044 return true; 1045 } 1046 1047 static void valleyview_update_wm(struct drm_crtc *crtc) 1048 { 1049 struct drm_device *dev = crtc->dev; 1050 struct drm_i915_private *dev_priv = dev->dev_private; 1051 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1052 enum i915_pipe pipe = intel_crtc->pipe; 1053 bool cxsr_enabled; 1054 struct vlv_wm_values wm = dev_priv->wm.vlv; 1055 1056 wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary); 1057 wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc, 1058 
to_intel_plane(crtc->primary), 1059 vlv_get_fifo_size(dev, pipe, 0)); 1060 1061 wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor); 1062 wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc, 1063 to_intel_plane(crtc->cursor), 1064 0x3f); 1065 1066 cxsr_enabled = vlv_compute_sr_wm(dev, &wm); 1067 1068 if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) 1069 return; 1070 1071 DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " 1072 "SR: plane=%d, cursor=%d\n", pipe_name(pipe), 1073 wm.pipe[pipe].primary, wm.pipe[pipe].cursor, 1074 wm.sr.plane, wm.sr.cursor); 1075 1076 /* 1077 * FIXME DDR DVFS introduces massive memory latencies which 1078 * are not known to system agent so any deadline specified 1079 * by the display may not be respected. To support DDR DVFS 1080 * the watermark code needs to be rewritten to essentially 1081 * bypass deadline mechanism and rely solely on the 1082 * watermarks. For now disable DDR DVFS. 1083 */ 1084 if (IS_CHERRYVIEW(dev_priv)) 1085 chv_set_memory_dvfs(dev_priv, false); 1086 1087 if (!cxsr_enabled) 1088 intel_set_memory_cxsr(dev_priv, false); 1089 1090 vlv_write_wm_values(intel_crtc, &wm); 1091 1092 if (cxsr_enabled) 1093 intel_set_memory_cxsr(dev_priv, true); 1094 } 1095 1096 static void valleyview_update_sprite_wm(struct drm_plane *plane, 1097 struct drm_crtc *crtc, 1098 uint32_t sprite_width, 1099 uint32_t sprite_height, 1100 int pixel_size, 1101 bool enabled, bool scaled) 1102 { 1103 struct drm_device *dev = crtc->dev; 1104 struct drm_i915_private *dev_priv = dev->dev_private; 1105 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1106 enum i915_pipe pipe = intel_crtc->pipe; 1107 int sprite = to_intel_plane(plane)->plane; 1108 bool cxsr_enabled; 1109 struct vlv_wm_values wm = dev_priv->wm.vlv; 1110 1111 if (enabled) { 1112 wm.ddl[pipe].sprite[sprite] = 1113 vlv_compute_drain_latency(crtc, plane); 1114 1115 wm.pipe[pipe].sprite[sprite] = 1116 vlv_compute_wm(intel_crtc, 1117 to_intel_plane(plane), 1118 vlv_get_fifo_size(dev, pipe, sprite+1)); 1119 } else { 1120 wm.ddl[pipe].sprite[sprite] = 0; 1121 wm.pipe[pipe].sprite[sprite] = 0; 1122 } 1123 1124 cxsr_enabled = vlv_compute_sr_wm(dev, &wm); 1125 1126 if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) 1127 return; 1128 1129 DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, " 1130 "SR: plane=%d, cursor=%d\n", pipe_name(pipe), 1131 sprite_name(pipe, sprite), 1132 wm.pipe[pipe].sprite[sprite], 1133 wm.sr.plane, wm.sr.cursor); 1134 1135 if (!cxsr_enabled) 1136 intel_set_memory_cxsr(dev_priv, false); 1137 1138 vlv_write_wm_values(intel_crtc, &wm); 1139 1140 if (cxsr_enabled) 1141 intel_set_memory_cxsr(dev_priv, true); 1142 } 1143 1144 #define single_plane_enabled(mask) is_power_of_2(mask) 1145 1146 static void g4x_update_wm(struct drm_crtc *crtc) 1147 { 1148 struct drm_device *dev = crtc->dev; 1149 static const int sr_latency_ns = 12000; 1150 struct drm_i915_private *dev_priv = dev->dev_private; 1151 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1152 int plane_sr, cursor_sr; 1153 unsigned int enabled = 0; 1154 bool cxsr_enabled; 1155 1156 if (g4x_compute_wm0(dev, PIPE_A, 1157 &g4x_wm_info, pessimal_latency_ns, 1158 &g4x_cursor_wm_info, pessimal_latency_ns, 1159 &planea_wm, &cursora_wm)) 1160 enabled |= 1 << PIPE_A; 1161 1162 if (g4x_compute_wm0(dev, PIPE_B, 1163 &g4x_wm_info, pessimal_latency_ns, 1164 &g4x_cursor_wm_info, pessimal_latency_ns, 1165 &planeb_wm, &cursorb_wm)) 1166 enabled |= 1 << PIPE_B; 1167 1168 if (single_plane_enabled(enabled) && 1169 
g4x_compute_srwm(dev, ffs(enabled) - 1, 1170 sr_latency_ns, 1171 &g4x_wm_info, 1172 &g4x_cursor_wm_info, 1173 &plane_sr, &cursor_sr)) { 1174 cxsr_enabled = true; 1175 } else { 1176 cxsr_enabled = false; 1177 intel_set_memory_cxsr(dev_priv, false); 1178 plane_sr = cursor_sr = 0; 1179 } 1180 1181 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " 1182 "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1183 planea_wm, cursora_wm, 1184 planeb_wm, cursorb_wm, 1185 plane_sr, cursor_sr); 1186 1187 I915_WRITE(DSPFW1, 1188 FW_WM(plane_sr, SR) | 1189 FW_WM(cursorb_wm, CURSORB) | 1190 FW_WM(planeb_wm, PLANEB) | 1191 FW_WM(planea_wm, PLANEA)); 1192 I915_WRITE(DSPFW2, 1193 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | 1194 FW_WM(cursora_wm, CURSORA)); 1195 /* HPLL off in SR has some issues on G4x... disable it */ 1196 I915_WRITE(DSPFW3, 1197 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | 1198 FW_WM(cursor_sr, CURSOR_SR)); 1199 1200 if (cxsr_enabled) 1201 intel_set_memory_cxsr(dev_priv, true); 1202 } 1203 1204 static void i965_update_wm(struct drm_crtc *unused_crtc) 1205 { 1206 struct drm_device *dev = unused_crtc->dev; 1207 struct drm_i915_private *dev_priv = dev->dev_private; 1208 struct drm_crtc *crtc; 1209 int srwm = 1; 1210 int cursor_sr = 16; 1211 bool cxsr_enabled; 1212 1213 /* Calc sr entries for one plane configs */ 1214 crtc = single_enabled_crtc(dev); 1215 if (crtc) { 1216 /* self-refresh has much higher latency */ 1217 static const int sr_latency_ns = 12000; 1218 const struct drm_display_mode *adjusted_mode = 1219 &to_intel_crtc(crtc)->config->base.adjusted_mode; 1220 int clock = adjusted_mode->crtc_clock; 1221 int htotal = adjusted_mode->crtc_htotal; 1222 int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; 1223 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; 1224 unsigned long line_time_us; 1225 int entries; 1226 1227 line_time_us = max(htotal * 1000 / clock, 1); 1228 1229 /* Use ns/us then divide to preserve precision */ 1230 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1231 pixel_size * hdisplay; 1232 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); 1233 srwm = I965_FIFO_SIZE - entries; 1234 if (srwm < 0) 1235 srwm = 1; 1236 srwm &= 0x1ff; 1237 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", 1238 entries, srwm); 1239 1240 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1241 pixel_size * crtc->cursor->state->crtc_w; 1242 entries = DIV_ROUND_UP(entries, 1243 i965_cursor_wm_info.cacheline_size); 1244 cursor_sr = i965_cursor_wm_info.fifo_size - 1245 (entries + i965_cursor_wm_info.guard_size); 1246 1247 if (cursor_sr > i965_cursor_wm_info.max_wm) 1248 cursor_sr = i965_cursor_wm_info.max_wm; 1249 1250 DRM_DEBUG_KMS("self-refresh watermark: display plane %d " 1251 "cursor %d\n", srwm, cursor_sr); 1252 1253 cxsr_enabled = true; 1254 } else { 1255 cxsr_enabled = false; 1256 /* Turn off self refresh if both pipes are enabled */ 1257 intel_set_memory_cxsr(dev_priv, false); 1258 } 1259 1260 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 1261 srwm); 1262 1263 /* 965 has limitations... 
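	 *
	 * (Descriptive note, not from the original comment: the writes below
	 * program fixed level-0 watermarks of 8 for the planes and cursors and
	 * only vary the self-refresh values, matching the "A: 8, B: 8, C: 8"
	 * debug message above.)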
*/ 1264 I915_WRITE(DSPFW1, FW_WM(srwm, SR) | 1265 FW_WM(8, CURSORB) | 1266 FW_WM(8, PLANEB) | 1267 FW_WM(8, PLANEA)); 1268 I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | 1269 FW_WM(8, PLANEC_OLD)); 1270 /* update cursor SR watermark */ 1271 I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); 1272 1273 if (cxsr_enabled) 1274 intel_set_memory_cxsr(dev_priv, true); 1275 } 1276 1277 #undef FW_WM 1278 1279 static void i9xx_update_wm(struct drm_crtc *unused_crtc) 1280 { 1281 struct drm_device *dev = unused_crtc->dev; 1282 struct drm_i915_private *dev_priv = dev->dev_private; 1283 const struct intel_watermark_params *wm_info; 1284 uint32_t fwater_lo; 1285 uint32_t fwater_hi; 1286 int cwm, srwm = 1; 1287 int fifo_size; 1288 int planea_wm, planeb_wm; 1289 struct drm_crtc *crtc, *enabled = NULL; 1290 1291 if (IS_I945GM(dev)) 1292 wm_info = &i945_wm_info; 1293 else if (!IS_GEN2(dev)) 1294 wm_info = &i915_wm_info; 1295 else 1296 wm_info = &i830_a_wm_info; 1297 1298 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1299 crtc = intel_get_crtc_for_plane(dev, 0); 1300 if (intel_crtc_active(crtc)) { 1301 const struct drm_display_mode *adjusted_mode; 1302 int cpp = crtc->primary->state->fb->bits_per_pixel / 8; 1303 if (IS_GEN2(dev)) 1304 cpp = 4; 1305 1306 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 1307 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1308 wm_info, fifo_size, cpp, 1309 pessimal_latency_ns); 1310 enabled = crtc; 1311 } else { 1312 planea_wm = fifo_size - wm_info->guard_size; 1313 if (planea_wm > (long)wm_info->max_wm) 1314 planea_wm = wm_info->max_wm; 1315 } 1316 1317 if (IS_GEN2(dev)) 1318 wm_info = &i830_bc_wm_info; 1319 1320 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1321 crtc = intel_get_crtc_for_plane(dev, 1); 1322 if (intel_crtc_active(crtc)) { 1323 const struct drm_display_mode *adjusted_mode; 1324 int cpp = crtc->primary->state->fb->bits_per_pixel / 8; 1325 if (IS_GEN2(dev)) 1326 cpp = 4; 1327 1328 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 1329 planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1330 wm_info, fifo_size, cpp, 1331 pessimal_latency_ns); 1332 if (enabled == NULL) 1333 enabled = crtc; 1334 else 1335 enabled = NULL; 1336 } else { 1337 planeb_wm = fifo_size - wm_info->guard_size; 1338 if (planeb_wm > (long)wm_info->max_wm) 1339 planeb_wm = wm_info->max_wm; 1340 } 1341 1342 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 1343 1344 if (IS_I915GM(dev) && enabled) { 1345 struct drm_i915_gem_object *obj; 1346 1347 obj = intel_fb_obj(enabled->primary->state->fb); 1348 1349 /* self-refresh seems busted with untiled */ 1350 if (obj->tiling_mode == I915_TILING_NONE) 1351 enabled = NULL; 1352 } 1353 1354 /* 1355 * Overlay gets an aggressive default since video jitter is bad. 1356 */ 1357 cwm = 2; 1358 1359 /* Play safe and disable self-refresh before adjusting watermarks. 
*/ 1360 intel_set_memory_cxsr(dev_priv, false); 1361 1362 /* Calc sr entries for one plane configs */ 1363 if (HAS_FW_BLC(dev) && enabled) { 1364 /* self-refresh has much higher latency */ 1365 static const int sr_latency_ns = 6000; 1366 const struct drm_display_mode *adjusted_mode = 1367 &to_intel_crtc(enabled)->config->base.adjusted_mode; 1368 int clock = adjusted_mode->crtc_clock; 1369 int htotal = adjusted_mode->crtc_htotal; 1370 int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; 1371 int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8; 1372 unsigned long line_time_us; 1373 int entries; 1374 1375 line_time_us = max(htotal * 1000 / clock, 1); 1376 1377 /* Use ns/us then divide to preserve precision */ 1378 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1379 pixel_size * hdisplay; 1380 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); 1381 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); 1382 srwm = wm_info->fifo_size - entries; 1383 if (srwm < 0) 1384 srwm = 1; 1385 1386 if (IS_I945G(dev) || IS_I945GM(dev)) 1387 I915_WRITE(FW_BLC_SELF, 1388 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); 1389 else if (IS_I915GM(dev)) 1390 I915_WRITE(FW_BLC_SELF, srwm & 0x3f); 1391 } 1392 1393 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 1394 planea_wm, planeb_wm, cwm, srwm); 1395 1396 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 1397 fwater_hi = (cwm & 0x1f); 1398 1399 /* Set request length to 8 cachelines per fetch */ 1400 fwater_lo = fwater_lo | (1 << 24) | (1 << 8); 1401 fwater_hi = fwater_hi | (1 << 8); 1402 1403 I915_WRITE(FW_BLC, fwater_lo); 1404 I915_WRITE(FW_BLC2, fwater_hi); 1405 1406 if (enabled) 1407 intel_set_memory_cxsr(dev_priv, true); 1408 } 1409 1410 static void i845_update_wm(struct drm_crtc *unused_crtc) 1411 { 1412 struct drm_device *dev = unused_crtc->dev; 1413 struct drm_i915_private *dev_priv = dev->dev_private; 1414 struct drm_crtc *crtc; 1415 const struct drm_display_mode *adjusted_mode; 1416 uint32_t fwater_lo; 1417 int planea_wm; 1418 1419 crtc = single_enabled_crtc(dev); 1420 if (crtc == NULL) 1421 return; 1422 1423 adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 1424 planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, 1425 &i845_wm_info, 1426 dev_priv->display.get_fifo_size(dev, 0), 1427 4, pessimal_latency_ns); 1428 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1429 fwater_lo |= (3<<8) | planea_wm; 1430 1431 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); 1432 1433 I915_WRITE(FW_BLC, fwater_lo); 1434 } 1435 1436 static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev, 1437 struct drm_crtc *crtc) 1438 { 1439 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1440 uint32_t pixel_rate; 1441 1442 pixel_rate = intel_crtc->config->base.adjusted_mode.crtc_clock; 1443 1444 /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to 1445 * adjust the pixel_rate here. 
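	 *
	 * (Illustrative example of the panel fitter correction below, with
	 * made-up numbers: a 1920x1200 pipe downscaled to a 1920x1080 panel
	 * fitter window scales the effective pixel rate by
	 * (1920*1200)/(1920*1080), i.e. by about 1.11x.)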
*/ 1446 1447 if (intel_crtc->config->pch_pfit.enabled) { 1448 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 1449 uint32_t pfit_size = intel_crtc->config->pch_pfit.size; 1450 1451 pipe_w = intel_crtc->config->pipe_src_w; 1452 pipe_h = intel_crtc->config->pipe_src_h; 1453 pfit_w = (pfit_size >> 16) & 0xFFFF; 1454 pfit_h = pfit_size & 0xFFFF; 1455 if (pipe_w < pfit_w) 1456 pipe_w = pfit_w; 1457 if (pipe_h < pfit_h) 1458 pipe_h = pfit_h; 1459 1460 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, 1461 pfit_w * pfit_h); 1462 } 1463 1464 return pixel_rate; 1465 } 1466 1467 /* latency must be in 0.1us units. */ 1468 static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, 1469 uint32_t latency) 1470 { 1471 uint64_t ret; 1472 1473 if (WARN(latency == 0, "Latency value missing\n")) 1474 return UINT_MAX; 1475 1476 ret = (uint64_t) pixel_rate * bytes_per_pixel * latency; 1477 ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2; 1478 1479 return ret; 1480 } 1481 1482 /* latency must be in 0.1us units. */ 1483 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, 1484 uint32_t horiz_pixels, uint8_t bytes_per_pixel, 1485 uint32_t latency) 1486 { 1487 uint32_t ret; 1488 1489 if (WARN(latency == 0, "Latency value missing\n")) 1490 return UINT_MAX; 1491 1492 ret = (latency * pixel_rate) / (pipe_htotal * 10000); 1493 ret = (ret + 1) * horiz_pixels * bytes_per_pixel; 1494 ret = DIV_ROUND_UP(ret, 64) + 2; 1495 return ret; 1496 } 1497 1498 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels, 1499 uint8_t bytes_per_pixel) 1500 { 1501 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; 1502 } 1503 1504 struct skl_pipe_wm_parameters { 1505 bool active; 1506 uint32_t pipe_htotal; 1507 uint32_t pixel_rate; /* in KHz */ 1508 struct intel_plane_wm_parameters plane[I915_MAX_PLANES]; 1509 struct intel_plane_wm_parameters cursor; 1510 }; 1511 1512 struct ilk_pipe_wm_parameters { 1513 bool active; 1514 uint32_t pipe_htotal; 1515 uint32_t pixel_rate; 1516 struct intel_plane_wm_parameters pri; 1517 struct intel_plane_wm_parameters spr; 1518 struct intel_plane_wm_parameters cur; 1519 }; 1520 1521 struct ilk_wm_maximums { 1522 uint16_t pri; 1523 uint16_t spr; 1524 uint16_t cur; 1525 uint16_t fbc; 1526 }; 1527 1528 /* used in computing the new watermarks state */ 1529 struct intel_wm_config { 1530 unsigned int num_pipes_active; 1531 bool sprites_enabled; 1532 bool sprites_scaled; 1533 }; 1534 1535 /* 1536 * For both WM_PIPE and WM_LP. 1537 * mem_value must be in 0.1us units. 1538 */ 1539 static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params, 1540 uint32_t mem_value, 1541 bool is_lp) 1542 { 1543 uint32_t method1, method2; 1544 1545 if (!params->active || !params->pri.enabled) 1546 return 0; 1547 1548 method1 = ilk_wm_method1(params->pixel_rate, 1549 params->pri.bytes_per_pixel, 1550 mem_value); 1551 1552 if (!is_lp) 1553 return method1; 1554 1555 method2 = ilk_wm_method2(params->pixel_rate, 1556 params->pipe_htotal, 1557 params->pri.horiz_pixels, 1558 params->pri.bytes_per_pixel, 1559 mem_value); 1560 1561 return min(method1, method2); 1562 } 1563 1564 /* 1565 * For both WM_PIPE and WM_LP. 1566 * mem_value must be in 0.1us units. 
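 *
 * (Illustrative arithmetic, with made-up numbers: a 5 us latency is passed
 * in as mem_value = 50; with a 148,500 kHz pixel rate and 4 bytes per pixel,
 * ilk_wm_method1() above gives DIV_ROUND_UP(148500 * 4 * 50, 64 * 10000) + 2
 * = 47 + 2 = 49.)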
1567 */ 1568 static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params, 1569 uint32_t mem_value) 1570 { 1571 uint32_t method1, method2; 1572 1573 if (!params->active || !params->spr.enabled) 1574 return 0; 1575 1576 method1 = ilk_wm_method1(params->pixel_rate, 1577 params->spr.bytes_per_pixel, 1578 mem_value); 1579 method2 = ilk_wm_method2(params->pixel_rate, 1580 params->pipe_htotal, 1581 params->spr.horiz_pixels, 1582 params->spr.bytes_per_pixel, 1583 mem_value); 1584 return min(method1, method2); 1585 } 1586 1587 /* 1588 * For both WM_PIPE and WM_LP. 1589 * mem_value must be in 0.1us units. 1590 */ 1591 static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params, 1592 uint32_t mem_value) 1593 { 1594 if (!params->active || !params->cur.enabled) 1595 return 0; 1596 1597 return ilk_wm_method2(params->pixel_rate, 1598 params->pipe_htotal, 1599 params->cur.horiz_pixels, 1600 params->cur.bytes_per_pixel, 1601 mem_value); 1602 } 1603 1604 /* Only for WM_LP. */ 1605 static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params, 1606 uint32_t pri_val) 1607 { 1608 if (!params->active || !params->pri.enabled) 1609 return 0; 1610 1611 return ilk_wm_fbc(pri_val, 1612 params->pri.horiz_pixels, 1613 params->pri.bytes_per_pixel); 1614 } 1615 1616 static unsigned int ilk_display_fifo_size(const struct drm_device *dev) 1617 { 1618 if (INTEL_INFO(dev)->gen >= 8) 1619 return 3072; 1620 else if (INTEL_INFO(dev)->gen >= 7) 1621 return 768; 1622 else 1623 return 512; 1624 } 1625 1626 static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev, 1627 int level, bool is_sprite) 1628 { 1629 if (INTEL_INFO(dev)->gen >= 8) 1630 /* BDW primary/sprite plane watermarks */ 1631 return level == 0 ? 255 : 2047; 1632 else if (INTEL_INFO(dev)->gen >= 7) 1633 /* IVB/HSW primary/sprite plane watermarks */ 1634 return level == 0 ? 127 : 1023; 1635 else if (!is_sprite) 1636 /* ILK/SNB primary plane watermarks */ 1637 return level == 0 ? 127 : 511; 1638 else 1639 /* ILK/SNB sprite plane watermarks */ 1640 return level == 0 ? 63 : 255; 1641 } 1642 1643 static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev, 1644 int level) 1645 { 1646 if (INTEL_INFO(dev)->gen >= 7) 1647 return level == 0 ? 63 : 255; 1648 else 1649 return level == 0 ? 31 : 63; 1650 } 1651 1652 static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev) 1653 { 1654 if (INTEL_INFO(dev)->gen >= 8) 1655 return 31; 1656 else 1657 return 15; 1658 } 1659 1660 /* Calculate the maximum primary/sprite plane watermark */ 1661 static unsigned int ilk_plane_wm_max(const struct drm_device *dev, 1662 int level, 1663 const struct intel_wm_config *config, 1664 enum intel_ddb_partitioning ddb_partitioning, 1665 bool is_sprite) 1666 { 1667 unsigned int fifo_size = ilk_display_fifo_size(dev); 1668 1669 /* if sprites aren't enabled, sprites get nothing */ 1670 if (is_sprite && !config->sprites_enabled) 1671 return 0; 1672 1673 /* HSW allows LP1+ watermarks even with multiple pipes */ 1674 if (level == 0 || config->num_pipes_active > 1) { 1675 fifo_size /= INTEL_INFO(dev)->num_pipes; 1676 1677 /* 1678 * For some reason the non self refresh 1679 * FIFO size is only half of the self 1680 * refresh FIFO size on ILK/SNB. 
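		 *
		 * (Illustrative: on a two-pipe ILK/SNB configuration the
		 * 512-entry FIFO has already been split to 256 entries per
		 * pipe above, and is halved again below to 128 entries for
		 * the level 0 watermark, before any primary/sprite split.)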
1681 */ 1682 if (INTEL_INFO(dev)->gen <= 6) 1683 fifo_size /= 2; 1684 } 1685 1686 if (config->sprites_enabled) { 1687 /* level 0 is always calculated with 1:1 split */ 1688 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) { 1689 if (is_sprite) 1690 fifo_size *= 5; 1691 fifo_size /= 6; 1692 } else { 1693 fifo_size /= 2; 1694 } 1695 } 1696 1697 /* clamp to max that the registers can hold */ 1698 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite)); 1699 } 1700 1701 /* Calculate the maximum cursor plane watermark */ 1702 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev, 1703 int level, 1704 const struct intel_wm_config *config) 1705 { 1706 /* HSW LP1+ watermarks w/ multiple pipes */ 1707 if (level > 0 && config->num_pipes_active > 1) 1708 return 64; 1709 1710 /* otherwise just report max that registers can hold */ 1711 return ilk_cursor_wm_reg_max(dev, level); 1712 } 1713 1714 static void ilk_compute_wm_maximums(const struct drm_device *dev, 1715 int level, 1716 const struct intel_wm_config *config, 1717 enum intel_ddb_partitioning ddb_partitioning, 1718 struct ilk_wm_maximums *max) 1719 { 1720 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false); 1721 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true); 1722 max->cur = ilk_cursor_wm_max(dev, level, config); 1723 max->fbc = ilk_fbc_wm_reg_max(dev); 1724 } 1725 1726 static void ilk_compute_wm_reg_maximums(struct drm_device *dev, 1727 int level, 1728 struct ilk_wm_maximums *max) 1729 { 1730 max->pri = ilk_plane_wm_reg_max(dev, level, false); 1731 max->spr = ilk_plane_wm_reg_max(dev, level, true); 1732 max->cur = ilk_cursor_wm_reg_max(dev, level); 1733 max->fbc = ilk_fbc_wm_reg_max(dev); 1734 } 1735 1736 static bool ilk_validate_wm_level(int level, 1737 const struct ilk_wm_maximums *max, 1738 struct intel_wm_level *result) 1739 { 1740 bool ret; 1741 1742 /* already determined to be invalid? */ 1743 if (!result->enable) 1744 return false; 1745 1746 result->enable = result->pri_val <= max->pri && 1747 result->spr_val <= max->spr && 1748 result->cur_val <= max->cur; 1749 1750 ret = result->enable; 1751 1752 /* 1753 * HACK until we can pre-compute everything, 1754 * and thus fail gracefully if LP0 watermarks 1755 * are exceeded... 
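	 *
	 * (Descriptive note: rather than disabling LP0, the branch below
	 * clamps each value to the register maximum and keeps the level
	 * enabled, so only the debug messages record that the computed
	 * watermarks were too large.)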
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}

static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc->active)
		return 0;

	/* The watermark is computed based on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
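	 * (Illustrative arithmetic, with made-up numbers: htotal = 2200 and a
	 * 148,500 kHz pixel clock give DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500)
	 * = 119, i.e. a line time of roughly 14.8 us expressed in 1/8 us units.)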
1813 * */ 1814 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, 1815 mode->crtc_clock); 1816 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, 1817 dev_priv->display.get_display_clock_speed(dev_priv->dev)); 1818 1819 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 1820 PIPE_WM_LINETIME_TIME(linetime); 1821 } 1822 1823 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) 1824 { 1825 struct drm_i915_private *dev_priv = dev->dev_private; 1826 1827 if (IS_GEN9(dev)) { 1828 uint32_t val; 1829 int ret, i; 1830 int level, max_level = ilk_wm_max_level(dev); 1831 1832 /* read the first set of memory latencies[0:3] */ 1833 val = 0; /* data0 to be programmed to 0 for first set */ 1834 mutex_lock(&dev_priv->rps.hw_lock); 1835 ret = sandybridge_pcode_read(dev_priv, 1836 GEN9_PCODE_READ_MEM_LATENCY, 1837 &val); 1838 mutex_unlock(&dev_priv->rps.hw_lock); 1839 1840 if (ret) { 1841 DRM_ERROR("SKL Mailbox read error = %d\n", ret); 1842 return; 1843 } 1844 1845 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK; 1846 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & 1847 GEN9_MEM_LATENCY_LEVEL_MASK; 1848 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & 1849 GEN9_MEM_LATENCY_LEVEL_MASK; 1850 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & 1851 GEN9_MEM_LATENCY_LEVEL_MASK; 1852 1853 /* read the second set of memory latencies[4:7] */ 1854 val = 1; /* data0 to be programmed to 1 for second set */ 1855 mutex_lock(&dev_priv->rps.hw_lock); 1856 ret = sandybridge_pcode_read(dev_priv, 1857 GEN9_PCODE_READ_MEM_LATENCY, 1858 &val); 1859 mutex_unlock(&dev_priv->rps.hw_lock); 1860 if (ret) { 1861 DRM_ERROR("SKL Mailbox read error = %d\n", ret); 1862 return; 1863 } 1864 1865 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK; 1866 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) & 1867 GEN9_MEM_LATENCY_LEVEL_MASK; 1868 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) & 1869 GEN9_MEM_LATENCY_LEVEL_MASK; 1870 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) & 1871 GEN9_MEM_LATENCY_LEVEL_MASK; 1872 1873 /* 1874 * WaWmMemoryReadLatency:skl 1875 * 1876 * punit doesn't take into account the read latency so we need 1877 * to add 2us to the various latency levels we retrieve from 1878 * the punit. 1879 * - W0 is a bit special in that it's the only level that 1880 * can't be disabled if we want to have display working, so 1881 * we always add 2us there. 1882 * - For levels >=1, punit returns 0us latency when they are 1883 * disabled, so we respect that and don't add 2us then 1884 * 1885 * Additionally, if a level n (n > 1) has a 0us latency, all 1886 * levels m (m >= n) need to be disabled. We make sure to 1887 * sanitize the values out of the punit to satisfy this 1888 * requirement. 
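 * For instance, should the punit report 0us for level 3, levels 3
 * through max_level are all zeroed (disabled) by the loop below.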
1889 */ 1890 wm[0] += 2; 1891 for (level = 1; level <= max_level; level++) 1892 if (wm[level] != 0) 1893 wm[level] += 2; 1894 else { 1895 for (i = level + 1; i <= max_level; i++) 1896 wm[i] = 0; 1897 1898 break; 1899 } 1900 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1901 uint64_t sskpd = I915_READ64(MCH_SSKPD); 1902 1903 wm[0] = (sskpd >> 56) & 0xFF; 1904 if (wm[0] == 0) 1905 wm[0] = sskpd & 0xF; 1906 wm[1] = (sskpd >> 4) & 0xFF; 1907 wm[2] = (sskpd >> 12) & 0xFF; 1908 wm[3] = (sskpd >> 20) & 0x1FF; 1909 wm[4] = (sskpd >> 32) & 0x1FF; 1910 } else if (INTEL_INFO(dev)->gen >= 6) { 1911 uint32_t sskpd = I915_READ(MCH_SSKPD); 1912 1913 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK; 1914 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK; 1915 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK; 1916 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK; 1917 } else if (INTEL_INFO(dev)->gen >= 5) { 1918 uint32_t mltr = I915_READ(MLTR_ILK); 1919 1920 /* ILK primary LP0 latency is 700 ns */ 1921 wm[0] = 7; 1922 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK; 1923 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK; 1924 } 1925 } 1926 1927 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) 1928 { 1929 /* ILK sprite LP0 latency is 1300 ns */ 1930 if (INTEL_INFO(dev)->gen == 5) 1931 wm[0] = 13; 1932 } 1933 1934 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) 1935 { 1936 /* ILK cursor LP0 latency is 1300 ns */ 1937 if (INTEL_INFO(dev)->gen == 5) 1938 wm[0] = 13; 1939 1940 /* WaDoubleCursorLP3Latency:ivb */ 1941 if (IS_IVYBRIDGE(dev)) 1942 wm[3] *= 2; 1943 } 1944 1945 int ilk_wm_max_level(const struct drm_device *dev) 1946 { 1947 /* how many WM levels are we expecting */ 1948 if (INTEL_INFO(dev)->gen >= 9) 1949 return 7; 1950 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1951 return 4; 1952 else if (INTEL_INFO(dev)->gen >= 6) 1953 return 3; 1954 else 1955 return 2; 1956 } 1957 1958 static void intel_print_wm_latency(struct drm_device *dev, 1959 const char *name, 1960 const uint16_t wm[8]) 1961 { 1962 int level, max_level = ilk_wm_max_level(dev); 1963 1964 for (level = 0; level <= max_level; level++) { 1965 unsigned int latency = wm[level]; 1966 1967 if (latency == 0) { 1968 DRM_ERROR("%s WM%d latency not provided\n", 1969 name, level); 1970 continue; 1971 } 1972 1973 /* 1974 * - latencies are in us on gen9. 1975 * - before then, WM1+ latency values are in 0.5us units 1976 */ 1977 if (IS_GEN9(dev)) 1978 latency *= 10; 1979 else if (level > 0) 1980 latency *= 5; 1981 1982 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", 1983 name, level, wm[level], 1984 latency / 10, latency % 10); 1985 } 1986 } 1987 1988 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 1989 uint16_t wm[5], uint16_t min) 1990 { 1991 int level, max_level = ilk_wm_max_level(dev_priv->dev); 1992 1993 if (wm[0] >= min) 1994 return false; 1995 1996 wm[0] = max(wm[0], min); 1997 for (level = 1; level <= max_level; level++) 1998 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); 1999 2000 return true; 2001 } 2002 2003 static void snb_wm_latency_quirk(struct drm_device *dev) 2004 { 2005 struct drm_i915_private *dev_priv = dev->dev_private; 2006 bool changed; 2007 2008 /* 2009 * The BIOS provided WM memory latency values are often 2010 * inadequate for high resolution displays. Adjust them. 
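 * As a rough illustration of ilk_increase_wm_latency() with min == 12
 * below: WM0 is raised to at least 1.2us, and the WM1+ levels to at
 * least DIV_ROUND_UP(12, 5) == 3, i.e. 1.5us in their 0.5us units.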
2011 */ 2012 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | 2013 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | 2014 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); 2015 2016 if (!changed) 2017 return; 2018 2019 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); 2020 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2021 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2022 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2023 } 2024 2025 static void ilk_setup_wm_latency(struct drm_device *dev) 2026 { 2027 struct drm_i915_private *dev_priv = dev->dev_private; 2028 2029 intel_read_wm_latency(dev, dev_priv->wm.pri_latency); 2030 2031 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency, 2032 sizeof(dev_priv->wm.pri_latency)); 2033 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency, 2034 sizeof(dev_priv->wm.pri_latency)); 2035 2036 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency); 2037 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency); 2038 2039 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); 2040 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); 2041 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); 2042 2043 if (IS_GEN6(dev)) 2044 snb_wm_latency_quirk(dev); 2045 } 2046 2047 static void skl_setup_wm_latency(struct drm_device *dev) 2048 { 2049 struct drm_i915_private *dev_priv = dev->dev_private; 2050 2051 intel_read_wm_latency(dev, dev_priv->wm.skl_latency); 2052 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); 2053 } 2054 2055 static void ilk_compute_wm_parameters(struct drm_crtc *crtc, 2056 struct ilk_pipe_wm_parameters *p) 2057 { 2058 struct drm_device *dev = crtc->dev; 2059 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2060 enum i915_pipe pipe = intel_crtc->pipe; 2061 struct drm_plane *plane; 2062 2063 if (!intel_crtc->active) 2064 return; 2065 2066 p->active = true; 2067 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; 2068 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); 2069 2070 if (crtc->primary->state->fb) 2071 p->pri.bytes_per_pixel = 2072 crtc->primary->state->fb->bits_per_pixel / 8; 2073 else 2074 p->pri.bytes_per_pixel = 4; 2075 2076 p->cur.bytes_per_pixel = 4; 2077 /* 2078 * TODO: for now, assume primary and cursor planes are always enabled. 2079 * Setting them to false makes the screen flicker. 
2080 */ 2081 p->pri.enabled = true; 2082 p->cur.enabled = true; 2083 2084 p->pri.horiz_pixels = intel_crtc->config->pipe_src_w; 2085 p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w; 2086 2087 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { 2088 struct intel_plane *intel_plane = to_intel_plane(plane); 2089 2090 if (intel_plane->pipe == pipe) { 2091 p->spr = intel_plane->wm; 2092 break; 2093 } 2094 } 2095 } 2096 2097 static void ilk_compute_wm_config(struct drm_device *dev, 2098 struct intel_wm_config *config) 2099 { 2100 struct intel_crtc *intel_crtc; 2101 2102 /* Compute the currently _active_ config */ 2103 for_each_intel_crtc(dev, intel_crtc) { 2104 const struct intel_pipe_wm *wm = &intel_crtc->wm.active; 2105 2106 if (!wm->pipe_enabled) 2107 continue; 2108 2109 config->sprites_enabled |= wm->sprites_enabled; 2110 config->sprites_scaled |= wm->sprites_scaled; 2111 config->num_pipes_active++; 2112 } 2113 } 2114 2115 /* Compute new watermarks for the pipe */ 2116 static bool intel_compute_pipe_wm(struct drm_crtc *crtc, 2117 const struct ilk_pipe_wm_parameters *params, 2118 struct intel_pipe_wm *pipe_wm) 2119 { 2120 struct drm_device *dev = crtc->dev; 2121 const struct drm_i915_private *dev_priv = dev->dev_private; 2122 int level, max_level = ilk_wm_max_level(dev); 2123 /* LP0 watermark maximums depend on this pipe alone */ 2124 struct intel_wm_config config = { 2125 .num_pipes_active = 1, 2126 .sprites_enabled = params->spr.enabled, 2127 .sprites_scaled = params->spr.scaled, 2128 }; 2129 struct ilk_wm_maximums max; 2130 2131 pipe_wm->pipe_enabled = params->active; 2132 pipe_wm->sprites_enabled = params->spr.enabled; 2133 pipe_wm->sprites_scaled = params->spr.scaled; 2134 2135 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2136 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled) 2137 max_level = 1; 2138 2139 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ 2140 if (params->spr.scaled) 2141 max_level = 0; 2142 2143 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]); 2144 2145 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2146 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); 2147 2148 /* LP0 watermarks always use 1/2 DDB partitioning */ 2149 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); 2150 2151 /* At least LP0 must be valid */ 2152 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) 2153 return false; 2154 2155 ilk_compute_wm_reg_maximums(dev, 1, &max); 2156 2157 for (level = 1; level <= max_level; level++) { 2158 struct intel_wm_level wm = {}; 2159 2160 ilk_compute_wm_level(dev_priv, level, params, &wm); 2161 2162 /* 2163 * Disable any watermark level that exceeds the 2164 * register maximums since such watermarks are 2165 * always invalid. 2166 */ 2167 if (!ilk_validate_wm_level(level, &max, &wm)) 2168 break; 2169 2170 pipe_wm->wm[level] = wm; 2171 } 2172 2173 return true; 2174 } 2175 2176 /* 2177 * Merge the watermarks from all active pipes for a specific level. 
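 * The merged level takes the maximum primary/sprite/cursor/FBC value
 * seen on any active pipe, and is only reported as enabled if every
 * active pipe could enable that level.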
2178 */ 2179 static void ilk_merge_wm_level(struct drm_device *dev, 2180 int level, 2181 struct intel_wm_level *ret_wm) 2182 { 2183 struct intel_crtc *intel_crtc; 2184 2185 ret_wm->enable = true; 2186 2187 for_each_intel_crtc(dev, intel_crtc) { 2188 const struct intel_pipe_wm *active = &intel_crtc->wm.active; 2189 const struct intel_wm_level *wm = &active->wm[level]; 2190 2191 if (!active->pipe_enabled) 2192 continue; 2193 2194 /* 2195 * The watermark values may have been used in the past, 2196 * so we must maintain them in the registers for some 2197 * time even if the level is now disabled. 2198 */ 2199 if (!wm->enable) 2200 ret_wm->enable = false; 2201 2202 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val); 2203 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val); 2204 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val); 2205 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val); 2206 } 2207 } 2208 2209 /* 2210 * Merge all low power watermarks for all active pipes. 2211 */ 2212 static void ilk_wm_merge(struct drm_device *dev, 2213 const struct intel_wm_config *config, 2214 const struct ilk_wm_maximums *max, 2215 struct intel_pipe_wm *merged) 2216 { 2217 int level, max_level = ilk_wm_max_level(dev); 2218 int last_enabled_level = max_level; 2219 2220 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ 2221 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && 2222 config->num_pipes_active > 1) 2223 return; 2224 2225 /* ILK: FBC WM must be disabled always */ 2226 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; 2227 2228 /* merge each WM1+ level */ 2229 for (level = 1; level <= max_level; level++) { 2230 struct intel_wm_level *wm = &merged->wm[level]; 2231 2232 ilk_merge_wm_level(dev, level, wm); 2233 2234 if (level > last_enabled_level) 2235 wm->enable = false; 2236 else if (!ilk_validate_wm_level(level, max, wm)) 2237 /* make sure all following levels get disabled */ 2238 last_enabled_level = level - 1; 2239 2240 /* 2241 * The spec says it is preferred to disable 2242 * FBC WMs instead of disabling a WM level. 2243 */ 2244 if (wm->fbc_val > max->fbc) { 2245 if (wm->enable) 2246 merged->fbc_wm_enabled = false; 2247 wm->fbc_val = 0; 2248 } 2249 } 2250 2251 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */ 2252 /* 2253 * FIXME this is racy. FBC might get enabled later. 2254 * What we should check here is whether FBC can be 2255 * enabled sometime later. 
2256 */ 2257 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) { 2258 for (level = 2; level <= max_level; level++) { 2259 struct intel_wm_level *wm = &merged->wm[level]; 2260 2261 wm->enable = false; 2262 } 2263 } 2264 } 2265 2266 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm) 2267 { 2268 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */ 2269 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable); 2270 } 2271 2272 /* The value we need to program into the WM_LPx latency field */ 2273 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) 2274 { 2275 struct drm_i915_private *dev_priv = dev->dev_private; 2276 2277 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2278 return 2 * level; 2279 else 2280 return dev_priv->wm.pri_latency[level]; 2281 } 2282 2283 static void ilk_compute_wm_results(struct drm_device *dev, 2284 const struct intel_pipe_wm *merged, 2285 enum intel_ddb_partitioning partitioning, 2286 struct ilk_wm_values *results) 2287 { 2288 struct intel_crtc *intel_crtc; 2289 int level, wm_lp; 2290 2291 results->enable_fbc_wm = merged->fbc_wm_enabled; 2292 results->partitioning = partitioning; 2293 2294 /* LP1+ register values */ 2295 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2296 const struct intel_wm_level *r; 2297 2298 level = ilk_wm_lp_to_level(wm_lp, merged); 2299 2300 r = &merged->wm[level]; 2301 2302 /* 2303 * Maintain the watermark values even if the level is 2304 * disabled. Doing otherwise could cause underruns. 2305 */ 2306 results->wm_lp[wm_lp - 1] = 2307 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) | 2308 (r->pri_val << WM1_LP_SR_SHIFT) | 2309 r->cur_val; 2310 2311 if (r->enable) 2312 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN; 2313 2314 if (INTEL_INFO(dev)->gen >= 8) 2315 results->wm_lp[wm_lp - 1] |= 2316 r->fbc_val << WM1_LP_FBC_SHIFT_BDW; 2317 else 2318 results->wm_lp[wm_lp - 1] |= 2319 r->fbc_val << WM1_LP_FBC_SHIFT; 2320 2321 /* 2322 * Always set WM1S_LP_EN when spr_val != 0, even if the 2323 * level is disabled. Doing otherwise could cause underruns. 2324 */ 2325 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) { 2326 WARN_ON(wm_lp != 1); 2327 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val; 2328 } else 2329 results->wm_lp_spr[wm_lp - 1] = r->spr_val; 2330 } 2331 2332 /* LP0 register values */ 2333 for_each_intel_crtc(dev, intel_crtc) { 2334 enum i915_pipe pipe = intel_crtc->pipe; 2335 const struct intel_wm_level *r = 2336 &intel_crtc->wm.active.wm[0]; 2337 2338 if (WARN_ON(!r->enable)) 2339 continue; 2340 2341 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime; 2342 2343 results->wm_pipe[pipe] = 2344 (r->pri_val << WM0_PIPE_PLANE_SHIFT) | 2345 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) | 2346 r->cur_val; 2347 } 2348 } 2349 2350 /* Find the result with the highest level enabled. Check for enable_fbc_wm in 2351 * case both are at the same level. Prefer r1 in case they're the same. 
*/ 2352 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev, 2353 struct intel_pipe_wm *r1, 2354 struct intel_pipe_wm *r2) 2355 { 2356 int level, max_level = ilk_wm_max_level(dev); 2357 int level1 = 0, level2 = 0; 2358 2359 for (level = 1; level <= max_level; level++) { 2360 if (r1->wm[level].enable) 2361 level1 = level; 2362 if (r2->wm[level].enable) 2363 level2 = level; 2364 } 2365 2366 if (level1 == level2) { 2367 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled) 2368 return r2; 2369 else 2370 return r1; 2371 } else if (level1 > level2) { 2372 return r1; 2373 } else { 2374 return r2; 2375 } 2376 } 2377 2378 /* dirty bits used to track which watermarks need changes */ 2379 #define WM_DIRTY_PIPE(pipe) (1 << (pipe)) 2380 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe))) 2381 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp))) 2382 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3)) 2383 #define WM_DIRTY_FBC (1 << 24) 2384 #define WM_DIRTY_DDB (1 << 25) 2385 2386 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv, 2387 const struct ilk_wm_values *old, 2388 const struct ilk_wm_values *new) 2389 { 2390 unsigned int dirty = 0; 2391 enum i915_pipe pipe; 2392 int wm_lp; 2393 2394 for_each_pipe(dev_priv, pipe) { 2395 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) { 2396 dirty |= WM_DIRTY_LINETIME(pipe); 2397 /* Must disable LP1+ watermarks too */ 2398 dirty |= WM_DIRTY_LP_ALL; 2399 } 2400 2401 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) { 2402 dirty |= WM_DIRTY_PIPE(pipe); 2403 /* Must disable LP1+ watermarks too */ 2404 dirty |= WM_DIRTY_LP_ALL; 2405 } 2406 } 2407 2408 if (old->enable_fbc_wm != new->enable_fbc_wm) { 2409 dirty |= WM_DIRTY_FBC; 2410 /* Must disable LP1+ watermarks too */ 2411 dirty |= WM_DIRTY_LP_ALL; 2412 } 2413 2414 if (old->partitioning != new->partitioning) { 2415 dirty |= WM_DIRTY_DDB; 2416 /* Must disable LP1+ watermarks too */ 2417 dirty |= WM_DIRTY_LP_ALL; 2418 } 2419 2420 /* LP1+ watermarks already deemed dirty, no need to continue */ 2421 if (dirty & WM_DIRTY_LP_ALL) 2422 return dirty; 2423 2424 /* Find the lowest numbered LP1+ watermark in need of an update... */ 2425 for (wm_lp = 1; wm_lp <= 3; wm_lp++) { 2426 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] || 2427 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1]) 2428 break; 2429 } 2430 2431 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */ 2432 for (; wm_lp <= 3; wm_lp++) 2433 dirty |= WM_DIRTY_LP(wm_lp); 2434 2435 return dirty; 2436 } 2437 2438 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, 2439 unsigned int dirty) 2440 { 2441 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2442 bool changed = false; 2443 2444 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { 2445 previous->wm_lp[2] &= ~WM1_LP_SR_EN; 2446 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); 2447 changed = true; 2448 } 2449 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { 2450 previous->wm_lp[1] &= ~WM1_LP_SR_EN; 2451 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); 2452 changed = true; 2453 } 2454 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { 2455 previous->wm_lp[0] &= ~WM1_LP_SR_EN; 2456 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); 2457 changed = true; 2458 } 2459 2460 /* 2461 * Don't touch WM1S_LP_EN here. 2462 * Doing so could cause underruns. 
2463 */ 2464 2465 return changed; 2466 } 2467 2468 /* 2469 * The spec says we shouldn't write when we don't need, because every write 2470 * causes WMs to be re-evaluated, expending some power. 2471 */ 2472 static void ilk_write_wm_values(struct drm_i915_private *dev_priv, 2473 struct ilk_wm_values *results) 2474 { 2475 struct drm_device *dev = dev_priv->dev; 2476 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2477 unsigned int dirty; 2478 uint32_t val; 2479 2480 dirty = ilk_compute_wm_dirty(dev_priv, previous, results); 2481 if (!dirty) 2482 return; 2483 2484 _ilk_disable_lp_wm(dev_priv, dirty); 2485 2486 if (dirty & WM_DIRTY_PIPE(PIPE_A)) 2487 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]); 2488 if (dirty & WM_DIRTY_PIPE(PIPE_B)) 2489 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]); 2490 if (dirty & WM_DIRTY_PIPE(PIPE_C)) 2491 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]); 2492 2493 if (dirty & WM_DIRTY_LINETIME(PIPE_A)) 2494 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]); 2495 if (dirty & WM_DIRTY_LINETIME(PIPE_B)) 2496 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]); 2497 if (dirty & WM_DIRTY_LINETIME(PIPE_C)) 2498 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]); 2499 2500 if (dirty & WM_DIRTY_DDB) { 2501 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2502 val = I915_READ(WM_MISC); 2503 if (results->partitioning == INTEL_DDB_PART_1_2) 2504 val &= ~WM_MISC_DATA_PARTITION_5_6; 2505 else 2506 val |= WM_MISC_DATA_PARTITION_5_6; 2507 I915_WRITE(WM_MISC, val); 2508 } else { 2509 val = I915_READ(DISP_ARB_CTL2); 2510 if (results->partitioning == INTEL_DDB_PART_1_2) 2511 val &= ~DISP_DATA_PARTITION_5_6; 2512 else 2513 val |= DISP_DATA_PARTITION_5_6; 2514 I915_WRITE(DISP_ARB_CTL2, val); 2515 } 2516 } 2517 2518 if (dirty & WM_DIRTY_FBC) { 2519 val = I915_READ(DISP_ARB_CTL); 2520 if (results->enable_fbc_wm) 2521 val &= ~DISP_FBC_WM_DIS; 2522 else 2523 val |= DISP_FBC_WM_DIS; 2524 I915_WRITE(DISP_ARB_CTL, val); 2525 } 2526 2527 if (dirty & WM_DIRTY_LP(1) && 2528 previous->wm_lp_spr[0] != results->wm_lp_spr[0]) 2529 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); 2530 2531 if (INTEL_INFO(dev)->gen >= 7) { 2532 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) 2533 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); 2534 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) 2535 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); 2536 } 2537 2538 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) 2539 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); 2540 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) 2541 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); 2542 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2]) 2543 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); 2544 2545 dev_priv->wm.hw = *results; 2546 } 2547 2548 static bool ilk_disable_lp_wm(struct drm_device *dev) 2549 { 2550 struct drm_i915_private *dev_priv = dev->dev_private; 2551 2552 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); 2553 } 2554 2555 /* 2556 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the 2557 * different active planes. 
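 * As a rough example: with SKL_DDB_SIZE == 896 blocks, 4 blocks reserved
 * for the bypass path and two active pipes, each pipe ends up with
 * (896 - 4) / 2 == 446 blocks to split among its planes and cursor.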
2558 */ 2559 2560 #define SKL_DDB_SIZE 896 /* in blocks */ 2561 #define BXT_DDB_SIZE 512 2562 2563 static void 2564 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 2565 struct drm_crtc *for_crtc, 2566 const struct intel_wm_config *config, 2567 const struct skl_pipe_wm_parameters *params, 2568 struct skl_ddb_entry *alloc /* out */) 2569 { 2570 struct drm_crtc *crtc; 2571 unsigned int pipe_size, ddb_size; 2572 int nth_active_pipe; 2573 2574 if (!params->active) { 2575 alloc->start = 0; 2576 alloc->end = 0; 2577 return; 2578 } 2579 2580 if (IS_BROXTON(dev)) 2581 ddb_size = BXT_DDB_SIZE; 2582 else 2583 ddb_size = SKL_DDB_SIZE; 2584 2585 ddb_size -= 4; /* 4 blocks for bypass path allocation */ 2586 2587 nth_active_pipe = 0; 2588 for_each_crtc(dev, crtc) { 2589 if (!to_intel_crtc(crtc)->active) 2590 continue; 2591 2592 if (crtc == for_crtc) 2593 break; 2594 2595 nth_active_pipe++; 2596 } 2597 2598 pipe_size = ddb_size / config->num_pipes_active; 2599 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; 2600 alloc->end = alloc->start + pipe_size; 2601 } 2602 2603 static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) 2604 { 2605 if (config->num_pipes_active == 1) 2606 return 32; 2607 2608 return 8; 2609 } 2610 2611 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) 2612 { 2613 entry->start = reg & 0x3ff; 2614 entry->end = (reg >> 16) & 0x3ff; 2615 if (entry->end) 2616 entry->end += 1; 2617 } 2618 2619 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, 2620 struct skl_ddb_allocation *ddb /* out */) 2621 { 2622 enum i915_pipe pipe; 2623 int plane; 2624 u32 val; 2625 2626 for_each_pipe(dev_priv, pipe) { 2627 for_each_plane(dev_priv, pipe, plane) { 2628 val = I915_READ(PLANE_BUF_CFG(pipe, plane)); 2629 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], 2630 val); 2631 } 2632 2633 val = I915_READ(CUR_BUF_CFG(pipe)); 2634 skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val); 2635 } 2636 } 2637 2638 static unsigned int 2639 skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y) 2640 { 2641 2642 /* for planar format */ 2643 if (p->y_bytes_per_pixel) { 2644 if (y) /* y-plane data rate */ 2645 return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel; 2646 else /* uv-plane data rate */ 2647 return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel; 2648 } 2649 2650 /* for packed formats */ 2651 return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel; 2652 } 2653 2654 /* 2655 * We don't overflow 32 bits. 
Worst case is 3 planes enabled, each fetching 2656 * an 8192x4096@32bpp framebuffer: 2657 * 3 * 4096 * 8192 * 4 < 2^32 2658 */ 2659 static unsigned int 2660 skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc, 2661 const struct skl_pipe_wm_parameters *params) 2662 { 2663 unsigned int total_data_rate = 0; 2664 int plane; 2665 2666 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { 2667 const struct intel_plane_wm_parameters *p; 2668 2669 p = &params->plane[plane]; 2670 if (!p->enabled) 2671 continue; 2672 2673 total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */ 2674 if (p->y_bytes_per_pixel) { 2675 total_data_rate += skl_plane_relative_data_rate(p, 1); /* y-plane */ 2676 } 2677 } 2678 2679 return total_data_rate; 2680 } 2681 2682 static void 2683 skl_allocate_pipe_ddb(struct drm_crtc *crtc, 2684 const struct intel_wm_config *config, 2685 const struct skl_pipe_wm_parameters *params, 2686 struct skl_ddb_allocation *ddb /* out */) 2687 { 2688 struct drm_device *dev = crtc->dev; 2689 struct drm_i915_private *dev_priv = dev->dev_private; 2690 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2691 enum i915_pipe pipe = intel_crtc->pipe; 2692 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 2693 uint16_t alloc_size, start, cursor_blocks; 2694 uint16_t minimum[I915_MAX_PLANES]; 2695 uint16_t y_minimum[I915_MAX_PLANES]; 2696 unsigned int total_data_rate; 2697 int plane; 2698 2699 skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc); 2700 alloc_size = skl_ddb_entry_size(alloc); 2701 if (alloc_size == 0) { 2702 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 2703 memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe])); 2704 return; 2705 } 2706 2707 cursor_blocks = skl_cursor_allocation(config); 2708 ddb->cursor[pipe].start = alloc->end - cursor_blocks; 2709 ddb->cursor[pipe].end = alloc->end; 2710 2711 alloc_size -= cursor_blocks; 2712 alloc->end -= cursor_blocks; 2713 2714 /* 1. Allocate the minimum required blocks for each active plane */ 2715 for_each_plane(dev_priv, pipe, plane) { 2716 const struct intel_plane_wm_parameters *p; 2717 2718 p = &params->plane[plane]; 2719 if (!p->enabled) 2720 continue; 2721 2722 minimum[plane] = 8; 2723 alloc_size -= minimum[plane]; 2724 y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0; 2725 alloc_size -= y_minimum[plane]; 2726 } 2727 2728 /* 2729 * 2. Distribute the remaining space in proportion to the amount of 2730 * data each plane needs to fetch from memory. 2731 * 2732 * FIXME: we may not allocate every single block here.
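 * E.g. a plane responsible for half of the pipe's total data rate gets
 * its 8 block minimum plus roughly half of the remaining alloc_size
 * (integer division, so a few blocks can be left over).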
2733 */ 2734 total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params); 2735 2736 start = alloc->start; 2737 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { 2738 const struct intel_plane_wm_parameters *p; 2739 unsigned int data_rate, y_data_rate; 2740 uint16_t plane_blocks, y_plane_blocks = 0; 2741 2742 p = &params->plane[plane]; 2743 if (!p->enabled) 2744 continue; 2745 2746 data_rate = skl_plane_relative_data_rate(p, 0); 2747 2748 /* 2749 * allocation for (packed formats) or (uv-plane part of planar format): 2750 * promote the expression to 64 bits to avoid overflowing, the 2751 * result is < available as data_rate / total_data_rate < 1 2752 */ 2753 plane_blocks = minimum[plane]; 2754 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 2755 total_data_rate); 2756 2757 ddb->plane[pipe][plane].start = start; 2758 ddb->plane[pipe][plane].end = start + plane_blocks; 2759 2760 start += plane_blocks; 2761 2762 /* 2763 * allocation for y_plane part of planar format: 2764 */ 2765 if (p->y_bytes_per_pixel) { 2766 y_data_rate = skl_plane_relative_data_rate(p, 1); 2767 y_plane_blocks = y_minimum[plane]; 2768 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, 2769 total_data_rate); 2770 2771 ddb->y_plane[pipe][plane].start = start; 2772 ddb->y_plane[pipe][plane].end = start + y_plane_blocks; 2773 2774 start += y_plane_blocks; 2775 } 2776 2777 } 2778 2779 } 2780 2781 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) 2782 { 2783 /* TODO: Take into account the scalers once we support them */ 2784 return config->base.adjusted_mode.crtc_clock; 2785 } 2786 2787 /* 2788 * The max latency should be 257 (max the punit can code is 255 and we add 2us 2789 * for the read latency) and bytes_per_pixel should always be <= 8, so that 2790 * should allow pixel_rate up to ~2 GHz which seems sufficient since max 2791 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
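 * In other words the worst case intermediate product in skl_wm_method1()
 * is roughly 257 * 1350000 * 8 ~= 2.8e9, which still fits in 32 bits.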
2792 */ 2793 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel, 2794 uint32_t latency) 2795 { 2796 uint32_t wm_intermediate_val, ret; 2797 2798 if (latency == 0) 2799 return UINT_MAX; 2800 2801 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512; 2802 ret = DIV_ROUND_UP(wm_intermediate_val, 1000); 2803 2804 return ret; 2805 } 2806 2807 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, 2808 uint32_t horiz_pixels, uint8_t bytes_per_pixel, 2809 uint64_t tiling, uint32_t latency) 2810 { 2811 uint32_t ret; 2812 uint32_t plane_bytes_per_line, plane_blocks_per_line; 2813 uint32_t wm_intermediate_val; 2814 2815 if (latency == 0) 2816 return UINT_MAX; 2817 2818 plane_bytes_per_line = horiz_pixels * bytes_per_pixel; 2819 2820 if (tiling == I915_FORMAT_MOD_Y_TILED || 2821 tiling == I915_FORMAT_MOD_Yf_TILED) { 2822 plane_bytes_per_line *= 4; 2823 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 2824 plane_blocks_per_line /= 4; 2825 } else { 2826 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 2827 } 2828 2829 wm_intermediate_val = latency * pixel_rate; 2830 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) * 2831 plane_blocks_per_line; 2832 2833 return ret; 2834 } 2835 2836 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, 2837 const struct intel_crtc *intel_crtc) 2838 { 2839 struct drm_device *dev = intel_crtc->base.dev; 2840 struct drm_i915_private *dev_priv = dev->dev_private; 2841 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 2842 enum i915_pipe pipe = intel_crtc->pipe; 2843 2844 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe], 2845 sizeof(new_ddb->plane[pipe]))) 2846 return true; 2847 2848 if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe], 2849 sizeof(new_ddb->cursor[pipe]))) 2850 return true; 2851 2852 return false; 2853 } 2854 2855 static void skl_compute_wm_global_parameters(struct drm_device *dev, 2856 struct intel_wm_config *config) 2857 { 2858 struct drm_crtc *crtc; 2859 struct drm_plane *plane; 2860 2861 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 2862 config->num_pipes_active += to_intel_crtc(crtc)->active; 2863 2864 /* FIXME: I don't think we need those two global parameters on SKL */ 2865 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 2866 struct intel_plane *intel_plane = to_intel_plane(plane); 2867 2868 config->sprites_enabled |= intel_plane->wm.enabled; 2869 config->sprites_scaled |= intel_plane->wm.scaled; 2870 } 2871 } 2872 2873 static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, 2874 struct skl_pipe_wm_parameters *p) 2875 { 2876 struct drm_device *dev = crtc->dev; 2877 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2878 enum i915_pipe pipe = intel_crtc->pipe; 2879 struct drm_plane *plane; 2880 struct drm_framebuffer *fb; 2881 int i = 1; /* Index for sprite planes start */ 2882 2883 p->active = intel_crtc->active; 2884 if (p->active) { 2885 p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; 2886 p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); 2887 2888 fb = crtc->primary->state->fb; 2889 /* For planar: Bpp is for uv plane, y_Bpp is for y plane */ 2890 if (fb) { 2891 p->plane[0].enabled = true; 2892 p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? 2893 drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8; 2894 p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? 
2895 drm_format_plane_cpp(fb->pixel_format, 0) : 0; 2896 p->plane[0].tiling = fb->modifier[0]; 2897 } else { 2898 p->plane[0].enabled = false; 2899 p->plane[0].bytes_per_pixel = 0; 2900 p->plane[0].y_bytes_per_pixel = 0; 2901 p->plane[0].tiling = DRM_FORMAT_MOD_NONE; 2902 } 2903 p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; 2904 p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; 2905 p->plane[0].rotation = crtc->primary->state->rotation; 2906 2907 fb = crtc->cursor->state->fb; 2908 p->cursor.y_bytes_per_pixel = 0; 2909 if (fb) { 2910 p->cursor.enabled = true; 2911 p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8; 2912 p->cursor.horiz_pixels = crtc->cursor->state->crtc_w; 2913 p->cursor.vert_pixels = crtc->cursor->state->crtc_h; 2914 } else { 2915 p->cursor.enabled = false; 2916 p->cursor.bytes_per_pixel = 0; 2917 p->cursor.horiz_pixels = 64; 2918 p->cursor.vert_pixels = 64; 2919 } 2920 } 2921 2922 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 2923 struct intel_plane *intel_plane = to_intel_plane(plane); 2924 2925 if (intel_plane->pipe == pipe && 2926 plane->type == DRM_PLANE_TYPE_OVERLAY) 2927 p->plane[i++] = intel_plane->wm; 2928 } 2929 } 2930 2931 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 2932 struct skl_pipe_wm_parameters *p, 2933 struct intel_plane_wm_parameters *p_params, 2934 uint16_t ddb_allocation, 2935 int level, 2936 uint16_t *out_blocks, /* out */ 2937 uint8_t *out_lines /* out */) 2938 { 2939 uint32_t latency = dev_priv->wm.skl_latency[level]; 2940 uint32_t method1, method2; 2941 uint32_t plane_bytes_per_line, plane_blocks_per_line; 2942 uint32_t res_blocks, res_lines; 2943 uint32_t selected_result; 2944 uint8_t bytes_per_pixel; 2945 2946 if (latency == 0 || !p->active || !p_params->enabled) 2947 return false; 2948 2949 bytes_per_pixel = p_params->y_bytes_per_pixel ? 
2950 p_params->y_bytes_per_pixel : 2951 p_params->bytes_per_pixel; 2952 method1 = skl_wm_method1(p->pixel_rate, 2953 bytes_per_pixel, 2954 latency); 2955 method2 = skl_wm_method2(p->pixel_rate, 2956 p->pipe_htotal, 2957 p_params->horiz_pixels, 2958 bytes_per_pixel, 2959 p_params->tiling, 2960 latency); 2961 2962 plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel; 2963 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 2964 2965 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || 2966 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) { 2967 uint32_t min_scanlines = 4; 2968 uint32_t y_tile_minimum; 2969 if (intel_rotation_90_or_270(p_params->rotation)) { 2970 switch (p_params->bytes_per_pixel) { 2971 case 1: 2972 min_scanlines = 16; 2973 break; 2974 case 2: 2975 min_scanlines = 8; 2976 break; 2977 case 8: 2978 WARN(1, "Unsupported pixel depth for rotation"); 2979 } 2980 } 2981 y_tile_minimum = plane_blocks_per_line * min_scanlines; 2982 selected_result = max(method2, y_tile_minimum); 2983 } else { 2984 if ((ddb_allocation / plane_blocks_per_line) >= 1) 2985 selected_result = min(method1, method2); 2986 else 2987 selected_result = method1; 2988 } 2989 2990 res_blocks = selected_result + 1; 2991 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); 2992 2993 if (level >= 1 && level <= 7) { 2994 if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || 2995 p_params->tiling == I915_FORMAT_MOD_Yf_TILED) 2996 res_lines += 4; 2997 else 2998 res_blocks++; 2999 } 3000 3001 if (res_blocks >= ddb_allocation || res_lines > 31) 3002 return false; 3003 3004 *out_blocks = res_blocks; 3005 *out_lines = res_lines; 3006 3007 return true; 3008 } 3009 3010 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, 3011 struct skl_ddb_allocation *ddb, 3012 struct skl_pipe_wm_parameters *p, 3013 enum i915_pipe pipe, 3014 int level, 3015 int num_planes, 3016 struct skl_wm_level *result) 3017 { 3018 uint16_t ddb_blocks; 3019 int i; 3020 3021 for (i = 0; i < num_planes; i++) { 3022 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 3023 3024 result->plane_en[i] = skl_compute_plane_wm(dev_priv, 3025 p, &p->plane[i], 3026 ddb_blocks, 3027 level, 3028 &result->plane_res_b[i], 3029 &result->plane_res_l[i]); 3030 } 3031 3032 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]); 3033 result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor, 3034 ddb_blocks, level, 3035 &result->cursor_res_b, 3036 &result->cursor_res_l); 3037 } 3038 3039 static uint32_t 3040 skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) 3041 { 3042 if (!to_intel_crtc(crtc)->active) 3043 return 0; 3044 3045 return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); 3046 3047 } 3048 3049 static void skl_compute_transition_wm(struct drm_crtc *crtc, 3050 struct skl_pipe_wm_parameters *params, 3051 struct skl_wm_level *trans_wm /* out */) 3052 { 3053 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3054 int i; 3055 3056 if (!params->active) 3057 return; 3058 3059 /* Until we know more, just disable transition WMs */ 3060 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3061 trans_wm->plane_en[i] = false; 3062 trans_wm->cursor_en = false; 3063 } 3064 3065 static void skl_compute_pipe_wm(struct drm_crtc *crtc, 3066 struct skl_ddb_allocation *ddb, 3067 struct skl_pipe_wm_parameters *params, 3068 struct skl_pipe_wm *pipe_wm) 3069 { 3070 struct drm_device *dev = crtc->dev; 3071 const struct drm_i915_private *dev_priv = dev->dev_private; 3072 struct intel_crtc 
*intel_crtc = to_intel_crtc(crtc); 3073 int level, max_level = ilk_wm_max_level(dev); 3074 3075 for (level = 0; level <= max_level; level++) { 3076 skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe, 3077 level, intel_num_planes(intel_crtc), 3078 &pipe_wm->wm[level]); 3079 } 3080 pipe_wm->linetime = skl_compute_linetime_wm(crtc, params); 3081 3082 skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm); 3083 } 3084 3085 static void skl_compute_wm_results(struct drm_device *dev, 3086 struct skl_pipe_wm_parameters *p, 3087 struct skl_pipe_wm *p_wm, 3088 struct skl_wm_values *r, 3089 struct intel_crtc *intel_crtc) 3090 { 3091 int level, max_level = ilk_wm_max_level(dev); 3092 enum i915_pipe pipe = intel_crtc->pipe; 3093 uint32_t temp; 3094 int i; 3095 3096 for (level = 0; level <= max_level; level++) { 3097 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 3098 temp = 0; 3099 3100 temp |= p_wm->wm[level].plane_res_l[i] << 3101 PLANE_WM_LINES_SHIFT; 3102 temp |= p_wm->wm[level].plane_res_b[i]; 3103 if (p_wm->wm[level].plane_en[i]) 3104 temp |= PLANE_WM_EN; 3105 3106 r->plane[pipe][i][level] = temp; 3107 } 3108 3109 temp = 0; 3110 3111 temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT; 3112 temp |= p_wm->wm[level].cursor_res_b; 3113 3114 if (p_wm->wm[level].cursor_en) 3115 temp |= PLANE_WM_EN; 3116 3117 r->cursor[pipe][level] = temp; 3118 3119 } 3120 3121 /* transition WMs */ 3122 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 3123 temp = 0; 3124 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT; 3125 temp |= p_wm->trans_wm.plane_res_b[i]; 3126 if (p_wm->trans_wm.plane_en[i]) 3127 temp |= PLANE_WM_EN; 3128 3129 r->plane_trans[pipe][i] = temp; 3130 } 3131 3132 temp = 0; 3133 temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT; 3134 temp |= p_wm->trans_wm.cursor_res_b; 3135 if (p_wm->trans_wm.cursor_en) 3136 temp |= PLANE_WM_EN; 3137 3138 r->cursor_trans[pipe] = temp; 3139 3140 r->wm_linetime[pipe] = p_wm->linetime; 3141 } 3142 3143 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg, 3144 const struct skl_ddb_entry *entry) 3145 { 3146 if (entry->end) 3147 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start); 3148 else 3149 I915_WRITE(reg, 0); 3150 } 3151 3152 static void skl_write_wm_values(struct drm_i915_private *dev_priv, 3153 const struct skl_wm_values *new) 3154 { 3155 struct drm_device *dev = dev_priv->dev; 3156 struct intel_crtc *crtc; 3157 3158 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { 3159 int i, level, max_level = ilk_wm_max_level(dev); 3160 enum i915_pipe pipe = crtc->pipe; 3161 3162 if (!new->dirty[pipe]) 3163 continue; 3164 3165 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); 3166 3167 for (level = 0; level <= max_level; level++) { 3168 for (i = 0; i < intel_num_planes(crtc); i++) 3169 I915_WRITE(PLANE_WM(pipe, i, level), 3170 new->plane[pipe][i][level]); 3171 I915_WRITE(CUR_WM(pipe, level), 3172 new->cursor[pipe][level]); 3173 } 3174 for (i = 0; i < intel_num_planes(crtc); i++) 3175 I915_WRITE(PLANE_WM_TRANS(pipe, i), 3176 new->plane_trans[pipe][i]); 3177 I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]); 3178 3179 for (i = 0; i < intel_num_planes(crtc); i++) { 3180 skl_ddb_entry_write(dev_priv, 3181 PLANE_BUF_CFG(pipe, i), 3182 &new->ddb.plane[pipe][i]); 3183 skl_ddb_entry_write(dev_priv, 3184 PLANE_NV12_BUF_CFG(pipe, i), 3185 &new->ddb.y_plane[pipe][i]); 3186 } 3187 3188 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), 3189 &new->ddb.cursor[pipe]); 3190 
} 3191 } 3192 3193 /* 3194 * When setting up a new DDB allocation arrangement, we need to correctly 3195 * sequence the times at which the new allocations for the pipes are taken into 3196 * account or we'll have pipes fetching from space previously allocated to 3197 * another pipe. 3198 * 3199 * Roughly the sequence looks like: 3200 * 1. re-allocate the pipe(s) with the allocation being reduced and not 3201 * overlapping with a previous light-up pipe (another way to put it is: 3202 * pipes with their new allocation strickly included into their old ones). 3203 * 2. re-allocate the other pipes that get their allocation reduced 3204 * 3. allocate the pipes having their allocation increased 3205 * 3206 * Steps 1. and 2. are here to take care of the following case: 3207 * - Initially DDB looks like this: 3208 * | B | C | 3209 * - enable pipe A. 3210 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C 3211 * allocation 3212 * | A | B | C | 3213 * 3214 * We need to sequence the re-allocation: C, B, A (and not B, C, A). 3215 */ 3216 3217 static void 3218 skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int pass) 3219 { 3220 int plane; 3221 3222 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass); 3223 3224 for_each_plane(dev_priv, pipe, plane) { 3225 I915_WRITE(PLANE_SURF(pipe, plane), 3226 I915_READ(PLANE_SURF(pipe, plane))); 3227 } 3228 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); 3229 } 3230 3231 static bool 3232 skl_ddb_allocation_included(const struct skl_ddb_allocation *old, 3233 const struct skl_ddb_allocation *new, 3234 enum i915_pipe pipe) 3235 { 3236 uint16_t old_size, new_size; 3237 3238 old_size = skl_ddb_entry_size(&old->pipe[pipe]); 3239 new_size = skl_ddb_entry_size(&new->pipe[pipe]); 3240 3241 return old_size != new_size && 3242 new->pipe[pipe].start >= old->pipe[pipe].start && 3243 new->pipe[pipe].end <= old->pipe[pipe].end; 3244 } 3245 3246 static void skl_flush_wm_values(struct drm_i915_private *dev_priv, 3247 struct skl_wm_values *new_values) 3248 { 3249 struct drm_device *dev = dev_priv->dev; 3250 struct skl_ddb_allocation *cur_ddb, *new_ddb; 3251 bool reallocated[I915_MAX_PIPES] = {}; 3252 struct intel_crtc *crtc; 3253 enum i915_pipe pipe; 3254 3255 new_ddb = &new_values->ddb; 3256 cur_ddb = &dev_priv->wm.skl_hw.ddb; 3257 3258 /* 3259 * First pass: flush the pipes with the new allocation contained into 3260 * the old space. 3261 * 3262 * We'll wait for the vblank on those pipes to ensure we can safely 3263 * re-allocate the freed space without this pipe fetching from it. 3264 */ 3265 for_each_intel_crtc(dev, crtc) { 3266 if (!crtc->active) 3267 continue; 3268 3269 pipe = crtc->pipe; 3270 3271 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe)) 3272 continue; 3273 3274 skl_wm_flush_pipe(dev_priv, pipe, 1); 3275 intel_wait_for_vblank(dev, pipe); 3276 3277 reallocated[pipe] = true; 3278 } 3279 3280 3281 /* 3282 * Second pass: flush the pipes that are having their allocation 3283 * reduced, but overlapping with a previous allocation. 3284 * 3285 * Here as well we need to wait for the vblank to make sure the freed 3286 * space is not used anymore. 
3287 */ 3288 for_each_intel_crtc(dev, crtc) { 3289 if (!crtc->active) 3290 continue; 3291 3292 pipe = crtc->pipe; 3293 3294 if (reallocated[pipe]) 3295 continue; 3296 3297 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) < 3298 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) { 3299 skl_wm_flush_pipe(dev_priv, pipe, 2); 3300 intel_wait_for_vblank(dev, pipe); 3301 reallocated[pipe] = true; 3302 } 3303 } 3304 3305 /* 3306 * Third pass: flush the pipes that got more space allocated. 3307 * 3308 * We don't need to actively wait for the update here, next vblank 3309 * will just get more DDB space with the correct WM values. 3310 */ 3311 for_each_intel_crtc(dev, crtc) { 3312 if (!crtc->active) 3313 continue; 3314 3315 pipe = crtc->pipe; 3316 3317 /* 3318 * At this point, only the pipes that got more space than before are 3319 * left to re-allocate. 3320 */ 3321 if (reallocated[pipe]) 3322 continue; 3323 3324 skl_wm_flush_pipe(dev_priv, pipe, 3); 3325 } 3326 } 3327 3328 static bool skl_update_pipe_wm(struct drm_crtc *crtc, 3329 struct skl_pipe_wm_parameters *params, 3330 struct intel_wm_config *config, 3331 struct skl_ddb_allocation *ddb, /* out */ 3332 struct skl_pipe_wm *pipe_wm /* out */) 3333 { 3334 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3335 3336 skl_compute_wm_pipe_parameters(crtc, params); 3337 skl_allocate_pipe_ddb(crtc, config, params, ddb); 3338 skl_compute_pipe_wm(crtc, ddb, params, pipe_wm); 3339 3340 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm))) 3341 return false; 3342 3343 intel_crtc->wm.skl_active = *pipe_wm; 3344 3345 return true; 3346 } 3347 3348 static void skl_update_other_pipe_wm(struct drm_device *dev, 3349 struct drm_crtc *crtc, 3350 struct intel_wm_config *config, 3351 struct skl_wm_values *r) 3352 { 3353 struct intel_crtc *intel_crtc; 3354 struct intel_crtc *this_crtc = to_intel_crtc(crtc); 3355 3356 /* 3357 * If the WM update hasn't changed the allocation for this_crtc (the 3358 * crtc we are currently computing the new WM values for), other 3359 * enabled crtcs will keep the same allocation and we don't need to 3360 * recompute anything for them. 3361 */ 3362 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc)) 3363 return; 3364 3365 /* 3366 * Otherwise, because of this_crtc being freshly enabled/disabled, the 3367 * other active pipes need new DDB allocation and WM values. 3368 */ 3369 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 3370 base.head) { 3371 struct skl_pipe_wm_parameters params = {}; 3372 struct skl_pipe_wm pipe_wm = {}; 3373 bool wm_changed; 3374 3375 if (this_crtc->pipe == intel_crtc->pipe) 3376 continue; 3377 3378 if (!intel_crtc->active) 3379 continue; 3380 3381 wm_changed = skl_update_pipe_wm(&intel_crtc->base, 3382 &params, config, 3383 &r->ddb, &pipe_wm); 3384 3385 /* 3386 * If we end up re-computing the other pipe WM values, it's 3387 * because it was really needed, so we expect the WM values to 3388 * be different.
3389 */ 3390 WARN_ON(!wm_changed); 3391 3392 skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc); 3393 r->dirty[intel_crtc->pipe] = true; 3394 } 3395 } 3396 3397 static void skl_update_wm(struct drm_crtc *crtc) 3398 { 3399 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3400 struct drm_device *dev = crtc->dev; 3401 struct drm_i915_private *dev_priv = dev->dev_private; 3402 struct skl_pipe_wm_parameters params = {}; 3403 struct skl_wm_values *results = &dev_priv->wm.skl_results; 3404 struct skl_pipe_wm pipe_wm = {}; 3405 struct intel_wm_config config = {}; 3406 3407 memset(results, 0, sizeof(*results)); 3408 3409 skl_compute_wm_global_parameters(dev, &config); 3410 3411 if (!skl_update_pipe_wm(crtc, &params, &config, 3412 &results->ddb, &pipe_wm)) 3413 return; 3414 3415 skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc); 3416 results->dirty[intel_crtc->pipe] = true; 3417 3418 skl_update_other_pipe_wm(dev, crtc, &config, results); 3419 skl_write_wm_values(dev_priv, results); 3420 skl_flush_wm_values(dev_priv, results); 3421 3422 /* store the new configuration */ 3423 dev_priv->wm.skl_hw = *results; 3424 } 3425 3426 static void 3427 skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc, 3428 uint32_t sprite_width, uint32_t sprite_height, 3429 int pixel_size, bool enabled, bool scaled) 3430 { 3431 struct intel_plane *intel_plane = to_intel_plane(plane); 3432 struct drm_framebuffer *fb = plane->state->fb; 3433 3434 intel_plane->wm.enabled = enabled; 3435 intel_plane->wm.scaled = scaled; 3436 intel_plane->wm.horiz_pixels = sprite_width; 3437 intel_plane->wm.vert_pixels = sprite_height; 3438 intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE; 3439 3440 /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */ 3441 intel_plane->wm.bytes_per_pixel = 3442 (fb && fb->pixel_format == DRM_FORMAT_NV12) ? 3443 drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size; 3444 intel_plane->wm.y_bytes_per_pixel = 3445 (fb && fb->pixel_format == DRM_FORMAT_NV12) ? 3446 drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0; 3447 3448 /* 3449 * Framebuffer can be NULL on plane disable, but it does not 3450 * matter for watermarks if we assume no tiling in that case.
3451 */ 3452 if (fb) 3453 intel_plane->wm.tiling = fb->modifier[0]; 3454 intel_plane->wm.rotation = plane->state->rotation; 3455 3456 skl_update_wm(crtc); 3457 } 3458 3459 static void ilk_update_wm(struct drm_crtc *crtc) 3460 { 3461 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3462 struct drm_device *dev = crtc->dev; 3463 struct drm_i915_private *dev_priv = dev->dev_private; 3464 struct ilk_wm_maximums max; 3465 struct ilk_pipe_wm_parameters params = {}; 3466 struct ilk_wm_values results = {}; 3467 enum intel_ddb_partitioning partitioning; 3468 struct intel_pipe_wm pipe_wm = {}; 3469 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 3470 struct intel_wm_config config = {}; 3471 3472 ilk_compute_wm_parameters(crtc, &params); 3473 3474 intel_compute_pipe_wm(crtc, &params, &pipe_wm); 3475 3476 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) 3477 return; 3478 3479 intel_crtc->wm.active = pipe_wm; 3480 3481 ilk_compute_wm_config(dev, &config); 3482 3483 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max); 3484 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2); 3485 3486 /* 5/6 split only in single pipe config on IVB+ */ 3487 if (INTEL_INFO(dev)->gen >= 7 && 3488 config.num_pipes_active == 1 && config.sprites_enabled) { 3489 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max); 3490 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6); 3491 3492 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6); 3493 } else { 3494 best_lp_wm = &lp_wm_1_2; 3495 } 3496 3497 partitioning = (best_lp_wm == &lp_wm_1_2) ? 3498 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6; 3499 3500 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results); 3501 3502 ilk_write_wm_values(dev_priv, &results); 3503 } 3504 3505 static void 3506 ilk_update_sprite_wm(struct drm_plane *plane, 3507 struct drm_crtc *crtc, 3508 uint32_t sprite_width, uint32_t sprite_height, 3509 int pixel_size, bool enabled, bool scaled) 3510 { 3511 struct drm_device *dev = plane->dev; 3512 struct intel_plane *intel_plane = to_intel_plane(plane); 3513 3514 intel_plane->wm.enabled = enabled; 3515 intel_plane->wm.scaled = scaled; 3516 intel_plane->wm.horiz_pixels = sprite_width; 3517 intel_plane->wm.vert_pixels = sprite_height; 3518 intel_plane->wm.bytes_per_pixel = pixel_size; 3519 3520 /* 3521 * IVB workaround: must disable low power watermarks for at least 3522 * one frame before enabling scaling. LP watermarks can be re-enabled 3523 * when scaling is disabled.
3524 * 3525 * WaCxSRDisabledForSpriteScaling:ivb 3526 */ 3527 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) 3528 intel_wait_for_vblank(dev, intel_plane->pipe); 3529 3530 ilk_update_wm(crtc); 3531 } 3532 3533 static void skl_pipe_wm_active_state(uint32_t val, 3534 struct skl_pipe_wm *active, 3535 bool is_transwm, 3536 bool is_cursor, 3537 int i, 3538 int level) 3539 { 3540 bool is_enabled = (val & PLANE_WM_EN) != 0; 3541 3542 if (!is_transwm) { 3543 if (!is_cursor) { 3544 active->wm[level].plane_en[i] = is_enabled; 3545 active->wm[level].plane_res_b[i] = 3546 val & PLANE_WM_BLOCKS_MASK; 3547 active->wm[level].plane_res_l[i] = 3548 (val >> PLANE_WM_LINES_SHIFT) & 3549 PLANE_WM_LINES_MASK; 3550 } else { 3551 active->wm[level].cursor_en = is_enabled; 3552 active->wm[level].cursor_res_b = 3553 val & PLANE_WM_BLOCKS_MASK; 3554 active->wm[level].cursor_res_l = 3555 (val >> PLANE_WM_LINES_SHIFT) & 3556 PLANE_WM_LINES_MASK; 3557 } 3558 } else { 3559 if (!is_cursor) { 3560 active->trans_wm.plane_en[i] = is_enabled; 3561 active->trans_wm.plane_res_b[i] = 3562 val & PLANE_WM_BLOCKS_MASK; 3563 active->trans_wm.plane_res_l[i] = 3564 (val >> PLANE_WM_LINES_SHIFT) & 3565 PLANE_WM_LINES_MASK; 3566 } else { 3567 active->trans_wm.cursor_en = is_enabled; 3568 active->trans_wm.cursor_res_b = 3569 val & PLANE_WM_BLOCKS_MASK; 3570 active->trans_wm.cursor_res_l = 3571 (val >> PLANE_WM_LINES_SHIFT) & 3572 PLANE_WM_LINES_MASK; 3573 } 3574 } 3575 } 3576 3577 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) 3578 { 3579 struct drm_device *dev = crtc->dev; 3580 struct drm_i915_private *dev_priv = dev->dev_private; 3581 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 3582 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3583 struct skl_pipe_wm *active = &intel_crtc->wm.skl_active; 3584 enum i915_pipe pipe = intel_crtc->pipe; 3585 int level, i, max_level; 3586 uint32_t temp; 3587 3588 max_level = ilk_wm_max_level(dev); 3589 3590 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 3591 3592 for (level = 0; level <= max_level; level++) { 3593 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3594 hw->plane[pipe][i][level] = 3595 I915_READ(PLANE_WM(pipe, i, level)); 3596 hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level)); 3597 } 3598 3599 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3600 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); 3601 hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe)); 3602 3603 if (!intel_crtc->active) 3604 return; 3605 3606 hw->dirty[pipe] = true; 3607 3608 active->linetime = hw->wm_linetime[pipe]; 3609 3610 for (level = 0; level <= max_level; level++) { 3611 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 3612 temp = hw->plane[pipe][i][level]; 3613 skl_pipe_wm_active_state(temp, active, false, 3614 false, i, level); 3615 } 3616 temp = hw->cursor[pipe][level]; 3617 skl_pipe_wm_active_state(temp, active, false, true, i, level); 3618 } 3619 3620 for (i = 0; i < intel_num_planes(intel_crtc); i++) { 3621 temp = hw->plane_trans[pipe][i]; 3622 skl_pipe_wm_active_state(temp, active, true, false, i, 0); 3623 } 3624 3625 temp = hw->cursor_trans[pipe]; 3626 skl_pipe_wm_active_state(temp, active, true, true, i, 0); 3627 } 3628 3629 void skl_wm_get_hw_state(struct drm_device *dev) 3630 { 3631 struct drm_i915_private *dev_priv = dev->dev_private; 3632 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; 3633 struct drm_crtc *crtc; 3634 3635 skl_ddb_get_hw_state(dev_priv, ddb); 3636 list_for_each_entry(crtc, &dev->mode_config.crtc_list, 
head) 3637 skl_pipe_wm_get_hw_state(crtc); 3638 } 3639 3640 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 3641 { 3642 struct drm_device *dev = crtc->dev; 3643 struct drm_i915_private *dev_priv = dev->dev_private; 3644 struct ilk_wm_values *hw = &dev_priv->wm.hw; 3645 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3646 struct intel_pipe_wm *active = &intel_crtc->wm.active; 3647 enum i915_pipe pipe = intel_crtc->pipe; 3648 static const unsigned int wm0_pipe_reg[] = { 3649 [PIPE_A] = WM0_PIPEA_ILK, 3650 [PIPE_B] = WM0_PIPEB_ILK, 3651 [PIPE_C] = WM0_PIPEC_IVB, 3652 }; 3653 3654 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]); 3655 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 3656 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 3657 3658 active->pipe_enabled = intel_crtc->active; 3659 3660 if (active->pipe_enabled) { 3661 u32 tmp = hw->wm_pipe[pipe]; 3662 3663 /* 3664 * For active pipes LP0 watermark is marked as 3665 * enabled, and LP1+ watermaks as disabled since 3666 * we can't really reverse compute them in case 3667 * multiple pipes are active. 3668 */ 3669 active->wm[0].enable = true; 3670 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT; 3671 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT; 3672 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK; 3673 active->linetime = hw->wm_linetime[pipe]; 3674 } else { 3675 int level, max_level = ilk_wm_max_level(dev); 3676 3677 /* 3678 * For inactive pipes, all watermark levels 3679 * should be marked as enabled but zeroed, 3680 * which is what we'd compute them to. 3681 */ 3682 for (level = 0; level <= max_level; level++) 3683 active->wm[level].enable = true; 3684 } 3685 } 3686 3687 void ilk_wm_get_hw_state(struct drm_device *dev) 3688 { 3689 struct drm_i915_private *dev_priv = dev->dev_private; 3690 struct ilk_wm_values *hw = &dev_priv->wm.hw; 3691 struct drm_crtc *crtc; 3692 3693 for_each_crtc(dev, crtc) 3694 ilk_pipe_wm_get_hw_state(crtc); 3695 3696 hw->wm_lp[0] = I915_READ(WM1_LP_ILK); 3697 hw->wm_lp[1] = I915_READ(WM2_LP_ILK); 3698 hw->wm_lp[2] = I915_READ(WM3_LP_ILK); 3699 3700 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); 3701 if (INTEL_INFO(dev)->gen >= 7) { 3702 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); 3703 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); 3704 } 3705 3706 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 3707 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? 3708 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 3709 else if (IS_IVYBRIDGE(dev)) 3710 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? 3711 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; 3712 3713 hw->enable_fbc_wm = 3714 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); 3715 } 3716 3717 /** 3718 * intel_update_watermarks - update FIFO watermark values based on current modes 3719 * 3720 * Calculate watermark values for the various WM regs based on current mode 3721 * and plane configuration. 3722 * 3723 * There are several cases to deal with here: 3724 * - normal (i.e. non-self-refresh) 3725 * - self-refresh (SR) mode 3726 * - lines are large relative to FIFO size (buffer can hold up to 2) 3727 * - lines are small relative to FIFO size (buffer can hold more than 2 3728 * lines), so need to account for TLB latency 3729 * 3730 * The normal calculation is: 3731 * watermark = dotclock * bytes per pixel * latency 3732 * where latency is platform & configuration dependent (we assume pessimal 3733 * values here). 
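 * For example (illustrative numbers only): a 148.5 MHz dot clock at
 * 4 bytes per pixel with 2 us of latency needs roughly
 * 148500000 * 4 * 0.000002 = 1188 bytes of FIFO, before the rounding
 * and the extra entries described below.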
3734 * 3735 * The SR calculation is: 3736 * watermark = (trunc(latency/line time)+1) * surface width * 3737 * bytes per pixel 3738 * where 3739 * line time = htotal / dotclock 3740 * surface width = hdisplay for normal plane and 64 for cursor 3741 * and latency is assumed to be high, as above. 3742 * 3743 * The final value programmed to the register should always be rounded up, 3744 * and include an extra 2 entries to account for clock crossings. 3745 * 3746 * We don't use the sprite, so we can ignore that. And on Crestline we have 3747 * to set the non-SR watermarks to 8. 3748 */ 3749 void intel_update_watermarks(struct drm_crtc *crtc) 3750 { 3751 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 3752 3753 if (dev_priv->display.update_wm) 3754 dev_priv->display.update_wm(crtc); 3755 } 3756 3757 void intel_update_sprite_watermarks(struct drm_plane *plane, 3758 struct drm_crtc *crtc, 3759 uint32_t sprite_width, 3760 uint32_t sprite_height, 3761 int pixel_size, 3762 bool enabled, bool scaled) 3763 { 3764 struct drm_i915_private *dev_priv = plane->dev->dev_private; 3765 3766 if (dev_priv->display.update_sprite_wm) 3767 dev_priv->display.update_sprite_wm(plane, crtc, 3768 sprite_width, sprite_height, 3769 pixel_size, enabled, scaled); 3770 } 3771 3772 /** 3773 * Lock protecting IPS related data structures 3774 */ 3775 struct lock mchdev_lock; 3776 LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE); 3777 3778 /* Global for IPS driver to get at the current i915 device. Protected by 3779 * mchdev_lock. */ 3780 static struct drm_i915_private *i915_mch_dev; 3781 3782 bool ironlake_set_drps(struct drm_device *dev, u8 val) 3783 { 3784 struct drm_i915_private *dev_priv = dev->dev_private; 3785 u16 rgvswctl; 3786 3787 assert_spin_locked(&mchdev_lock); 3788 3789 rgvswctl = I915_READ16(MEMSWCTL); 3790 if (rgvswctl & MEMCTL_CMD_STS) { 3791 DRM_DEBUG("gpu busy, RCS change rejected\n"); 3792 return false; /* still busy with another command */ 3793 } 3794 3795 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 3796 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 3797 I915_WRITE16(MEMSWCTL, rgvswctl); 3798 POSTING_READ16(MEMSWCTL); 3799 3800 rgvswctl |= MEMCTL_CMD_STS; 3801 I915_WRITE16(MEMSWCTL, rgvswctl); 3802 3803 return true; 3804 } 3805 3806 static void ironlake_enable_drps(struct drm_device *dev) 3807 { 3808 struct drm_i915_private *dev_priv = dev->dev_private; 3809 u32 rgvmodectl = I915_READ(MEMMODECTL); 3810 u8 fmax, fmin, fstart, vstart; 3811 3812 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 3813 3814 /* Enable temp reporting */ 3815 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 3816 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 3817 3818 /* 100ms RC evaluation intervals */ 3819 I915_WRITE(RCUPEI, 100000); 3820 I915_WRITE(RCDNEI, 100000); 3821 3822 /* Set max/min thresholds to 90ms and 80ms respectively */ 3823 I915_WRITE(RCBMAXAVG, 90000); 3824 I915_WRITE(RCBMINAVG, 80000); 3825 3826 I915_WRITE(MEMIHYST, 1); 3827 3828 /* Set up min, max, and cur for interrupt handling */ 3829 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 3830 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 3831 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 3832 MEMMODE_FSTART_SHIFT; 3833 3834 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 3835 PXVFREQ_PX_SHIFT; 3836 3837 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ 3838 dev_priv->ips.fstart = fstart; 3839 3840 dev_priv->ips.max_delay = fstart; 3841 dev_priv->ips.min_delay = fmin; 3842 dev_priv->ips.cur_delay = 
fstart; 3843 3844 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", 3845 fmax, fmin, fstart); 3846 3847 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 3848 3849 /* 3850 * Interrupts will be enabled in ironlake_irq_postinstall 3851 */ 3852 3853 I915_WRITE(VIDSTART, vstart); 3854 POSTING_READ(VIDSTART); 3855 3856 rgvmodectl |= MEMMODE_SWMODE_EN; 3857 I915_WRITE(MEMMODECTL, rgvmodectl); 3858 3859 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) 3860 DRM_ERROR("stuck trying to change perf mode\n"); 3861 mdelay(1); 3862 3863 ironlake_set_drps(dev, fstart); 3864 3865 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 3866 I915_READ(0x112e0); 3867 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); 3868 dev_priv->ips.last_count2 = I915_READ(0x112f4); 3869 dev_priv->ips.last_time2 = ktime_get_raw_ns(); 3870 3871 lockmgr(&mchdev_lock, LK_RELEASE); 3872 } 3873 3874 static void ironlake_disable_drps(struct drm_device *dev) 3875 { 3876 struct drm_i915_private *dev_priv = dev->dev_private; 3877 u16 rgvswctl; 3878 3879 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 3880 3881 rgvswctl = I915_READ16(MEMSWCTL); 3882 3883 /* Ack interrupts, disable EFC interrupt */ 3884 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 3885 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 3886 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 3887 I915_WRITE(DEIIR, DE_PCU_EVENT); 3888 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 3889 3890 /* Go back to the starting frequency */ 3891 ironlake_set_drps(dev, dev_priv->ips.fstart); 3892 mdelay(1); 3893 rgvswctl |= MEMCTL_CMD_STS; 3894 I915_WRITE(MEMSWCTL, rgvswctl); 3895 mdelay(1); 3896 3897 lockmgr(&mchdev_lock, LK_RELEASE); 3898 } 3899 3900 /* There's a funny hw issue where the hw returns all 0 when reading from 3901 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value 3902 * ourselves, instead of doing a rmw cycle (which might result in us clearing 3903 * all limits and the gpu stuck at whatever frequency it is at atm). 3904 */ 3905 static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) 3906 { 3907 u32 limits; 3908 3909 /* Only set the down limit when we've reached the lowest level to avoid 3910 * getting more interrupts, otherwise leave this clear. This prevents a 3911 * race in the hw when coming out of rc6: There's a tiny window where 3912 * the hw runs at the minimal clock before selecting the desired 3913 * frequency, if the down threshold expires in that window we will not 3914 * receive a down interrupt. 
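 * As an illustration (made-up softlimit values): with
 * max_freq_softlimit 0x10 and min_freq_softlimit 0x4 on a pre-gen9
 * part the register value is 0x10 << 24, or (0x10 << 24) | (0x4 << 16)
 * once val has dropped to the minimum; gen9 packs the same two fields
 * at bits 23 and 14 instead.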
*/ 3915 if (IS_GEN9(dev_priv->dev)) { 3916 limits = (dev_priv->rps.max_freq_softlimit) << 23; 3917 if (val <= dev_priv->rps.min_freq_softlimit) 3918 limits |= (dev_priv->rps.min_freq_softlimit) << 14; 3919 } else { 3920 limits = dev_priv->rps.max_freq_softlimit << 24; 3921 if (val <= dev_priv->rps.min_freq_softlimit) 3922 limits |= dev_priv->rps.min_freq_softlimit << 16; 3923 } 3924 3925 return limits; 3926 } 3927 3928 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) 3929 { 3930 int new_power; 3931 u32 threshold_up = 0, threshold_down = 0; /* in % */ 3932 u32 ei_up = 0, ei_down = 0; 3933 3934 new_power = dev_priv->rps.power; 3935 switch (dev_priv->rps.power) { 3936 case LOW_POWER: 3937 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) 3938 new_power = BETWEEN; 3939 break; 3940 3941 case BETWEEN: 3942 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) 3943 new_power = LOW_POWER; 3944 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) 3945 new_power = HIGH_POWER; 3946 break; 3947 3948 case HIGH_POWER: 3949 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) 3950 new_power = BETWEEN; 3951 break; 3952 } 3953 /* Max/min bins are special */ 3954 if (val <= dev_priv->rps.min_freq_softlimit) 3955 new_power = LOW_POWER; 3956 if (val >= dev_priv->rps.max_freq_softlimit) 3957 new_power = HIGH_POWER; 3958 if (new_power == dev_priv->rps.power) 3959 return; 3960 3961 /* Note the units here are not exactly 1us, but 1280ns. */ 3962 switch (new_power) { 3963 case LOW_POWER: 3964 /* Upclock if more than 95% busy over 16ms */ 3965 ei_up = 16000; 3966 threshold_up = 95; 3967 3968 /* Downclock if less than 85% busy over 32ms */ 3969 ei_down = 32000; 3970 threshold_down = 85; 3971 break; 3972 3973 case BETWEEN: 3974 /* Upclock if more than 90% busy over 13ms */ 3975 ei_up = 13000; 3976 threshold_up = 90; 3977 3978 /* Downclock if less than 75% busy over 32ms */ 3979 ei_down = 32000; 3980 threshold_down = 75; 3981 break; 3982 3983 case HIGH_POWER: 3984 /* Upclock if more than 85% busy over 10ms */ 3985 ei_up = 10000; 3986 threshold_up = 85; 3987 3988 /* Downclock if less than 60% busy over 32ms */ 3989 ei_down = 32000; 3990 threshold_down = 60; 3991 break; 3992 } 3993 3994 I915_WRITE(GEN6_RP_UP_EI, 3995 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 3996 I915_WRITE(GEN6_RP_UP_THRESHOLD, 3997 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100))); 3998 3999 I915_WRITE(GEN6_RP_DOWN_EI, 4000 GT_INTERVAL_FROM_US(dev_priv, ei_down)); 4001 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 4002 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100))); 4003 4004 I915_WRITE(GEN6_RP_CONTROL, 4005 GEN6_RP_MEDIA_TURBO | 4006 GEN6_RP_MEDIA_HW_NORMAL_MODE | 4007 GEN6_RP_MEDIA_IS_GFX | 4008 GEN6_RP_ENABLE | 4009 GEN6_RP_UP_BUSY_AVG | 4010 GEN6_RP_DOWN_IDLE_AVG); 4011 4012 dev_priv->rps.power = new_power; 4013 dev_priv->rps.up_threshold = threshold_up; 4014 dev_priv->rps.down_threshold = threshold_down; 4015 dev_priv->rps.last_adj = 0; 4016 } 4017 4018 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) 4019 { 4020 u32 mask = 0; 4021 4022 if (val > dev_priv->rps.min_freq_softlimit) 4023 mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; 4024 if (val < dev_priv->rps.max_freq_softlimit) 4025 mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; 4026 4027 mask &= dev_priv->pm_rps_events; 4028 4029 return 
gen6_sanitize_rps_pm_mask(dev_priv, ~mask); 4030 } 4031 4032 /* gen6_set_rps is called to update the frequency request, but should also be 4033 * called when the range (min_delay and max_delay) is modified so that we can 4034 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 4035 static void gen6_set_rps(struct drm_device *dev, u8 val) 4036 { 4037 struct drm_i915_private *dev_priv = dev->dev_private; 4038 4039 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4040 WARN_ON(val > dev_priv->rps.max_freq); 4041 WARN_ON(val < dev_priv->rps.min_freq); 4042 4043 /* min/max delay may still have been modified so be sure to 4044 * write the limits value. 4045 */ 4046 if (val != dev_priv->rps.cur_freq) { 4047 gen6_set_rps_thresholds(dev_priv, val); 4048 4049 if (IS_GEN9(dev)) 4050 I915_WRITE(GEN6_RPNSWREQ, 4051 GEN9_FREQUENCY(val)); 4052 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4053 I915_WRITE(GEN6_RPNSWREQ, 4054 HSW_FREQUENCY(val)); 4055 else 4056 I915_WRITE(GEN6_RPNSWREQ, 4057 GEN6_FREQUENCY(val) | 4058 GEN6_OFFSET(0) | 4059 GEN6_AGGRESSIVE_TURBO); 4060 } 4061 4062 /* Make sure we continue to get interrupts 4063 * until we hit the minimum or maximum frequencies. 4064 */ 4065 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); 4066 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4067 4068 POSTING_READ(GEN6_RPNSWREQ); 4069 4070 dev_priv->rps.cur_freq = val; 4071 trace_intel_gpu_freq_change(val * 50); 4072 } 4073 4074 static void valleyview_set_rps(struct drm_device *dev, u8 val) 4075 { 4076 struct drm_i915_private *dev_priv = dev->dev_private; 4077 4078 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4079 WARN_ON(val > dev_priv->rps.max_freq); 4080 WARN_ON(val < dev_priv->rps.min_freq); 4081 4082 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), 4083 "Odd GPU freq value\n")) 4084 val &= ~1; 4085 4086 if (val != dev_priv->rps.cur_freq) { 4087 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 4088 if (!IS_CHERRYVIEW(dev_priv)) 4089 gen6_set_rps_thresholds(dev_priv, val); 4090 } 4091 4092 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4093 4094 dev_priv->rps.cur_freq = val; 4095 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4096 } 4097 4098 /* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down 4099 * 4100 * * If Gfx is Idle, then 4101 * 1. Forcewake Media well. 4102 * 2. Request idle freq. 4103 * 3. Release Forcewake of Media well. 4104 */ 4105 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 4106 { 4107 u32 val = dev_priv->rps.idle_freq; 4108 4109 if (dev_priv->rps.cur_freq <= val) 4110 return; 4111 4112 /* Wake up the media well, as that takes a lot less 4113 * power than the Render well. 
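 * The three steps listed above map directly onto the calls below:
 * grab FORCEWAKE_MEDIA, request the idle frequency, then release the
 * media well again.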
*/ 4114 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); 4115 valleyview_set_rps(dev_priv->dev, val); 4116 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); 4117 } 4118 4119 void gen6_rps_busy(struct drm_i915_private *dev_priv) 4120 { 4121 mutex_lock(&dev_priv->rps.hw_lock); 4122 if (dev_priv->rps.enabled) { 4123 if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) 4124 gen6_rps_reset_ei(dev_priv); 4125 I915_WRITE(GEN6_PMINTRMSK, 4126 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 4127 } 4128 mutex_unlock(&dev_priv->rps.hw_lock); 4129 } 4130 4131 void gen6_rps_idle(struct drm_i915_private *dev_priv) 4132 { 4133 struct drm_device *dev = dev_priv->dev; 4134 4135 mutex_lock(&dev_priv->rps.hw_lock); 4136 if (dev_priv->rps.enabled) { 4137 if (IS_VALLEYVIEW(dev)) 4138 vlv_set_rps_idle(dev_priv); 4139 else 4140 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 4141 dev_priv->rps.last_adj = 0; 4142 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 4143 } 4144 mutex_unlock(&dev_priv->rps.hw_lock); 4145 4146 spin_lock(&dev_priv->rps.client_lock); 4147 while (!list_empty(&dev_priv->rps.clients)) 4148 list_del_init(dev_priv->rps.clients.next); 4149 spin_unlock(&dev_priv->rps.client_lock); 4150 } 4151 4152 void gen6_rps_boost(struct drm_i915_private *dev_priv, 4153 struct intel_rps_client *rps, 4154 unsigned long submitted) 4155 { 4156 /* This is intentionally racy! We peek at the state here, then 4157 * validate inside the RPS worker. 4158 */ 4159 if (!(dev_priv->mm.busy && 4160 dev_priv->rps.enabled && 4161 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)) 4162 return; 4163 4164 /* Force a RPS boost (and don't count it against the client) if 4165 * the GPU is severely congested. 4166 */ 4167 if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES)) 4168 rps = NULL; 4169 4170 spin_lock(&dev_priv->rps.client_lock); 4171 if (rps == NULL || list_empty(&rps->link)) { 4172 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4173 if (dev_priv->rps.interrupts_enabled) { 4174 dev_priv->rps.client_boost = true; 4175 queue_work(dev_priv->wq, &dev_priv->rps.work); 4176 } 4177 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4178 4179 if (rps != NULL) { 4180 list_add(&rps->link, &dev_priv->rps.clients); 4181 rps->boosts++; 4182 } else 4183 dev_priv->rps.boosts++; 4184 } 4185 spin_unlock(&dev_priv->rps.client_lock); 4186 } 4187 4188 void intel_set_rps(struct drm_device *dev, u8 val) 4189 { 4190 if (IS_VALLEYVIEW(dev)) 4191 valleyview_set_rps(dev, val); 4192 else 4193 gen6_set_rps(dev, val); 4194 } 4195 4196 static void gen9_disable_rps(struct drm_device *dev) 4197 { 4198 struct drm_i915_private *dev_priv = dev->dev_private; 4199 4200 I915_WRITE(GEN6_RC_CONTROL, 0); 4201 I915_WRITE(GEN9_PG_ENABLE, 0); 4202 } 4203 4204 static void gen6_disable_rps(struct drm_device *dev) 4205 { 4206 struct drm_i915_private *dev_priv = dev->dev_private; 4207 4208 I915_WRITE(GEN6_RC_CONTROL, 0); 4209 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 4210 } 4211 4212 static void cherryview_disable_rps(struct drm_device *dev) 4213 { 4214 struct drm_i915_private *dev_priv = dev->dev_private; 4215 4216 I915_WRITE(GEN6_RC_CONTROL, 0); 4217 } 4218 4219 static void valleyview_disable_rps(struct drm_device *dev) 4220 { 4221 struct drm_i915_private *dev_priv = dev->dev_private; 4222 4223 /* we're doing forcewake before Disabling RC6, 4224 * This what the BIOS expects when going into suspend */ 4225 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4226 4227 I915_WRITE(GEN6_RC_CONTROL, 0); 4228 4229 
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4230 } 4231 4232 static void intel_print_rc6_info(struct drm_device *dev, u32 mode) 4233 { 4234 if (IS_VALLEYVIEW(dev)) { 4235 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) 4236 mode = GEN6_RC_CTL_RC6_ENABLE; 4237 else 4238 mode = 0; 4239 } 4240 if (HAS_RC6p(dev)) 4241 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", 4242 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off", 4243 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off", 4244 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off"); 4245 4246 else 4247 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", 4248 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off"); 4249 } 4250 4251 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) 4252 { 4253 /* No RC6 before Ironlake */ 4254 if (INTEL_INFO(dev)->gen < 5) 4255 return 0; 4256 4257 /* RC6 is only on Ironlake mobile not on desktop */ 4258 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev)) 4259 return 0; 4260 4261 /* Respect the kernel parameter if it is set */ 4262 if (enable_rc6 >= 0) { 4263 int mask; 4264 4265 if (HAS_RC6p(dev)) 4266 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 4267 INTEL_RC6pp_ENABLE; 4268 else 4269 mask = INTEL_RC6_ENABLE; 4270 4271 if ((enable_rc6 & mask) != enable_rc6) 4272 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", 4273 enable_rc6 & mask, enable_rc6, mask); 4274 4275 return enable_rc6 & mask; 4276 } 4277 4278 /* Disable RC6 on Ironlake */ 4279 if (INTEL_INFO(dev)->gen == 5) 4280 return 0; 4281 4282 if (IS_IVYBRIDGE(dev)) 4283 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 4284 4285 return INTEL_RC6_ENABLE; 4286 } 4287 4288 int intel_enable_rc6(const struct drm_device *dev) 4289 { 4290 return i915.enable_rc6; 4291 } 4292 4293 static void gen6_init_rps_frequencies(struct drm_device *dev) 4294 { 4295 struct drm_i915_private *dev_priv = dev->dev_private; 4296 uint32_t rp_state_cap; 4297 u32 ddcc_status = 0; 4298 int ret; 4299 4300 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 4301 /* All of these values are in units of 50MHz */ 4302 dev_priv->rps.cur_freq = 0; 4303 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 4304 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; 4305 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 4306 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; 4307 if (IS_SKYLAKE(dev)) { 4308 /* Store the frequency values in 16.66 MHZ units, which is 4309 the natural hardware unit for SKL */ 4310 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 4311 dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; 4312 dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; 4313 } 4314 /* hw_max = RP0 until we check for overclocking */ 4315 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 4316 4317 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 4318 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 4319 ret = sandybridge_pcode_read(dev_priv, 4320 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 4321 &ddcc_status); 4322 if (0 == ret) 4323 dev_priv->rps.efficient_freq = 4324 clamp_t(u8, 4325 ((ddcc_status >> 8) & 0xff), 4326 dev_priv->rps.min_freq, 4327 dev_priv->rps.max_freq); 4328 } 4329 4330 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 4331 4332 /* Preserve min/max settings in case of re-init */ 4333 if (dev_priv->rps.max_freq_softlimit == 0) 4334 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4335 4336 if (dev_priv->rps.min_freq_softlimit == 0) { 4337 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4338 dev_priv->rps.min_freq_softlimit = 4339 max_t(int, 
dev_priv->rps.efficient_freq, 4340 intel_freq_opcode(dev_priv, 450)); 4341 else 4342 dev_priv->rps.min_freq_softlimit = 4343 dev_priv->rps.min_freq; 4344 } 4345 } 4346 4347 /* See the Gen9_GT_PM_Programming_Guide doc for the below */ 4348 static void gen9_enable_rps(struct drm_device *dev) 4349 { 4350 struct drm_i915_private *dev_priv = dev->dev_private; 4351 4352 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4353 4354 gen6_init_rps_frequencies(dev); 4355 4356 /* Program defaults and thresholds for RPS*/ 4357 I915_WRITE(GEN6_RC_VIDEO_FREQ, 4358 GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); 4359 4360 /* 1 second timeout*/ 4361 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 4362 GT_INTERVAL_FROM_US(dev_priv, 1000000)); 4363 4364 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); 4365 4366 /* Leaning on the below call to gen6_set_rps to program/setup the 4367 * Up/Down EI & threshold registers, as well as the RP_CONTROL, 4368 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ 4369 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 4370 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); 4371 4372 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4373 } 4374 4375 static void gen9_enable_rc6(struct drm_device *dev) 4376 { 4377 struct drm_i915_private *dev_priv = dev->dev_private; 4378 struct intel_engine_cs *ring; 4379 uint32_t rc6_mask = 0; 4380 int unused; 4381 4382 /* 1a: Software RC state - RC0 */ 4383 I915_WRITE(GEN6_RC_STATE, 0); 4384 4385 /* 1b: Get forcewake during program sequence. Although the driver 4386 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 4387 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4388 4389 /* 2a: Disable RC states. */ 4390 I915_WRITE(GEN6_RC_CONTROL, 0); 4391 4392 /* 2b: Program RC6 thresholds.*/ 4393 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 4394 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 4395 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 4396 for_each_ring(ring, dev_priv, unused) 4397 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 4398 I915_WRITE(GEN6_RC_SLEEP, 0); 4399 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ 4400 4401 /* 2c: Program Coarse Power Gating Policies. */ 4402 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25); 4403 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); 4404 4405 /* 3a: Enable RC6 */ 4406 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 4407 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 4408 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 4409 "on" : "off"); 4410 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4411 GEN6_RC_CTL_EI_MODE(1) | 4412 rc6_mask); 4413 4414 /* 4415 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 4416 * WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6. 4417 */ 4418 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 4419 GEN9_MEDIA_PG_ENABLE : 0); 4420 4421 4422 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4423 4424 } 4425 4426 static void gen8_enable_rps(struct drm_device *dev) 4427 { 4428 struct drm_i915_private *dev_priv = dev->dev_private; 4429 struct intel_engine_cs *ring; 4430 uint32_t rc6_mask = 0; 4431 int unused; 4432 4433 /* 1a: Software RC state - RC0 */ 4434 I915_WRITE(GEN6_RC_STATE, 0); 4435 4436 /* 1c & 1d: Get forcewake during program sequence. Although the driver 4437 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 4438 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4439 4440 /* 2a: Disable RC states. 
*/ 4441 I915_WRITE(GEN6_RC_CONTROL, 0); 4442 4443 /* Initialize rps frequencies */ 4444 gen6_init_rps_frequencies(dev); 4445 4446 /* 2b: Program RC6 thresholds.*/ 4447 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 4448 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 4449 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 4450 for_each_ring(ring, dev_priv, unused) 4451 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 4452 I915_WRITE(GEN6_RC_SLEEP, 0); 4453 if (IS_BROADWELL(dev)) 4454 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ 4455 else 4456 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 4457 4458 /* 3: Enable RC6 */ 4459 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 4460 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 4461 intel_print_rc6_info(dev, rc6_mask); 4462 if (IS_BROADWELL(dev)) 4463 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4464 GEN7_RC_CTL_TO_MODE | 4465 rc6_mask); 4466 else 4467 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4468 GEN6_RC_CTL_EI_MODE(1) | 4469 rc6_mask); 4470 4471 /* 4 Program defaults and thresholds for RPS*/ 4472 I915_WRITE(GEN6_RPNSWREQ, 4473 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 4474 I915_WRITE(GEN6_RC_VIDEO_FREQ, 4475 HSW_FREQUENCY(dev_priv->rps.rp1_freq)); 4476 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ 4477 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ 4478 4479 /* Docs recommend 900MHz, and 300 MHz respectively */ 4480 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 4481 dev_priv->rps.max_freq_softlimit << 24 | 4482 dev_priv->rps.min_freq_softlimit << 16); 4483 4484 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ 4485 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ 4486 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ 4487 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */ 4488 4489 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 4490 4491 /* 5: Enable RPS */ 4492 I915_WRITE(GEN6_RP_CONTROL, 4493 GEN6_RP_MEDIA_TURBO | 4494 GEN6_RP_MEDIA_HW_NORMAL_MODE | 4495 GEN6_RP_MEDIA_IS_GFX | 4496 GEN6_RP_ENABLE | 4497 GEN6_RP_UP_BUSY_AVG | 4498 GEN6_RP_DOWN_IDLE_AVG); 4499 4500 /* 6: Ring frequency + overclocking (our driver does this later */ 4501 4502 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 4503 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 4504 4505 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4506 } 4507 4508 static void gen6_enable_rps(struct drm_device *dev) 4509 { 4510 struct drm_i915_private *dev_priv = dev->dev_private; 4511 struct intel_engine_cs *ring; 4512 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 4513 u32 gtfifodbg; 4514 int rc6_mode; 4515 int i, ret; 4516 4517 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4518 4519 /* Here begins a magic sequence of register writes to enable 4520 * auto-downclocking. 4521 * 4522 * Perhaps there might be some value in exposing these to 4523 * userspace... 
4524 */ 4525 I915_WRITE(GEN6_RC_STATE, 0); 4526 4527 /* Clear the DBG now so we don't confuse earlier errors */ 4528 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 4529 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 4530 I915_WRITE(GTFIFODBG, gtfifodbg); 4531 } 4532 4533 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4534 4535 /* Initialize rps frequencies */ 4536 gen6_init_rps_frequencies(dev); 4537 4538 /* disable the counters and set deterministic thresholds */ 4539 I915_WRITE(GEN6_RC_CONTROL, 0); 4540 4541 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 4542 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 4543 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 4544 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 4545 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 4546 4547 for_each_ring(ring, dev_priv, i) 4548 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 4549 4550 I915_WRITE(GEN6_RC_SLEEP, 0); 4551 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 4552 if (IS_IVYBRIDGE(dev)) 4553 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 4554 else 4555 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 4556 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); 4557 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 4558 4559 /* Check if we are enabling RC6 */ 4560 rc6_mode = intel_enable_rc6(dev_priv->dev); 4561 if (rc6_mode & INTEL_RC6_ENABLE) 4562 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 4563 4564 /* We don't use those on Haswell */ 4565 if (!IS_HASWELL(dev)) { 4566 if (rc6_mode & INTEL_RC6p_ENABLE) 4567 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 4568 4569 if (rc6_mode & INTEL_RC6pp_ENABLE) 4570 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 4571 } 4572 4573 intel_print_rc6_info(dev, rc6_mask); 4574 4575 I915_WRITE(GEN6_RC_CONTROL, 4576 rc6_mask | 4577 GEN6_RC_CTL_EI_MODE(1) | 4578 GEN6_RC_CTL_HW_ENABLE); 4579 4580 /* Power down if completely idle for over 50ms */ 4581 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); 4582 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 4583 4584 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0); 4585 if (ret) 4586 DRM_DEBUG_DRIVER("Failed to set the min frequency\n"); 4587 4588 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox); 4589 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */ 4590 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n", 4591 (dev_priv->rps.max_freq_softlimit & 0xff) * 50, 4592 (pcu_mbox & 0xff) * 50); 4593 dev_priv->rps.max_freq = pcu_mbox & 0xff; 4594 } 4595 4596 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 4597 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 4598 4599 rc6vids = 0; 4600 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 4601 if (IS_GEN6(dev) && ret) { 4602 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 4603 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 4604 DRM_DEBUG_DRIVER("You should update your BIOS. 
Correcting minimum rc6 voltage (%dmV->%dmV)\n", 4605 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 4606 rc6vids &= 0xffff00; 4607 rc6vids |= GEN6_ENCODE_RC6_VID(450); 4608 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); 4609 if (ret) 4610 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); 4611 } 4612 4613 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4614 } 4615 4616 static void __gen6_update_ring_freq(struct drm_device *dev) 4617 { 4618 struct drm_i915_private *dev_priv = dev->dev_private; 4619 int min_freq = 15; 4620 unsigned int gpu_freq; 4621 unsigned int max_ia_freq, min_ring_freq; 4622 int scaling_factor = 180; 4623 4624 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4625 4626 #if 0 4627 policy = cpufreq_cpu_get(0); 4628 if (policy) { 4629 max_ia_freq = policy->cpuinfo.max_freq; 4630 cpufreq_cpu_put(policy); 4631 } else { 4632 /* 4633 * Default to measured freq if none found, PCU will ensure we 4634 * don't go over 4635 */ 4636 max_ia_freq = tsc_khz; 4637 } 4638 #else 4639 max_ia_freq = tsc_frequency / 1000; 4640 #endif 4641 4642 /* Convert from kHz to MHz */ 4643 max_ia_freq /= 1000; 4644 4645 min_ring_freq = I915_READ(DCLK) & 0xf; 4646 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 4647 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 4648 4649 /* 4650 * For each potential GPU frequency, load a ring frequency we'd like 4651 * to use for memory access. We do this by specifying the IA frequency 4652 * the PCU should use as a reference to determine the ring frequency. 4653 */ 4654 for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq; 4655 gpu_freq--) { 4656 int diff = dev_priv->rps.max_freq - gpu_freq; 4657 unsigned int ia_freq = 0, ring_freq = 0; 4658 4659 if (INTEL_INFO(dev)->gen >= 8) { 4660 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 4661 ring_freq = max(min_ring_freq, gpu_freq); 4662 } else if (IS_HASWELL(dev)) { 4663 ring_freq = mult_frac(gpu_freq, 5, 4); 4664 ring_freq = max(min_ring_freq, ring_freq); 4665 /* leave ia_freq as the default, chosen by cpufreq */ 4666 } else { 4667 /* On older processors, there is no separate ring 4668 * clock domain, so in order to boost the bandwidth 4669 * of the ring, we need to upclock the CPU (ia_freq). 4670 * 4671 * For GPU frequencies less than 750MHz, 4672 * just use the lowest ring freq. 
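 * As a worked example (illustrative numbers only): with
 * max_ia_freq = 3400 MHz, scaling_factor = 180 and a GPU frequency
 * 4 bins (200 MHz) below RP0, ia_freq = 3400 - (4 * 180) / 2 = 3040,
 * which DIV_ROUND_CLOSEST(ia_freq, 100) encodes as 30 in the PCODE
 * min-frequency table entry.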
4673 */ 4674 if (gpu_freq < min_freq) 4675 ia_freq = 800; 4676 else 4677 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 4678 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); 4679 } 4680 4681 sandybridge_pcode_write(dev_priv, 4682 GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 4683 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | 4684 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | 4685 gpu_freq); 4686 } 4687 } 4688 4689 void gen6_update_ring_freq(struct drm_device *dev) 4690 { 4691 struct drm_i915_private *dev_priv = dev->dev_private; 4692 4693 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev)) 4694 return; 4695 4696 mutex_lock(&dev_priv->rps.hw_lock); 4697 __gen6_update_ring_freq(dev); 4698 mutex_unlock(&dev_priv->rps.hw_lock); 4699 } 4700 4701 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) 4702 { 4703 struct drm_device *dev = dev_priv->dev; 4704 u32 val, rp0; 4705 4706 if (dev->pdev->revision >= 0x20) { 4707 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 4708 4709 switch (INTEL_INFO(dev)->eu_total) { 4710 case 8: 4711 /* (2 * 4) config */ 4712 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 4713 break; 4714 case 12: 4715 /* (2 * 6) config */ 4716 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); 4717 break; 4718 case 16: 4719 /* (2 * 8) config */ 4720 default: 4721 /* Setting (2 * 8) Min RP0 for any other combination */ 4722 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); 4723 break; 4724 } 4725 rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK); 4726 } else { 4727 /* For pre-production hardware */ 4728 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); 4729 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & 4730 PUNIT_GPU_STATUS_MAX_FREQ_MASK; 4731 } 4732 return rp0; 4733 } 4734 4735 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) 4736 { 4737 u32 val, rpe; 4738 4739 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); 4740 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; 4741 4742 return rpe; 4743 } 4744 4745 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) 4746 { 4747 struct drm_device *dev = dev_priv->dev; 4748 u32 val, rp1; 4749 4750 if (dev->pdev->revision >= 0x20) { 4751 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 4752 rp1 = (val & FB_GFX_FREQ_FUSE_MASK); 4753 } else { 4754 /* For pre-production hardware */ 4755 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 4756 rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & 4757 PUNIT_GPU_STATUS_MAX_FREQ_MASK); 4758 } 4759 return rp1; 4760 } 4761 4762 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv) 4763 { 4764 u32 val, rp1; 4765 4766 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 4767 4768 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; 4769 4770 return rp1; 4771 } 4772 4773 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) 4774 { 4775 u32 val, rp0; 4776 4777 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); 4778 4779 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; 4780 /* Clamp to max */ 4781 rp0 = min_t(u32, rp0, 0xea); 4782 4783 return rp0; 4784 } 4785 4786 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) 4787 { 4788 u32 val, rpe; 4789 4790 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO); 4791 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; 4792 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI); 4793 rpe |= (val & 
FB_FMAX_VMIN_FREQ_HI_MASK) << 5; 4794 4795 return rpe; 4796 } 4797 4798 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) 4799 { 4800 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; 4801 } 4802 4803 /* Check that the pctx buffer wasn't move under us. */ 4804 static void valleyview_check_pctx(struct drm_i915_private *dev_priv) 4805 { 4806 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; 4807 4808 /* DragonFly - if EDID fails vlv_pctx can wind up NULL */ 4809 if (WARN_ON(!dev_priv->vlv_pctx)) 4810 return; 4811 4812 WARN_ON(pctx_addr != dev_priv->mm.stolen_base + 4813 dev_priv->vlv_pctx->stolen->start); 4814 } 4815 4816 4817 /* Check that the pcbr address is not empty. */ 4818 static void cherryview_check_pctx(struct drm_i915_private *dev_priv) 4819 { 4820 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; 4821 4822 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); 4823 } 4824 4825 static void cherryview_setup_pctx(struct drm_device *dev) 4826 { 4827 struct drm_i915_private *dev_priv = dev->dev_private; 4828 unsigned long pctx_paddr, paddr; 4829 struct i915_gtt *gtt = &dev_priv->gtt; 4830 u32 pcbr; 4831 int pctx_size = 32*1024; 4832 4833 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 4834 4835 pcbr = I915_READ(VLV_PCBR); 4836 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { 4837 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); 4838 paddr = (dev_priv->mm.stolen_base + 4839 (gtt->stolen_size - pctx_size)); 4840 4841 pctx_paddr = (paddr & (~4095)); 4842 I915_WRITE(VLV_PCBR, pctx_paddr); 4843 } 4844 4845 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 4846 } 4847 4848 static void valleyview_setup_pctx(struct drm_device *dev) 4849 { 4850 struct drm_i915_private *dev_priv = dev->dev_private; 4851 struct drm_i915_gem_object *pctx; 4852 unsigned long pctx_paddr; 4853 u32 pcbr; 4854 int pctx_size = 24*1024; 4855 4856 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 4857 4858 pcbr = I915_READ(VLV_PCBR); 4859 if (pcbr) { 4860 /* BIOS set it up already, grab the pre-alloc'd space */ 4861 int pcbr_offset; 4862 4863 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; 4864 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, 4865 pcbr_offset, 4866 I915_GTT_OFFSET_NONE, 4867 pctx_size); 4868 goto out; 4869 } 4870 4871 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); 4872 4873 /* 4874 * From the Gunit register HAS: 4875 * The Gfx driver is expected to program this register and ensure 4876 * proper allocation within Gfx stolen memory. For example, this 4877 * register should be programmed such than the PCBR range does not 4878 * overlap with other ranges, such as the frame buffer, protected 4879 * memory, or any other relevant ranges. 
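 * The fallback below therefore carves a pctx_size object out of
 * stolen memory and points VLV_PCBR at its physical address
 * (stolen_base + object offset).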
4880 */ 4881 pctx = i915_gem_object_create_stolen(dev, pctx_size); 4882 if (!pctx) { 4883 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 4884 return; 4885 } 4886 4887 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start; 4888 I915_WRITE(VLV_PCBR, pctx_paddr); 4889 4890 out: 4891 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 4892 dev_priv->vlv_pctx = pctx; 4893 } 4894 4895 static void valleyview_cleanup_pctx(struct drm_device *dev) 4896 { 4897 struct drm_i915_private *dev_priv = dev->dev_private; 4898 4899 if (WARN_ON(!dev_priv->vlv_pctx)) 4900 return; 4901 4902 drm_gem_object_unreference(&dev_priv->vlv_pctx->base); 4903 dev_priv->vlv_pctx = NULL; 4904 } 4905 4906 static void valleyview_init_gt_powersave(struct drm_device *dev) 4907 { 4908 struct drm_i915_private *dev_priv = dev->dev_private; 4909 u32 val; 4910 4911 valleyview_setup_pctx(dev); 4912 4913 mutex_lock(&dev_priv->rps.hw_lock); 4914 4915 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 4916 switch ((val >> 6) & 3) { 4917 case 0: 4918 case 1: 4919 dev_priv->mem_freq = 800; 4920 break; 4921 case 2: 4922 dev_priv->mem_freq = 1066; 4923 break; 4924 case 3: 4925 dev_priv->mem_freq = 1333; 4926 break; 4927 } 4928 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 4929 4930 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv); 4931 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 4932 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 4933 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 4934 dev_priv->rps.max_freq); 4935 4936 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv); 4937 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 4938 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 4939 dev_priv->rps.efficient_freq); 4940 4941 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv); 4942 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", 4943 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 4944 dev_priv->rps.rp1_freq); 4945 4946 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv); 4947 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 4948 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 4949 dev_priv->rps.min_freq); 4950 4951 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 4952 4953 /* Preserve min/max settings in case of re-init */ 4954 if (dev_priv->rps.max_freq_softlimit == 0) 4955 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 4956 4957 if (dev_priv->rps.min_freq_softlimit == 0) 4958 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 4959 4960 mutex_unlock(&dev_priv->rps.hw_lock); 4961 } 4962 4963 static void cherryview_init_gt_powersave(struct drm_device *dev) 4964 { 4965 struct drm_i915_private *dev_priv = dev->dev_private; 4966 u32 val; 4967 4968 cherryview_setup_pctx(dev); 4969 4970 mutex_lock(&dev_priv->rps.hw_lock); 4971 4972 mutex_lock(&dev_priv->sb_lock); 4973 val = vlv_cck_read(dev_priv, CCK_FUSE_REG); 4974 mutex_unlock(&dev_priv->sb_lock); 4975 4976 switch ((val >> 2) & 0x7) { 4977 case 0: 4978 case 1: 4979 dev_priv->rps.cz_freq = 200; 4980 dev_priv->mem_freq = 1600; 4981 break; 4982 case 2: 4983 dev_priv->rps.cz_freq = 267; 4984 dev_priv->mem_freq = 1600; 4985 break; 4986 case 3: 4987 dev_priv->rps.cz_freq = 333; 4988 dev_priv->mem_freq = 2000; 4989 break; 4990 case 4: 4991 dev_priv->rps.cz_freq = 320; 4992 dev_priv->mem_freq = 1600; 4993 break; 4994 case 5: 4995 dev_priv->rps.cz_freq = 400; 4996 dev_priv->mem_freq = 1600; 4997 break; 4998 } 4999 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); 
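	/* Read the RP0/RPe/RP1 fuse values; on CHV the PUnit-validated
	 * range is only [RPe, RP0], so RPe also ends up as min_freq below. */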
5000 5001 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv); 5002 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq; 5003 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", 5004 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq), 5005 dev_priv->rps.max_freq); 5006 5007 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv); 5008 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", 5009 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5010 dev_priv->rps.efficient_freq); 5011 5012 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv); 5013 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", 5014 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq), 5015 dev_priv->rps.rp1_freq); 5016 5017 /* PUnit validated range is only [RPe, RP0] */ 5018 dev_priv->rps.min_freq = dev_priv->rps.efficient_freq; 5019 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", 5020 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), 5021 dev_priv->rps.min_freq); 5022 5023 WARN_ONCE((dev_priv->rps.max_freq | 5024 dev_priv->rps.efficient_freq | 5025 dev_priv->rps.rp1_freq | 5026 dev_priv->rps.min_freq) & 1, 5027 "Odd GPU freq values\n"); 5028 5029 dev_priv->rps.idle_freq = dev_priv->rps.min_freq; 5030 5031 /* Preserve min/max settings in case of re-init */ 5032 if (dev_priv->rps.max_freq_softlimit == 0) 5033 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 5034 5035 if (dev_priv->rps.min_freq_softlimit == 0) 5036 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq; 5037 5038 mutex_unlock(&dev_priv->rps.hw_lock); 5039 } 5040 5041 static void valleyview_cleanup_gt_powersave(struct drm_device *dev) 5042 { 5043 valleyview_cleanup_pctx(dev); 5044 } 5045 5046 static void cherryview_enable_rps(struct drm_device *dev) 5047 { 5048 struct drm_i915_private *dev_priv = dev->dev_private; 5049 struct intel_engine_cs *ring; 5050 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 5051 int i; 5052 5053 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5054 5055 gtfifodbg = I915_READ(GTFIFODBG); 5056 if (gtfifodbg) { 5057 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 5058 gtfifodbg); 5059 I915_WRITE(GTFIFODBG, gtfifodbg); 5060 } 5061 5062 cherryview_check_pctx(dev_priv); 5063 5064 /* 1a & 1b: Get forcewake during program sequence. Although the driver 5065 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ 5066 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5067 5068 /* Disable RC states. 
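 * (RC6 itself is only re-enabled further down, and only if the BIOS
 * has populated VLV_PCBR.)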
*/ 5069 I915_WRITE(GEN6_RC_CONTROL, 0); 5070 5071 /* 2a: Program RC6 thresholds.*/ 5072 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 5073 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ 5074 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ 5075 5076 for_each_ring(ring, dev_priv, i) 5077 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 5078 I915_WRITE(GEN6_RC_SLEEP, 0); 5079 5080 /* TO threshold set to 500 us ( 0x186 * 1.28 us) */ 5081 I915_WRITE(GEN6_RC6_THRESHOLD, 0x186); 5082 5083 /* allows RC6 residency counter to work */ 5084 I915_WRITE(VLV_COUNTER_CONTROL, 5085 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | 5086 VLV_MEDIA_RC6_COUNT_EN | 5087 VLV_RENDER_RC6_COUNT_EN)); 5088 5089 /* For now we assume BIOS is allocating and populating the PCBR */ 5090 pcbr = I915_READ(VLV_PCBR); 5091 5092 /* 3: Enable RC6 */ 5093 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && 5094 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 5095 rc6_mode = GEN7_RC_CTL_TO_MODE; 5096 5097 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5098 5099 /* 4 Program defaults and thresholds for RPS*/ 5100 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 5101 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 5102 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 5103 I915_WRITE(GEN6_RP_UP_EI, 66000); 5104 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 5105 5106 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5107 5108 /* 5: Enable RPS */ 5109 I915_WRITE(GEN6_RP_CONTROL, 5110 GEN6_RP_MEDIA_HW_NORMAL_MODE | 5111 GEN6_RP_MEDIA_IS_GFX | 5112 GEN6_RP_ENABLE | 5113 GEN6_RP_UP_BUSY_AVG | 5114 GEN6_RP_DOWN_IDLE_AVG); 5115 5116 /* Setting Fixed Bias */ 5117 val = VLV_OVERRIDE_EN | 5118 VLV_SOC_TDP_EN | 5119 CHV_BIAS_CPU_50_SOC_50; 5120 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 5121 5122 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5123 5124 /* RPS code assumes GPLL is used */ 5125 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 5126 5127 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no"); 5128 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5129 5130 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 5131 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 5132 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 5133 dev_priv->rps.cur_freq); 5134 5135 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 5136 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5137 dev_priv->rps.efficient_freq); 5138 5139 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 5140 5141 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5142 } 5143 5144 static void valleyview_enable_rps(struct drm_device *dev) 5145 { 5146 struct drm_i915_private *dev_priv = dev->dev_private; 5147 struct intel_engine_cs *ring; 5148 u32 gtfifodbg, val, rc6_mode = 0; 5149 int i; 5150 5151 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 5152 5153 valleyview_check_pctx(dev_priv); 5154 5155 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 5156 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", 5157 gtfifodbg); 5158 I915_WRITE(GTFIFODBG, gtfifodbg); 5159 } 5160 5161 /* If VLV, Forcewake all wells, else re-direct to regular path */ 5162 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5163 5164 /* Disable RC states. 
*/ 5165 I915_WRITE(GEN6_RC_CONTROL, 0); 5166 5167 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 5168 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); 5169 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); 5170 I915_WRITE(GEN6_RP_UP_EI, 66000); 5171 I915_WRITE(GEN6_RP_DOWN_EI, 350000); 5172 5173 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 5174 5175 I915_WRITE(GEN6_RP_CONTROL, 5176 GEN6_RP_MEDIA_TURBO | 5177 GEN6_RP_MEDIA_HW_NORMAL_MODE | 5178 GEN6_RP_MEDIA_IS_GFX | 5179 GEN6_RP_ENABLE | 5180 GEN6_RP_UP_BUSY_AVG | 5181 GEN6_RP_DOWN_IDLE_CONT); 5182 5183 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); 5184 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 5185 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 5186 5187 for_each_ring(ring, dev_priv, i) 5188 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); 5189 5190 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); 5191 5192 /* allows RC6 residency counter to work */ 5193 I915_WRITE(VLV_COUNTER_CONTROL, 5194 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN | 5195 VLV_RENDER_RC0_COUNT_EN | 5196 VLV_MEDIA_RC6_COUNT_EN | 5197 VLV_RENDER_RC6_COUNT_EN)); 5198 5199 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5200 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 5201 5202 intel_print_rc6_info(dev, rc6_mode); 5203 5204 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5205 5206 /* Setting Fixed Bias */ 5207 val = VLV_OVERRIDE_EN | 5208 VLV_SOC_TDP_EN | 5209 VLV_BIAS_CPU_125_SOC_875; 5210 vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); 5211 5212 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5213 5214 /* RPS code assumes GPLL is used */ 5215 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); 5216 5217 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no"); 5218 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); 5219 5220 dev_priv->rps.cur_freq = (val >> 8) & 0xff; 5221 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n", 5222 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 5223 dev_priv->rps.cur_freq); 5224 5225 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", 5226 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), 5227 dev_priv->rps.efficient_freq); 5228 5229 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); 5230 5231 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5232 } 5233 5234 static unsigned long intel_pxfreq(u32 vidfreq) 5235 { 5236 unsigned long freq; 5237 int div = (vidfreq & 0x3f0000) >> 16; 5238 int post = (vidfreq & 0x3000) >> 12; 5239 int pre = (vidfreq & 0x7); 5240 5241 if (!pre) 5242 return 0; 5243 5244 freq = ((div * 133333) / ((1<<post) * pre)); 5245 5246 return freq; 5247 } 5248 5249 static const struct cparams { 5250 u16 i; 5251 u16 t; 5252 u16 m; 5253 u16 c; 5254 } cparams[] = { 5255 { 1, 1333, 301, 28664 }, 5256 { 1, 1066, 294, 24460 }, 5257 { 1, 800, 294, 25192 }, 5258 { 0, 1333, 276, 27605 }, 5259 { 0, 1066, 276, 27605 }, 5260 { 0, 800, 231, 23784 }, 5261 }; 5262 5263 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) 5264 { 5265 u64 total_count, diff, ret; 5266 u32 count1, count2, count3, m = 0, c = 0; 5267 unsigned long now = jiffies_to_msecs(jiffies), diff1; 5268 int i; 5269 5270 assert_spin_locked(&mchdev_lock); 5271 5272 diff1 = now - dev_priv->ips.last_time1; 5273 5274 /* Prevent division-by-zero if we are asking too fast. 5275 * Also, we don't get interesting results if we are polling 5276 * faster than once in 10ms, so just return the saved value 5277 * in such cases. 
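 * The value computed below is essentially
 * (m * (count delta / msec elapsed) + c) / 10, with m and c looked up
 * from the cparams table matching the current c_m/r_t configuration.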
5278 */ 5279 if (diff1 <= 10) 5280 return dev_priv->ips.chipset_power; 5281 5282 count1 = I915_READ(DMIEC); 5283 count2 = I915_READ(DDREC); 5284 count3 = I915_READ(CSIEC); 5285 5286 total_count = count1 + count2 + count3; 5287 5288 /* FIXME: handle per-counter overflow */ 5289 if (total_count < dev_priv->ips.last_count1) { 5290 diff = ~0UL - dev_priv->ips.last_count1; 5291 diff += total_count; 5292 } else { 5293 diff = total_count - dev_priv->ips.last_count1; 5294 } 5295 5296 for (i = 0; i < ARRAY_SIZE(cparams); i++) { 5297 if (cparams[i].i == dev_priv->ips.c_m && 5298 cparams[i].t == dev_priv->ips.r_t) { 5299 m = cparams[i].m; 5300 c = cparams[i].c; 5301 break; 5302 } 5303 } 5304 5305 diff = div_u64(diff, diff1); 5306 ret = ((m * diff) + c); 5307 ret = div_u64(ret, 10); 5308 5309 dev_priv->ips.last_count1 = total_count; 5310 dev_priv->ips.last_time1 = now; 5311 5312 dev_priv->ips.chipset_power = ret; 5313 5314 return ret; 5315 } 5316 5317 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 5318 { 5319 struct drm_device *dev = dev_priv->dev; 5320 unsigned long val; 5321 5322 if (INTEL_INFO(dev)->gen != 5) 5323 return 0; 5324 5325 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5326 5327 val = __i915_chipset_val(dev_priv); 5328 5329 lockmgr(&mchdev_lock, LK_RELEASE); 5330 5331 return val; 5332 } 5333 5334 unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 5335 { 5336 unsigned long m, x, b; 5337 u32 tsfs; 5338 5339 tsfs = I915_READ(TSFS); 5340 5341 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 5342 x = I915_READ8(TR1); 5343 5344 b = tsfs & TSFS_INTR_MASK; 5345 5346 return ((m * x) / 127) - b; 5347 } 5348 5349 static int _pxvid_to_vd(u8 pxvid) 5350 { 5351 if (pxvid == 0) 5352 return 0; 5353 5354 if (pxvid >= 8 && pxvid < 31) 5355 pxvid = 31; 5356 5357 return (pxvid + 2) * 125; 5358 } 5359 5360 static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 5361 { 5362 struct drm_device *dev = dev_priv->dev; 5363 const int vd = _pxvid_to_vd(pxvid); 5364 const int vm = vd - 1125; 5365 5366 if (INTEL_INFO(dev)->is_mobile) 5367 return vm > 0 ? vm : 0; 5368 5369 return vd; 5370 } 5371 5372 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) 5373 { 5374 u64 now, diff, diffms; 5375 u32 count; 5376 5377 assert_spin_locked(&mchdev_lock); 5378 5379 now = ktime_get_raw_ns(); 5380 diffms = now - dev_priv->ips.last_time2; 5381 do_div(diffms, NSEC_PER_MSEC); 5382 5383 /* Don't divide by 0 */ 5384 if (!diffms) 5385 return; 5386 5387 count = I915_READ(GFXEC); 5388 5389 if (count < dev_priv->ips.last_count2) { 5390 diff = ~0UL - dev_priv->ips.last_count2; 5391 diff += count; 5392 } else { 5393 diff = count - dev_priv->ips.last_count2; 5394 } 5395 5396 dev_priv->ips.last_count2 = count; 5397 dev_priv->ips.last_time2 = now; 5398 5399 /* More magic constants... 
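 * i.e. gfx_power below works out to
 * (GFXEC delta * 1181) / (msec elapsed * 10).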
*/ 5400 diff = diff * 1181; 5401 diff = div_u64(diff, diffms * 10); 5402 dev_priv->ips.gfx_power = diff; 5403 } 5404 5405 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 5406 { 5407 struct drm_device *dev = dev_priv->dev; 5408 5409 if (INTEL_INFO(dev)->gen != 5) 5410 return; 5411 5412 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5413 5414 __i915_update_gfx_val(dev_priv); 5415 5416 lockmgr(&mchdev_lock, LK_RELEASE); 5417 } 5418 5419 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) 5420 { 5421 unsigned long t, corr, state1, corr2, state2; 5422 u32 pxvid, ext_v; 5423 5424 assert_spin_locked(&mchdev_lock); 5425 5426 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4)); 5427 pxvid = (pxvid >> 24) & 0x7f; 5428 ext_v = pvid_to_extvid(dev_priv, pxvid); 5429 5430 state1 = ext_v; 5431 5432 t = i915_mch_val(dev_priv); 5433 5434 /* Revel in the empirically derived constants */ 5435 5436 /* Correction factor in 1/100000 units */ 5437 if (t > 80) 5438 corr = ((t * 2349) + 135940); 5439 else if (t >= 50) 5440 corr = ((t * 964) + 29317); 5441 else /* < 50 */ 5442 corr = ((t * 301) + 1004); 5443 5444 corr = corr * ((150142 * state1) / 10000 - 78642); 5445 corr /= 100000; 5446 corr2 = (corr * dev_priv->ips.corr); 5447 5448 state2 = (corr2 * state1) / 10000; 5449 state2 /= 100; /* convert to mW */ 5450 5451 __i915_update_gfx_val(dev_priv); 5452 5453 return dev_priv->ips.gfx_power + state2; 5454 } 5455 5456 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 5457 { 5458 struct drm_device *dev = dev_priv->dev; 5459 unsigned long val; 5460 5461 if (INTEL_INFO(dev)->gen != 5) 5462 return 0; 5463 5464 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5465 5466 val = __i915_gfx_val(dev_priv); 5467 5468 lockmgr(&mchdev_lock, LK_RELEASE); 5469 5470 return val; 5471 } 5472 5473 /** 5474 * i915_read_mch_val - return value for IPS use 5475 * 5476 * Calculate and return a value for the IPS driver to use when deciding whether 5477 * we have thermal and power headroom to increase CPU or GPU power budget. 5478 */ 5479 unsigned long i915_read_mch_val(void) 5480 { 5481 struct drm_i915_private *dev_priv; 5482 unsigned long chipset_val, graphics_val, ret = 0; 5483 5484 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5485 if (!i915_mch_dev) 5486 goto out_unlock; 5487 dev_priv = i915_mch_dev; 5488 5489 chipset_val = __i915_chipset_val(dev_priv); 5490 graphics_val = __i915_gfx_val(dev_priv); 5491 5492 ret = chipset_val + graphics_val; 5493 5494 out_unlock: 5495 lockmgr(&mchdev_lock, LK_RELEASE); 5496 5497 return ret; 5498 } 5499 5500 /** 5501 * i915_gpu_raise - raise GPU frequency limit 5502 * 5503 * Raise the limit; IPS indicates we have thermal headroom. 5504 */ 5505 bool i915_gpu_raise(void) 5506 { 5507 struct drm_i915_private *dev_priv; 5508 bool ret = true; 5509 5510 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5511 if (!i915_mch_dev) { 5512 ret = false; 5513 goto out_unlock; 5514 } 5515 dev_priv = i915_mch_dev; 5516 5517 if (dev_priv->ips.max_delay > dev_priv->ips.fmax) 5518 dev_priv->ips.max_delay--; 5519 5520 out_unlock: 5521 lockmgr(&mchdev_lock, LK_RELEASE); 5522 5523 return ret; 5524 } 5525 5526 /** 5527 * i915_gpu_lower - lower GPU frequency limit 5528 * 5529 * IPS indicates we're close to a thermal limit, so throttle back the GPU 5530 * frequency maximum. 
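 * Note that delay values here are inverse to frequency:
 * i915_gpu_raise() steps max_delay down towards fmax, while this
 * helper steps it up, bounded by min_delay.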
5531 */ 5532 bool i915_gpu_lower(void) 5533 { 5534 struct drm_i915_private *dev_priv; 5535 bool ret = true; 5536 5537 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5538 if (!i915_mch_dev) { 5539 ret = false; 5540 goto out_unlock; 5541 } 5542 dev_priv = i915_mch_dev; 5543 5544 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay) 5545 dev_priv->ips.max_delay++; 5546 5547 out_unlock: 5548 lockmgr(&mchdev_lock, LK_RELEASE); 5549 5550 return ret; 5551 } 5552 5553 /** 5554 * i915_gpu_busy - indicate GPU business to IPS 5555 * 5556 * Tell the IPS driver whether or not the GPU is busy. 5557 */ 5558 bool i915_gpu_busy(void) 5559 { 5560 struct drm_i915_private *dev_priv; 5561 struct intel_engine_cs *ring; 5562 bool ret = false; 5563 int i; 5564 5565 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5566 if (!i915_mch_dev) 5567 goto out_unlock; 5568 dev_priv = i915_mch_dev; 5569 5570 for_each_ring(ring, dev_priv, i) 5571 ret |= !list_empty(&ring->request_list); 5572 5573 out_unlock: 5574 lockmgr(&mchdev_lock, LK_RELEASE); 5575 5576 return ret; 5577 } 5578 5579 /** 5580 * i915_gpu_turbo_disable - disable graphics turbo 5581 * 5582 * Disable graphics turbo by resetting the max frequency and setting the 5583 * current frequency to the default. 5584 */ 5585 bool i915_gpu_turbo_disable(void) 5586 { 5587 struct drm_i915_private *dev_priv; 5588 bool ret = true; 5589 5590 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5591 if (!i915_mch_dev) { 5592 ret = false; 5593 goto out_unlock; 5594 } 5595 dev_priv = i915_mch_dev; 5596 5597 dev_priv->ips.max_delay = dev_priv->ips.fstart; 5598 5599 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) 5600 ret = false; 5601 5602 out_unlock: 5603 lockmgr(&mchdev_lock, LK_RELEASE); 5604 5605 return ret; 5606 } 5607 5608 #if 0 5609 /** 5610 * Tells the intel_ips driver that the i915 driver is now loaded, if 5611 * IPS got loaded first. 5612 * 5613 * This awkward dance is so that neither module has to depend on the 5614 * other in order for IPS to do the appropriate communication of 5615 * GPU turbo limits to i915. 5616 */ 5617 static void 5618 ips_ping_for_i915_load(void) 5619 { 5620 void (*link)(void); 5621 5622 link = symbol_get(ips_link_to_i915_driver); 5623 if (link) { 5624 link(); 5625 symbol_put(ips_link_to_i915_driver); 5626 } 5627 } 5628 #endif 5629 5630 void intel_gpu_ips_init(struct drm_i915_private *dev_priv) 5631 { 5632 /* We only register the i915 ips part with intel-ips once everything is 5633 * set up, to avoid intel-ips sneaking in and reading bogus values. 
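 * i915_mch_dev is only published or cleared under mchdev_lock, and every
 * exported helper above re-checks it for NULL before use, so the teardown
 * path below can simply reset the pointer under the same lock.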
*/ 5634 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5635 i915_mch_dev = dev_priv; 5636 lockmgr(&mchdev_lock, LK_RELEASE); 5637 } 5638 5639 void intel_gpu_ips_teardown(void) 5640 { 5641 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 5642 i915_mch_dev = NULL; 5643 lockmgr(&mchdev_lock, LK_RELEASE); 5644 } 5645 5646 static void intel_init_emon(struct drm_device *dev) 5647 { 5648 struct drm_i915_private *dev_priv = dev->dev_private; 5649 u32 lcfuse; 5650 u8 pxw[16]; 5651 int i; 5652 5653 /* Disable to program */ 5654 I915_WRITE(ECR, 0); 5655 POSTING_READ(ECR); 5656 5657 /* Program energy weights for various events */ 5658 I915_WRITE(SDEW, 0x15040d00); 5659 I915_WRITE(CSIEW0, 0x007f0000); 5660 I915_WRITE(CSIEW1, 0x1e220004); 5661 I915_WRITE(CSIEW2, 0x04000004); 5662 5663 for (i = 0; i < 5; i++) 5664 I915_WRITE(PEW + (i * 4), 0); 5665 for (i = 0; i < 3; i++) 5666 I915_WRITE(DEW + (i * 4), 0); 5667 5668 /* Program P-state weights to account for frequency power adjustment */ 5669 for (i = 0; i < 16; i++) { 5670 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); 5671 unsigned long freq = intel_pxfreq(pxvidfreq); 5672 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 5673 PXVFREQ_PX_SHIFT; 5674 unsigned long val; 5675 5676 val = vid * vid; 5677 val *= (freq / 1000); 5678 val *= 255; 5679 val /= (127*127*900); 5680 if (val > 0xff) 5681 DRM_ERROR("bad pxval: %ld\n", val); 5682 pxw[i] = val; 5683 } 5684 /* Render standby states get 0 weight */ 5685 pxw[14] = 0; 5686 pxw[15] = 0; 5687 5688 for (i = 0; i < 4; i++) { 5689 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 5690 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 5691 I915_WRITE(PXW + (i * 4), val); 5692 } 5693 5694 /* Adjust magic regs to magic values (more experimental results) */ 5695 I915_WRITE(OGW0, 0); 5696 I915_WRITE(OGW1, 0); 5697 I915_WRITE(EG0, 0x00007f00); 5698 I915_WRITE(EG1, 0x0000000e); 5699 I915_WRITE(EG2, 0x000e0000); 5700 I915_WRITE(EG3, 0x68000300); 5701 I915_WRITE(EG4, 0x42000000); 5702 I915_WRITE(EG5, 0x00140031); 5703 I915_WRITE(EG6, 0); 5704 I915_WRITE(EG7, 0); 5705 5706 for (i = 0; i < 8; i++) 5707 I915_WRITE(PXWL + (i * 4), 0); 5708 5709 /* Enable PMON + select events */ 5710 I915_WRITE(ECR, 0x80000019); 5711 5712 lcfuse = I915_READ(LCFUSE02); 5713 5714 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 5715 } 5716 5717 void intel_init_gt_powersave(struct drm_device *dev) 5718 { 5719 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); 5720 5721 if (IS_CHERRYVIEW(dev)) 5722 cherryview_init_gt_powersave(dev); 5723 else if (IS_VALLEYVIEW(dev)) 5724 valleyview_init_gt_powersave(dev); 5725 } 5726 5727 void intel_cleanup_gt_powersave(struct drm_device *dev) 5728 { 5729 if (IS_CHERRYVIEW(dev)) 5730 return; 5731 else if (IS_VALLEYVIEW(dev)) 5732 valleyview_cleanup_gt_powersave(dev); 5733 } 5734 5735 static void gen6_suspend_rps(struct drm_device *dev) 5736 { 5737 #if 0 5738 struct drm_i915_private *dev_priv = dev->dev_private; 5739 5740 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 5741 #endif 5742 5743 gen6_disable_rps_interrupts(dev); 5744 } 5745 5746 /** 5747 * intel_suspend_gt_powersave - suspend PM work and helper threads 5748 * @dev: drm device 5749 * 5750 * We don't want to disable RC6 or other features here, we just want 5751 * to make sure any work we've queued has finished and won't bother 5752 * us while we're suspended. 
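 * In this port the flush of rps.delayed_resume_work is compiled out (see
 * the #if 0 block in gen6_suspend_rps() above); only the RPS interrupts
 * are disabled before the GPU is parked at its minimum frequency.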
5753 */ 5754 void intel_suspend_gt_powersave(struct drm_device *dev) 5755 { 5756 struct drm_i915_private *dev_priv = dev->dev_private; 5757 5758 if (INTEL_INFO(dev)->gen < 6) 5759 return; 5760 5761 gen6_suspend_rps(dev); 5762 5763 /* Force GPU to min freq during suspend */ 5764 gen6_rps_idle(dev_priv); 5765 } 5766 5767 void intel_disable_gt_powersave(struct drm_device *dev) 5768 { 5769 struct drm_i915_private *dev_priv = dev->dev_private; 5770 5771 if (IS_IRONLAKE_M(dev)) { 5772 ironlake_disable_drps(dev); 5773 } else if (INTEL_INFO(dev)->gen >= 6) { 5774 intel_suspend_gt_powersave(dev); 5775 5776 mutex_lock(&dev_priv->rps.hw_lock); 5777 if (INTEL_INFO(dev)->gen >= 9) 5778 gen9_disable_rps(dev); 5779 else if (IS_CHERRYVIEW(dev)) 5780 cherryview_disable_rps(dev); 5781 else if (IS_VALLEYVIEW(dev)) 5782 valleyview_disable_rps(dev); 5783 else 5784 gen6_disable_rps(dev); 5785 5786 dev_priv->rps.enabled = false; 5787 mutex_unlock(&dev_priv->rps.hw_lock); 5788 } 5789 } 5790 5791 static void intel_gen6_powersave_work(struct work_struct *work) 5792 { 5793 struct drm_i915_private *dev_priv = 5794 container_of(work, struct drm_i915_private, 5795 rps.delayed_resume_work.work); 5796 struct drm_device *dev = dev_priv->dev; 5797 5798 mutex_lock(&dev_priv->rps.hw_lock); 5799 5800 gen6_reset_rps_interrupts(dev); 5801 5802 if (IS_CHERRYVIEW(dev)) { 5803 cherryview_enable_rps(dev); 5804 } else if (IS_VALLEYVIEW(dev)) { 5805 valleyview_enable_rps(dev); 5806 } else if (INTEL_INFO(dev)->gen >= 9) { 5807 gen9_enable_rc6(dev); 5808 gen9_enable_rps(dev); 5809 __gen6_update_ring_freq(dev); 5810 } else if (IS_BROADWELL(dev)) { 5811 gen8_enable_rps(dev); 5812 __gen6_update_ring_freq(dev); 5813 } else { 5814 gen6_enable_rps(dev); 5815 __gen6_update_ring_freq(dev); 5816 } 5817 5818 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 5819 WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq); 5820 5821 WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); 5822 WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); 5823 5824 dev_priv->rps.enabled = true; 5825 5826 gen6_enable_rps_interrupts(dev); 5827 5828 mutex_unlock(&dev_priv->rps.hw_lock); 5829 5830 intel_runtime_pm_put(dev_priv); 5831 } 5832 5833 void intel_enable_gt_powersave(struct drm_device *dev) 5834 { 5835 struct drm_i915_private *dev_priv = dev->dev_private; 5836 5837 /* Powersaving is controlled by the host when inside a VM */ 5838 if (intel_vgpu_active(dev)) 5839 return; 5840 5841 if (IS_IRONLAKE_M(dev)) { 5842 mutex_lock(&dev->struct_mutex); 5843 ironlake_enable_drps(dev); 5844 intel_init_emon(dev); 5845 mutex_unlock(&dev->struct_mutex); 5846 } else if (INTEL_INFO(dev)->gen >= 6) { 5847 /* 5848 * PCU communication is slow and this doesn't need to be 5849 * done at any specific time, so do this out of our fast path 5850 * to make resume and init faster. 5851 * 5852 * We depend on the HW RC6 power context save/restore 5853 * mechanism when entering D3 through runtime PM suspend. So 5854 * disable RPM until RPS/RC6 is properly setup. We can only 5855 * get here via the driver load/system resume/runtime resume 5856 * paths, so the _noresume version is enough (and in case of 5857 * runtime resume it's necessary). 
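 * The runtime PM reference taken here is dropped at the end of
 * intel_gen6_powersave_work() once RPS/RC6 setup has completed.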
5858 */ 5859 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work, 5860 round_jiffies_up_relative(HZ))) 5861 intel_runtime_pm_get_noresume(dev_priv); 5862 } 5863 } 5864 5865 void intel_reset_gt_powersave(struct drm_device *dev) 5866 { 5867 struct drm_i915_private *dev_priv = dev->dev_private; 5868 5869 if (INTEL_INFO(dev)->gen < 6) 5870 return; 5871 5872 gen6_suspend_rps(dev); 5873 dev_priv->rps.enabled = false; 5874 } 5875 5876 static void ibx_init_clock_gating(struct drm_device *dev) 5877 { 5878 struct drm_i915_private *dev_priv = dev->dev_private; 5879 5880 /* 5881 * On Ibex Peak and Cougar Point, we need to disable clock 5882 * gating for the panel power sequencer or it will fail to 5883 * start up when no ports are active. 5884 */ 5885 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 5886 } 5887 5888 static void g4x_disable_trickle_feed(struct drm_device *dev) 5889 { 5890 struct drm_i915_private *dev_priv = dev->dev_private; 5891 enum i915_pipe pipe; 5892 5893 for_each_pipe(dev_priv, pipe) { 5894 I915_WRITE(DSPCNTR(pipe), 5895 I915_READ(DSPCNTR(pipe)) | 5896 DISPPLANE_TRICKLE_FEED_DISABLE); 5897 5898 I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); 5899 POSTING_READ(DSPSURF(pipe)); 5900 } 5901 } 5902 5903 static void ilk_init_lp_watermarks(struct drm_device *dev) 5904 { 5905 struct drm_i915_private *dev_priv = dev->dev_private; 5906 5907 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 5908 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 5909 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); 5910 5911 /* 5912 * Don't touch WM1S_LP_EN here. 5913 * Doing so could cause underruns. 5914 */ 5915 } 5916 5917 static void ironlake_init_clock_gating(struct drm_device *dev) 5918 { 5919 struct drm_i915_private *dev_priv = dev->dev_private; 5920 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 5921 5922 /* 5923 * Required for FBC 5924 * WaFbcDisableDpfcClockGating:ilk 5925 */ 5926 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE | 5927 ILK_DPFCUNIT_CLOCK_GATE_DISABLE | 5928 ILK_DPFDUNIT_CLOCK_GATE_ENABLE; 5929 5930 I915_WRITE(PCH_3DCGDIS0, 5931 MARIUNIT_CLOCK_GATE_DISABLE | 5932 SVSMUNIT_CLOCK_GATE_DISABLE); 5933 I915_WRITE(PCH_3DCGDIS1, 5934 VFMUNIT_CLOCK_GATE_DISABLE); 5935 5936 /* 5937 * According to the spec the following bits should be set in 5938 * order to enable memory self-refresh 5939 * The bit 22/21 of 0x42004 5940 * The bit 5 of 0x42020 5941 * The bit 15 of 0x45000 5942 */ 5943 I915_WRITE(ILK_DISPLAY_CHICKEN2, 5944 (I915_READ(ILK_DISPLAY_CHICKEN2) | 5945 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 5946 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; 5947 I915_WRITE(DISP_ARB_CTL, 5948 (I915_READ(DISP_ARB_CTL) | 5949 DISP_FBC_WM_DIS)); 5950 5951 ilk_init_lp_watermarks(dev); 5952 5953 /* 5954 * Based on the document from hardware guys the following bits 5955 * should be set unconditionally in order to enable FBC. 5956 * The bit 22 of 0x42000 5957 * The bit 22 of 0x42004 5958 * The bit 7,8,9 of 0x42020. 
5959 */ 5960 if (IS_IRONLAKE_M(dev)) { 5961 /* WaFbcAsynchFlipDisableFbcQueue:ilk */ 5962 I915_WRITE(ILK_DISPLAY_CHICKEN1, 5963 I915_READ(ILK_DISPLAY_CHICKEN1) | 5964 ILK_FBCQ_DIS); 5965 I915_WRITE(ILK_DISPLAY_CHICKEN2, 5966 I915_READ(ILK_DISPLAY_CHICKEN2) | 5967 ILK_DPARB_GATE); 5968 } 5969 5970 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 5971 5972 I915_WRITE(ILK_DISPLAY_CHICKEN2, 5973 I915_READ(ILK_DISPLAY_CHICKEN2) | 5974 ILK_ELPIN_409_SELECT); 5975 I915_WRITE(_3D_CHICKEN2, 5976 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 5977 _3D_CHICKEN2_WM_READ_PIPELINED); 5978 5979 /* WaDisableRenderCachePipelinedFlush:ilk */ 5980 I915_WRITE(CACHE_MODE_0, 5981 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 5982 5983 /* WaDisable_RenderCache_OperationalFlush:ilk */ 5984 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 5985 5986 g4x_disable_trickle_feed(dev); 5987 5988 ibx_init_clock_gating(dev); 5989 } 5990 5991 static void cpt_init_clock_gating(struct drm_device *dev) 5992 { 5993 struct drm_i915_private *dev_priv = dev->dev_private; 5994 int pipe; 5995 uint32_t val; 5996 5997 /* 5998 * On Ibex Peak and Cougar Point, we need to disable clock 5999 * gating for the panel power sequencer or it will fail to 6000 * start up when no ports are active. 6001 */ 6002 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | 6003 PCH_DPLUNIT_CLOCK_GATE_DISABLE | 6004 PCH_CPUNIT_CLOCK_GATE_DISABLE); 6005 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 6006 DPLS_EDP_PPS_FIX_DIS); 6007 /* The below fixes the weird display corruption, a few pixels shifted 6008 * downward, on (only) LVDS of some HP laptops with IVY. 6009 */ 6010 for_each_pipe(dev_priv, pipe) { 6011 val = I915_READ(TRANS_CHICKEN2(pipe)); 6012 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 6013 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 6014 if (dev_priv->vbt.fdi_rx_polarity_inverted) 6015 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; 6016 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; 6017 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; 6018 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; 6019 I915_WRITE(TRANS_CHICKEN2(pipe), val); 6020 } 6021 /* WADP0ClockGatingDisable */ 6022 for_each_pipe(dev_priv, pipe) { 6023 I915_WRITE(TRANS_CHICKEN1(pipe), 6024 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 6025 } 6026 } 6027 6028 static void gen6_check_mch_setup(struct drm_device *dev) 6029 { 6030 struct drm_i915_private *dev_priv = dev->dev_private; 6031 uint32_t tmp; 6032 6033 tmp = I915_READ(MCH_SSKPD); 6034 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) 6035 DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", 6036 tmp); 6037 } 6038 6039 static void gen6_init_clock_gating(struct drm_device *dev) 6040 { 6041 struct drm_i915_private *dev_priv = dev->dev_private; 6042 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6043 6044 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6045 6046 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6047 I915_READ(ILK_DISPLAY_CHICKEN2) | 6048 ILK_ELPIN_409_SELECT); 6049 6050 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ 6051 I915_WRITE(_3D_CHICKEN, 6052 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); 6053 6054 /* WaDisable_RenderCache_OperationalFlush:snb */ 6055 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6056 6057 /* 6058 * BSpec recommends 8x4 when MSAA is used, 6059 * however in practice 16x4 seems fastest.
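 * (The same 16x4 WIZ hashing override is applied for IVB, HSW and VLV
 * later in this file.)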
6060 * 6061 * Note that PS/WM thread counts depend on the WIZ hashing 6062 * disable bit, which we don't touch here, but it's good 6063 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 6064 */ 6065 I915_WRITE(GEN6_GT_MODE, 6066 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6067 6068 ilk_init_lp_watermarks(dev); 6069 6070 I915_WRITE(CACHE_MODE_0, 6071 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 6072 6073 I915_WRITE(GEN6_UCGCTL1, 6074 I915_READ(GEN6_UCGCTL1) | 6075 GEN6_BLBUNIT_CLOCK_GATE_DISABLE | 6076 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 6077 6078 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 6079 * gating disable must be set. Failure to set it results in 6080 * flickering pixels due to Z write ordering failures after 6081 * some amount of runtime in the Mesa "fire" demo, and Unigine 6082 * Sanctuary and Tropics, and apparently anything else with 6083 * alpha test or pixel discard. 6084 * 6085 * According to the spec, bit 11 (RCCUNIT) must also be set, 6086 * but we didn't debug actual testcases to find it out. 6087 * 6088 * WaDisableRCCUnitClockGating:snb 6089 * WaDisableRCPBUnitClockGating:snb 6090 */ 6091 I915_WRITE(GEN6_UCGCTL2, 6092 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 6093 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 6094 6095 /* WaStripsFansDisableFastClipPerformanceFix:snb */ 6096 I915_WRITE(_3D_CHICKEN3, 6097 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL)); 6098 6099 /* 6100 * Bspec says: 6101 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and 6102 * 3DSTATE_SF number of SF output attributes is more than 16." 6103 */ 6104 I915_WRITE(_3D_CHICKEN3, 6105 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH)); 6106 6107 /* 6108 * According to the spec the following bits should be 6109 * set in order to enable memory self-refresh and fbc: 6110 * The bit21 and bit22 of 0x42000 6111 * The bit21 and bit22 of 0x42004 6112 * The bit5 and bit7 of 0x42020 6113 * The bit14 of 0x70180 6114 * The bit14 of 0x71180 6115 * 6116 * WaFbcAsynchFlipDisableFbcQueue:snb 6117 */ 6118 I915_WRITE(ILK_DISPLAY_CHICKEN1, 6119 I915_READ(ILK_DISPLAY_CHICKEN1) | 6120 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 6121 I915_WRITE(ILK_DISPLAY_CHICKEN2, 6122 I915_READ(ILK_DISPLAY_CHICKEN2) | 6123 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 6124 I915_WRITE(ILK_DSPCLK_GATE_D, 6125 I915_READ(ILK_DSPCLK_GATE_D) | 6126 ILK_DPARBUNIT_CLOCK_GATE_ENABLE | 6127 ILK_DPFDUNIT_CLOCK_GATE_ENABLE); 6128 6129 g4x_disable_trickle_feed(dev); 6130 6131 cpt_init_clock_gating(dev); 6132 6133 gen6_check_mch_setup(dev); 6134 } 6135 6136 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 6137 { 6138 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE); 6139 6140 /* 6141 * WaVSThreadDispatchOverride:ivb,vlv 6142 * 6143 * This actually overrides the dispatch 6144 * mode for all thread types. 6145 */ 6146 reg &= ~GEN7_FF_SCHED_MASK; 6147 reg |= GEN7_FF_TS_SCHED_HW; 6148 reg |= GEN7_FF_VS_SCHED_HW; 6149 reg |= GEN7_FF_DS_SCHED_HW; 6150 6151 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 6152 } 6153 6154 static void lpt_init_clock_gating(struct drm_device *dev) 6155 { 6156 struct drm_i915_private *dev_priv = dev->dev_private; 6157 6158 /* 6159 * TODO: this bit should only be enabled when really needed, then 6160 * disabled when not needed anymore in order to save power. 
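 * For now lpt_suspend_hw() below clears PCH_LP_PARTITION_LEVEL_DISABLE
 * again on LPT:LP before suspend.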
6161 */ 6162 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) 6163 I915_WRITE(SOUTH_DSPCLK_GATE_D, 6164 I915_READ(SOUTH_DSPCLK_GATE_D) | 6165 PCH_LP_PARTITION_LEVEL_DISABLE); 6166 6167 /* WADPOClockGatingDisable:hsw */ 6168 I915_WRITE(_TRANSA_CHICKEN1, 6169 I915_READ(_TRANSA_CHICKEN1) | 6170 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 6171 } 6172 6173 static void lpt_suspend_hw(struct drm_device *dev) 6174 { 6175 struct drm_i915_private *dev_priv = dev->dev_private; 6176 6177 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 6178 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 6179 6180 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 6181 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 6182 } 6183 } 6184 6185 static void broadwell_init_clock_gating(struct drm_device *dev) 6186 { 6187 struct drm_i915_private *dev_priv = dev->dev_private; 6188 enum i915_pipe pipe; 6189 uint32_t misccpctl; 6190 6191 ilk_init_lp_watermarks(dev); 6192 6193 /* WaSwitchSolVfFArbitrationPriority:bdw */ 6194 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 6195 6196 /* WaPsrDPAMaskVBlankInSRD:bdw */ 6197 I915_WRITE(CHICKEN_PAR1_1, 6198 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); 6199 6200 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ 6201 for_each_pipe(dev_priv, pipe) { 6202 I915_WRITE(CHICKEN_PIPESL_1(pipe), 6203 I915_READ(CHICKEN_PIPESL_1(pipe)) | 6204 BDW_DPRS_MASK_VBLANK_SRD); 6205 } 6206 6207 /* WaVSRefCountFullforceMissDisable:bdw */ 6208 /* WaDSRefCountFullforceMissDisable:bdw */ 6209 I915_WRITE(GEN7_FF_THREAD_MODE, 6210 I915_READ(GEN7_FF_THREAD_MODE) & 6211 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 6212 6213 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 6214 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 6215 6216 /* WaDisableSDEUnitClockGating:bdw */ 6217 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 6218 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 6219 6220 /* 6221 * WaProgramL3SqcReg1Default:bdw 6222 * WaTempDisableDOPClkGating:bdw 6223 */ 6224 misccpctl = I915_READ(GEN7_MISCCPCTL); 6225 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 6226 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT); 6227 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 6228 6229 /* 6230 * WaGttCachingOffByDefault:bdw 6231 * GTT cache may not work with big pages, so if those 6232 * are ever enabled GTT cache may need to be disabled. 6233 */ 6234 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 6235 6236 lpt_init_clock_gating(dev); 6237 } 6238 6239 static void haswell_init_clock_gating(struct drm_device *dev) 6240 { 6241 struct drm_i915_private *dev_priv = dev->dev_private; 6242 6243 ilk_init_lp_watermarks(dev); 6244 6245 /* L3 caching of data atomics doesn't work -- disable it. 
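 * Both the HSW_SCRATCH1 and ROW_CHICKEN3 writes below belong to this same
 * workaround.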
*/ 6246 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE); 6247 I915_WRITE(HSW_ROW_CHICKEN3, 6248 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE)); 6249 6250 /* This is required by WaCatErrorRejectionIssue:hsw */ 6251 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 6252 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 6253 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 6254 6255 /* WaVSRefCountFullforceMissDisable:hsw */ 6256 I915_WRITE(GEN7_FF_THREAD_MODE, 6257 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME); 6258 6259 /* WaDisable_RenderCache_OperationalFlush:hsw */ 6260 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6261 6262 /* enable HiZ Raw Stall Optimization */ 6263 I915_WRITE(CACHE_MODE_0_GEN7, 6264 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 6265 6266 /* WaDisable4x2SubspanOptimization:hsw */ 6267 I915_WRITE(CACHE_MODE_1, 6268 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 6269 6270 /* 6271 * BSpec recommends 8x4 when MSAA is used, 6272 * however in practice 16x4 seems fastest. 6273 * 6274 * Note that PS/WM thread counts depend on the WIZ hashing 6275 * disable bit, which we don't touch here, but it's good 6276 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 6277 */ 6278 I915_WRITE(GEN7_GT_MODE, 6279 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6280 6281 /* WaSampleCChickenBitEnable:hsw */ 6282 I915_WRITE(HALF_SLICE_CHICKEN3, 6283 _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE)); 6284 6285 /* WaSwitchSolVfFArbitrationPriority:hsw */ 6286 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); 6287 6288 /* WaRsPkgCStateDisplayPMReq:hsw */ 6289 I915_WRITE(CHICKEN_PAR1_1, 6290 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 6291 6292 lpt_init_clock_gating(dev); 6293 } 6294 6295 static void ivybridge_init_clock_gating(struct drm_device *dev) 6296 { 6297 struct drm_i915_private *dev_priv = dev->dev_private; 6298 uint32_t snpcr; 6299 6300 ilk_init_lp_watermarks(dev); 6301 6302 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); 6303 6304 /* WaDisableEarlyCull:ivb */ 6305 I915_WRITE(_3D_CHICKEN3, 6306 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 6307 6308 /* WaDisableBackToBackFlipFix:ivb */ 6309 I915_WRITE(IVB_CHICKEN3, 6310 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 6311 CHICKEN3_DGMG_DONE_FIX_DISABLE); 6312 6313 /* WaDisablePSDDualDispatchEnable:ivb */ 6314 if (IS_IVB_GT1(dev)) 6315 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 6316 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 6317 6318 /* WaDisable_RenderCache_OperationalFlush:ivb */ 6319 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6320 6321 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. 
*/ 6322 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 6323 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 6324 6325 /* WaApplyL3ControlAndL3ChickenMode:ivb */ 6326 I915_WRITE(GEN7_L3CNTLREG1, 6327 GEN7_WA_FOR_GEN7_L3_CONTROL); 6328 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 6329 GEN7_WA_L3_CHICKEN_MODE); 6330 if (IS_IVB_GT1(dev)) 6331 I915_WRITE(GEN7_ROW_CHICKEN2, 6332 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 6333 else { 6334 /* must write both registers */ 6335 I915_WRITE(GEN7_ROW_CHICKEN2, 6336 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 6337 I915_WRITE(GEN7_ROW_CHICKEN2_GT2, 6338 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 6339 } 6340 6341 /* WaForceL3Serialization:ivb */ 6342 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 6343 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 6344 6345 /* 6346 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 6347 * This implements the WaDisableRCZUnitClockGating:ivb workaround. 6348 */ 6349 I915_WRITE(GEN6_UCGCTL2, 6350 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 6351 6352 /* This is required by WaCatErrorRejectionIssue:ivb */ 6353 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 6354 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 6355 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 6356 6357 g4x_disable_trickle_feed(dev); 6358 6359 gen7_setup_fixed_func_scheduler(dev_priv); 6360 6361 if (0) { /* causes HiZ corruption on ivb:gt1 */ 6362 /* enable HiZ Raw Stall Optimization */ 6363 I915_WRITE(CACHE_MODE_0_GEN7, 6364 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE)); 6365 } 6366 6367 /* WaDisable4x2SubspanOptimization:ivb */ 6368 I915_WRITE(CACHE_MODE_1, 6369 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 6370 6371 /* 6372 * BSpec recommends 8x4 when MSAA is used, 6373 * however in practice 16x4 seems fastest. 6374 * 6375 * Note that PS/WM thread counts depend on the WIZ hashing 6376 * disable bit, which we don't touch here, but it's good 6377 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
6378 */ 6379 I915_WRITE(GEN7_GT_MODE, 6380 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6381 6382 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 6383 snpcr &= ~GEN6_MBC_SNPCR_MASK; 6384 snpcr |= GEN6_MBC_SNPCR_MED; 6385 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 6386 6387 if (!HAS_PCH_NOP(dev)) 6388 cpt_init_clock_gating(dev); 6389 6390 gen6_check_mch_setup(dev); 6391 } 6392 6393 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) 6394 { 6395 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); 6396 6397 /* 6398 * Disable trickle feed and enable pnd deadline calculation 6399 */ 6400 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 6401 I915_WRITE(CBR1_VLV, 0); 6402 } 6403 6404 static void valleyview_init_clock_gating(struct drm_device *dev) 6405 { 6406 struct drm_i915_private *dev_priv = dev->dev_private; 6407 6408 vlv_init_display_clock_gating(dev_priv); 6409 6410 /* WaDisableEarlyCull:vlv */ 6411 I915_WRITE(_3D_CHICKEN3, 6412 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); 6413 6414 /* WaDisableBackToBackFlipFix:vlv */ 6415 I915_WRITE(IVB_CHICKEN3, 6416 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 6417 CHICKEN3_DGMG_DONE_FIX_DISABLE); 6418 6419 /* WaPsdDispatchEnable:vlv */ 6420 /* WaDisablePSDDualDispatchEnable:vlv */ 6421 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1, 6422 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP | 6423 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE)); 6424 6425 /* WaDisable_RenderCache_OperationalFlush:vlv */ 6426 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6427 6428 /* WaForceL3Serialization:vlv */ 6429 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) & 6430 ~L3SQ_URB_READ_CAM_MATCH_DISABLE); 6431 6432 /* WaDisableDopClockGating:vlv */ 6433 I915_WRITE(GEN7_ROW_CHICKEN2, 6434 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 6435 6436 /* This is required by WaCatErrorRejectionIssue:vlv */ 6437 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 6438 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 6439 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 6440 6441 gen7_setup_fixed_func_scheduler(dev_priv); 6442 6443 /* 6444 * According to the spec, bit 13 (RCZUNIT) must be set on IVB. 6445 * This implements the WaDisableRCZUnitClockGating:vlv workaround. 6446 */ 6447 I915_WRITE(GEN6_UCGCTL2, 6448 GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 6449 6450 /* WaDisableL3Bank2xClockGate:vlv 6451 * Disabling L3 clock gating- MMIO 940c[25] = 1 6452 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ 6453 I915_WRITE(GEN7_UCGCTL4, 6454 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); 6455 6456 /* 6457 * BSpec says this must be set, even though 6458 * WaDisable4x2SubspanOptimization isn't listed for VLV. 6459 */ 6460 I915_WRITE(CACHE_MODE_1, 6461 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 6462 6463 /* 6464 * BSpec recommends 8x4 when MSAA is used, 6465 * however in practice 16x4 seems fastest. 6466 * 6467 * Note that PS/WM thread counts depend on the WIZ hashing 6468 * disable bit, which we don't touch here, but it's good 6469 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 6470 */ 6471 I915_WRITE(GEN7_GT_MODE, 6472 _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4)); 6473 6474 /* 6475 * WaIncreaseL3CreditsForVLVB0:vlv 6476 * This is the hardware default actually. 6477 */ 6478 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE); 6479 6480 /* 6481 * WaDisableVLVClockGating_VBIIssue:vlv 6482 * Disable clock gating on the GCFG unit to prevent a delay 6483 * in the reporting of vblank events.
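 * (GCFG_DIS is set in VLV_GUNIT_CLOCK_GATE just below.)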
6484 */ 6485 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); 6486 } 6487 6488 static void cherryview_init_clock_gating(struct drm_device *dev) 6489 { 6490 struct drm_i915_private *dev_priv = dev->dev_private; 6491 6492 vlv_init_display_clock_gating(dev_priv); 6493 6494 /* WaVSRefCountFullforceMissDisable:chv */ 6495 /* WaDSRefCountFullforceMissDisable:chv */ 6496 I915_WRITE(GEN7_FF_THREAD_MODE, 6497 I915_READ(GEN7_FF_THREAD_MODE) & 6498 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); 6499 6500 /* WaDisableSemaphoreAndSyncFlipWait:chv */ 6501 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, 6502 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); 6503 6504 /* WaDisableCSUnitClockGating:chv */ 6505 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | 6506 GEN6_CSUNIT_CLOCK_GATE_DISABLE); 6507 6508 /* WaDisableSDEUnitClockGating:chv */ 6509 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 6510 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 6511 6512 /* 6513 * GTT cache may not work with big pages, so if those 6514 * are ever enabled GTT cache may need to be disabled. 6515 */ 6516 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 6517 } 6518 6519 static void g4x_init_clock_gating(struct drm_device *dev) 6520 { 6521 struct drm_i915_private *dev_priv = dev->dev_private; 6522 uint32_t dspclk_gate; 6523 6524 I915_WRITE(RENCLK_GATE_D1, 0); 6525 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 6526 GS_UNIT_CLOCK_GATE_DISABLE | 6527 CL_UNIT_CLOCK_GATE_DISABLE); 6528 I915_WRITE(RAMCLK_GATE_D, 0); 6529 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 6530 OVRUNIT_CLOCK_GATE_DISABLE | 6531 OVCUNIT_CLOCK_GATE_DISABLE; 6532 if (IS_GM45(dev)) 6533 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 6534 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 6535 6536 /* WaDisableRenderCachePipelinedFlush */ 6537 I915_WRITE(CACHE_MODE_0, 6538 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); 6539 6540 /* WaDisable_RenderCache_OperationalFlush:g4x */ 6541 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6542 6543 g4x_disable_trickle_feed(dev); 6544 } 6545 6546 static void crestline_init_clock_gating(struct drm_device *dev) 6547 { 6548 struct drm_i915_private *dev_priv = dev->dev_private; 6549 6550 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 6551 I915_WRITE(RENCLK_GATE_D2, 0); 6552 I915_WRITE(DSPCLK_GATE_D, 0); 6553 I915_WRITE(RAMCLK_GATE_D, 0); 6554 I915_WRITE16(DEUC, 0); 6555 I915_WRITE(MI_ARB_STATE, 6556 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 6557 6558 /* WaDisable_RenderCache_OperationalFlush:gen4 */ 6559 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6560 } 6561 6562 static void broadwater_init_clock_gating(struct drm_device *dev) 6563 { 6564 struct drm_i915_private *dev_priv = dev->dev_private; 6565 6566 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 6567 I965_RCC_CLOCK_GATE_DISABLE | 6568 I965_RCPB_CLOCK_GATE_DISABLE | 6569 I965_ISC_CLOCK_GATE_DISABLE | 6570 I965_FBC_CLOCK_GATE_DISABLE); 6571 I915_WRITE(RENCLK_GATE_D2, 0); 6572 I915_WRITE(MI_ARB_STATE, 6573 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 6574 6575 /* WaDisable_RenderCache_OperationalFlush:gen4 */ 6576 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 6577 } 6578 6579 static void gen3_init_clock_gating(struct drm_device *dev) 6580 { 6581 struct drm_i915_private *dev_priv = dev->dev_private; 6582 u32 dstate = I915_READ(D_STATE); 6583 6584 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 6585 DSTATE_DOT_CLOCK_GATING; 6586 I915_WRITE(D_STATE, dstate); 6587 6588 if 
(IS_PINEVIEW(dev)) 6589 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); 6590 6591 /* IIR "flip pending" means done if this bit is set */ 6592 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); 6593 6594 /* interrupts should cause a wake up from C3 */ 6595 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); 6596 6597 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 6598 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); 6599 6600 I915_WRITE(MI_ARB_STATE, 6601 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); 6602 } 6603 6604 static void i85x_init_clock_gating(struct drm_device *dev) 6605 { 6606 struct drm_i915_private *dev_priv = dev->dev_private; 6607 6608 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 6609 6610 /* interrupts should cause a wake up from C3 */ 6611 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | 6612 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); 6613 6614 I915_WRITE(MEM_MODE, 6615 _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); 6616 } 6617 6618 static void i830_init_clock_gating(struct drm_device *dev) 6619 { 6620 struct drm_i915_private *dev_priv = dev->dev_private; 6621 6622 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 6623 6624 I915_WRITE(MEM_MODE, 6625 _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | 6626 _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); 6627 } 6628 6629 void intel_init_clock_gating(struct drm_device *dev) 6630 { 6631 struct drm_i915_private *dev_priv = dev->dev_private; 6632 6633 if (dev_priv->display.init_clock_gating) 6634 dev_priv->display.init_clock_gating(dev); 6635 } 6636 6637 void intel_suspend_hw(struct drm_device *dev) 6638 { 6639 if (HAS_PCH_LPT(dev)) 6640 lpt_suspend_hw(dev); 6641 } 6642 6643 /* Set up chip specific power management-related functions */ 6644 void intel_init_pm(struct drm_device *dev) 6645 { 6646 struct drm_i915_private *dev_priv = dev->dev_private; 6647 6648 intel_fbc_init(dev_priv); 6649 6650 /* For cxsr */ 6651 if (IS_PINEVIEW(dev)) 6652 i915_pineview_get_mem_freq(dev); 6653 else if (IS_GEN5(dev)) 6654 i915_ironlake_get_mem_freq(dev); 6655 6656 /* For FIFO watermark updates */ 6657 if (INTEL_INFO(dev)->gen >= 9) { 6658 skl_setup_wm_latency(dev); 6659 6660 if (IS_BROXTON(dev)) 6661 dev_priv->display.init_clock_gating = 6662 bxt_init_clock_gating; 6663 else if (IS_SKYLAKE(dev)) 6664 dev_priv->display.init_clock_gating = 6665 skl_init_clock_gating; 6666 dev_priv->display.update_wm = skl_update_wm; 6667 dev_priv->display.update_sprite_wm = skl_update_sprite_wm; 6668 } else if (HAS_PCH_SPLIT(dev)) { 6669 ilk_setup_wm_latency(dev); 6670 6671 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && 6672 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || 6673 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && 6674 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { 6675 dev_priv->display.update_wm = ilk_update_wm; 6676 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm; 6677 } else { 6678 DRM_DEBUG_KMS("Failed to read display plane latency. 
" 6679 "Disable CxSR\n"); 6680 } 6681 6682 if (IS_GEN5(dev)) 6683 dev_priv->display.init_clock_gating = ironlake_init_clock_gating; 6684 else if (IS_GEN6(dev)) 6685 dev_priv->display.init_clock_gating = gen6_init_clock_gating; 6686 else if (IS_IVYBRIDGE(dev)) 6687 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; 6688 else if (IS_HASWELL(dev)) 6689 dev_priv->display.init_clock_gating = haswell_init_clock_gating; 6690 else if (INTEL_INFO(dev)->gen == 8) 6691 dev_priv->display.init_clock_gating = broadwell_init_clock_gating; 6692 } else if (IS_CHERRYVIEW(dev)) { 6693 dev_priv->display.update_wm = valleyview_update_wm; 6694 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; 6695 dev_priv->display.init_clock_gating = 6696 cherryview_init_clock_gating; 6697 } else if (IS_VALLEYVIEW(dev)) { 6698 dev_priv->display.update_wm = valleyview_update_wm; 6699 dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; 6700 dev_priv->display.init_clock_gating = 6701 valleyview_init_clock_gating; 6702 } else if (IS_PINEVIEW(dev)) { 6703 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), 6704 dev_priv->is_ddr3, 6705 dev_priv->fsb_freq, 6706 dev_priv->mem_freq)) { 6707 DRM_INFO("failed to find known CxSR latency " 6708 "(found ddr%s fsb freq %d, mem freq %d), " 6709 "disabling CxSR\n", 6710 (dev_priv->is_ddr3 == 1) ? "3" : "2", 6711 dev_priv->fsb_freq, dev_priv->mem_freq); 6712 /* Disable CxSR and never update its watermark again */ 6713 intel_set_memory_cxsr(dev_priv, false); 6714 dev_priv->display.update_wm = NULL; 6715 } else 6716 dev_priv->display.update_wm = pineview_update_wm; 6717 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 6718 } else if (IS_G4X(dev)) { 6719 dev_priv->display.update_wm = g4x_update_wm; 6720 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 6721 } else if (IS_GEN4(dev)) { 6722 dev_priv->display.update_wm = i965_update_wm; 6723 if (IS_CRESTLINE(dev)) 6724 dev_priv->display.init_clock_gating = crestline_init_clock_gating; 6725 else if (IS_BROADWATER(dev)) 6726 dev_priv->display.init_clock_gating = broadwater_init_clock_gating; 6727 } else if (IS_GEN3(dev)) { 6728 dev_priv->display.update_wm = i9xx_update_wm; 6729 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 6730 dev_priv->display.init_clock_gating = gen3_init_clock_gating; 6731 } else if (IS_GEN2(dev)) { 6732 if (INTEL_INFO(dev)->num_pipes == 1) { 6733 dev_priv->display.update_wm = i845_update_wm; 6734 dev_priv->display.get_fifo_size = i845_get_fifo_size; 6735 } else { 6736 dev_priv->display.update_wm = i9xx_update_wm; 6737 dev_priv->display.get_fifo_size = i830_get_fifo_size; 6738 } 6739 6740 if (IS_I85X(dev) || IS_I865G(dev)) 6741 dev_priv->display.init_clock_gating = i85x_init_clock_gating; 6742 else 6743 dev_priv->display.init_clock_gating = i830_init_clock_gating; 6744 } else { 6745 DRM_ERROR("unexpected fall-through in intel_init_pm\n"); 6746 } 6747 } 6748 6749 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) 6750 { 6751 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 6752 6753 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 6754 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); 6755 return -EAGAIN; 6756 } 6757 6758 I915_WRITE(GEN6_PCODE_DATA, *val); 6759 I915_WRITE(GEN6_PCODE_DATA1, 0); 6760 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 6761 6762 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6763 500)) { 6764 DRM_ERROR("timeout waiting for pcode read (%d) to 
finish\n", mbox); 6765 return -ETIMEDOUT; 6766 } 6767 6768 *val = I915_READ(GEN6_PCODE_DATA); 6769 I915_WRITE(GEN6_PCODE_DATA, 0); 6770 6771 return 0; 6772 } 6773 6774 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val) 6775 { 6776 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 6777 6778 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 6779 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); 6780 return -EAGAIN; 6781 } 6782 6783 I915_WRITE(GEN6_PCODE_DATA, val); 6784 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 6785 6786 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 6787 500)) { 6788 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); 6789 return -ETIMEDOUT; 6790 } 6791 6792 I915_WRITE(GEN6_PCODE_DATA, 0); 6793 6794 return 0; 6795 } 6796 6797 static int vlv_gpu_freq_div(unsigned int czclk_freq) 6798 { 6799 switch (czclk_freq) { 6800 case 200: 6801 return 10; 6802 case 267: 6803 return 12; 6804 case 320: 6805 case 333: 6806 return 16; 6807 case 400: 6808 return 20; 6809 default: 6810 return -1; 6811 } 6812 } 6813 6814 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) 6815 { 6816 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4); 6817 6818 div = vlv_gpu_freq_div(czclk_freq); 6819 if (div < 0) 6820 return div; 6821 6822 return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div); 6823 } 6824 6825 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) 6826 { 6827 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4); 6828 6829 mul = vlv_gpu_freq_div(czclk_freq); 6830 if (mul < 0) 6831 return mul; 6832 6833 return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6; 6834 } 6835 6836 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) 6837 { 6838 int div, czclk_freq = dev_priv->rps.cz_freq; 6839 6840 div = vlv_gpu_freq_div(czclk_freq) / 2; 6841 if (div < 0) 6842 return div; 6843 6844 return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2; 6845 } 6846 6847 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) 6848 { 6849 int mul, czclk_freq = dev_priv->rps.cz_freq; 6850 6851 mul = vlv_gpu_freq_div(czclk_freq) / 2; 6852 if (mul < 0) 6853 return mul; 6854 6855 /* CHV needs even values */ 6856 return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2; 6857 } 6858 6859 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) 6860 { 6861 if (IS_GEN9(dev_priv->dev)) 6862 return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER; 6863 else if (IS_CHERRYVIEW(dev_priv->dev)) 6864 return chv_gpu_freq(dev_priv, val); 6865 else if (IS_VALLEYVIEW(dev_priv->dev)) 6866 return byt_gpu_freq(dev_priv, val); 6867 else 6868 return val * GT_FREQUENCY_MULTIPLIER; 6869 } 6870 6871 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) 6872 { 6873 if (IS_GEN9(dev_priv->dev)) 6874 return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER; 6875 else if (IS_CHERRYVIEW(dev_priv->dev)) 6876 return chv_freq_opcode(dev_priv, val); 6877 else if (IS_VALLEYVIEW(dev_priv->dev)) 6878 return byt_freq_opcode(dev_priv, val); 6879 else 6880 return val / GT_FREQUENCY_MULTIPLIER; 6881 } 6882 6883 struct request_boost { 6884 struct work_struct work; 6885 struct drm_i915_gem_request *req; 6886 }; 6887 6888 static void __intel_rps_boost_work(struct work_struct *work) 6889 { 6890 struct request_boost *boost = container_of(work, struct request_boost, work); 6891 struct drm_i915_gem_request *req = boost->req; 6892 6893 if 
(!i915_gem_request_completed(req, true)) 6894 gen6_rps_boost(to_i915(req->ring->dev), NULL, 6895 req->emitted_jiffies); 6896 6897 i915_gem_request_unreference__unlocked(req); 6898 kfree(boost); 6899 } 6900 6901 void intel_queue_rps_boost_for_request(struct drm_device *dev, 6902 struct drm_i915_gem_request *req) 6903 { 6904 struct request_boost *boost; 6905 6906 if (req == NULL || INTEL_INFO(dev)->gen < 6) 6907 return; 6908 6909 if (i915_gem_request_completed(req, true)) 6910 return; 6911 6912 boost = kmalloc(sizeof(*boost), M_DRM, M_NOWAIT); 6913 if (boost == NULL) 6914 return; 6915 6916 i915_gem_request_reference(req); 6917 boost->req = req; 6918 6919 INIT_WORK(&boost->work, __intel_rps_boost_work); 6920 queue_work(to_i915(dev)->wq, &boost->work); 6921 } 6922 6923 void intel_pm_setup(struct drm_device *dev) 6924 { 6925 struct drm_i915_private *dev_priv = dev->dev_private; 6926 6927 lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE); 6928 spin_init(&dev_priv->rps.client_lock, "i915rcl"); 6929 6930 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work, 6931 intel_gen6_powersave_work); 6932 INIT_LIST_HEAD(&dev_priv->rps.clients); 6933 INIT_LIST_HEAD(&dev_priv->rps.semaphores.link); 6934 INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link); 6935 6936 dev_priv->pm.suspended = false; 6937 } 6938
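/*
 * Illustrative, compiled-out sketch (not part of the driver): one way the
 * opcode <-> MHz helpers above could be exercised. The function name and
 * the idea of walking the RPS opcode range are assumptions added here for
 * documentation purposes only.
 */
#if 0
static void intel_pm_dump_freq_table(struct drm_i915_private *dev_priv)
{
	int opcode;

	/* Walk the valid RPS opcode range and show the MHz value each
	 * opcode maps to, plus the opcode recovered by converting that
	 * value back with intel_freq_opcode(). */
	for (opcode = dev_priv->rps.min_freq; opcode <= dev_priv->rps.max_freq; opcode++) {
		int mhz = intel_gpu_freq(dev_priv, opcode);

		DRM_DEBUG_DRIVER("opcode %d -> %d MHz -> opcode %d\n",
				 opcode, mhz,
				 intel_freq_opcode(dev_priv, mhz));
	}
}
#endif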