1 /* 2 * Copyright © 2012 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eugeni Dodonov <eugeni.dodonov@intel.com> 25 * 26 */ 27 28 #include <drm/drmP.h> 29 #include "intel_drv.h" 30 #include "i915_drv.h" 31 32 void i8xx_disable_fbc(struct drm_device *dev) 33 { 34 struct drm_i915_private *dev_priv = dev->dev_private; 35 u32 fbc_ctl; 36 37 /* Disable compression */ 38 fbc_ctl = I915_READ(FBC_CONTROL); 39 if ((fbc_ctl & FBC_CTL_EN) == 0) 40 return; 41 42 fbc_ctl &= ~FBC_CTL_EN; 43 I915_WRITE(FBC_CONTROL, fbc_ctl); 44 45 /* Wait for compressing bit to clear */ 46 if (_intel_wait_for(dev, 47 (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10, 48 1, "915fbd")) { 49 DRM_DEBUG_KMS("FBC idle timed out\n"); 50 return; 51 } 52 53 DRM_DEBUG_KMS("disabled FBC\n"); 54 } 55 56 void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 57 { 58 struct drm_device *dev = crtc->dev; 59 struct drm_i915_private *dev_priv = dev->dev_private; 60 struct drm_framebuffer *fb = crtc->fb; 61 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 62 struct drm_i915_gem_object *obj = intel_fb->obj; 63 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 64 int cfb_pitch; 65 int plane, i; 66 u32 fbc_ctl, fbc_ctl2; 67 68 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; 69 if (fb->pitches[0] < cfb_pitch) 70 cfb_pitch = fb->pitches[0]; 71 72 /* FBC_CTL wants 64B units */ 73 cfb_pitch = (cfb_pitch / 64) - 1; 74 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; 75 76 /* Clear old tags */ 77 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) 78 I915_WRITE(FBC_TAG + (i * 4), 0); 79 80 /* Set it up... */ 81 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; 82 fbc_ctl2 |= plane; 83 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 84 I915_WRITE(FBC_FENCE_OFF, crtc->y); 85 86 /* enable it... */ 87 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; 88 if (IS_I945GM(dev)) 89 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 90 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 91 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 92 fbc_ctl |= obj->fence_reg; 93 I915_WRITE(FBC_CONTROL, fbc_ctl); 94 95 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", 96 cfb_pitch, crtc->y, intel_crtc->plane); 97 } 98 99 bool i8xx_fbc_enabled(struct drm_device *dev) 100 { 101 struct drm_i915_private *dev_priv = dev->dev_private; 102 103 return I915_READ(FBC_CONTROL) & FBC_CTL_EN; 104 } 105 106 void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 107 { 108 struct drm_device *dev = crtc->dev; 109 struct drm_i915_private *dev_priv = dev->dev_private; 110 struct drm_framebuffer *fb = crtc->fb; 111 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 112 struct drm_i915_gem_object *obj = intel_fb->obj; 113 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 114 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 115 unsigned long stall_watermark = 200; 116 u32 dpfc_ctl; 117 118 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 119 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; 120 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); 121 122 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 123 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 124 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 125 I915_WRITE(DPFC_FENCE_YOFF, crtc->y); 126 127 /* enable it... */ 128 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 129 130 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 131 } 132 133 void g4x_disable_fbc(struct drm_device *dev) 134 { 135 struct drm_i915_private *dev_priv = dev->dev_private; 136 u32 dpfc_ctl; 137 138 /* Disable compression */ 139 dpfc_ctl = I915_READ(DPFC_CONTROL); 140 if (dpfc_ctl & DPFC_CTL_EN) { 141 dpfc_ctl &= ~DPFC_CTL_EN; 142 I915_WRITE(DPFC_CONTROL, dpfc_ctl); 143 144 DRM_DEBUG_KMS("disabled FBC\n"); 145 } 146 } 147 148 bool g4x_fbc_enabled(struct drm_device *dev) 149 { 150 struct drm_i915_private *dev_priv = dev->dev_private; 151 152 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; 153 } 154 155 static void sandybridge_blit_fbc_update(struct drm_device *dev) 156 { 157 struct drm_i915_private *dev_priv = dev->dev_private; 158 u32 blt_ecoskpd; 159 160 /* Make sure blitter notifies FBC of writes */ 161 gen6_gt_force_wake_get(dev_priv); 162 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); 163 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << 164 GEN6_BLITTER_LOCK_SHIFT; 165 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 166 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; 167 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 168 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << 169 GEN6_BLITTER_LOCK_SHIFT); 170 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 171 POSTING_READ(GEN6_BLITTER_ECOSKPD); 172 gen6_gt_force_wake_put(dev_priv); 173 } 174 175 void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 176 { 177 struct drm_device *dev = crtc->dev; 178 struct drm_i915_private *dev_priv = dev->dev_private; 179 struct drm_framebuffer *fb = crtc->fb; 180 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 181 struct drm_i915_gem_object *obj = intel_fb->obj; 182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 183 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 184 unsigned long stall_watermark = 200; 185 u32 dpfc_ctl; 186 187 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 188 dpfc_ctl &= DPFC_RESERVED; 189 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 190 /* Set persistent mode for front-buffer rendering, ala X. */ 191 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; 192 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); 193 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 194 195 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | 196 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 197 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 198 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 199 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); 200 /* enable it... */ 201 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 202 203 if (IS_GEN6(dev)) { 204 I915_WRITE(SNB_DPFC_CTL_SA, 205 SNB_CPU_FENCE_ENABLE | obj->fence_reg); 206 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); 207 sandybridge_blit_fbc_update(dev); 208 } 209 210 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 211 } 212 213 void ironlake_disable_fbc(struct drm_device *dev) 214 { 215 struct drm_i915_private *dev_priv = dev->dev_private; 216 u32 dpfc_ctl; 217 218 /* Disable compression */ 219 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 220 if (dpfc_ctl & DPFC_CTL_EN) { 221 dpfc_ctl &= ~DPFC_CTL_EN; 222 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); 223 224 DRM_DEBUG_KMS("disabled FBC\n"); 225 } 226 } 227 228 bool ironlake_fbc_enabled(struct drm_device *dev) 229 { 230 struct drm_i915_private *dev_priv = dev->dev_private; 231 232 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; 233 } 234 235 bool intel_fbc_enabled(struct drm_device *dev) 236 { 237 struct drm_i915_private *dev_priv = dev->dev_private; 238 239 if (!dev_priv->display.fbc_enabled) 240 return false; 241 242 return dev_priv->display.fbc_enabled(dev); 243 } 244 245 static void intel_fbc_work_fn(void *arg, int pending) 246 { 247 struct intel_fbc_work *work = arg; 248 struct drm_device *dev = work->crtc->dev; 249 struct drm_i915_private *dev_priv = dev->dev_private; 250 251 DRM_LOCK(dev); 252 if (work == dev_priv->fbc_work) { 253 /* Double check that we haven't switched fb without cancelling 254 * the prior work. 255 */ 256 if (work->crtc->fb == work->fb) { 257 dev_priv->display.enable_fbc(work->crtc, 258 work->interval); 259 260 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; 261 dev_priv->cfb_fb = work->crtc->fb->base.id; 262 dev_priv->cfb_y = work->crtc->y; 263 } 264 265 dev_priv->fbc_work = NULL; 266 } 267 DRM_UNLOCK(dev); 268 269 drm_free(work, DRM_MEM_KMS); 270 } 271 272 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) 273 { 274 u_int pending; 275 276 if (dev_priv->fbc_work == NULL) 277 return; 278 279 DRM_DEBUG_KMS("cancelling pending FBC enable\n"); 280 281 /* Synchronisation is provided by struct_mutex and checking of 282 * dev_priv->fbc_work, so we can perform the cancellation 283 * entirely asynchronously. 284 */ 285 if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task, 286 &pending) == 0) 287 /* tasklet was killed before being run, clean up */ 288 drm_free(dev_priv->fbc_work, DRM_MEM_KMS); 289 290 /* Mark the work as no longer wanted so that if it does 291 * wake-up (because the work was already running and waiting 292 * for our mutex), it will discover that is no longer 293 * necessary to run. 294 */ 295 dev_priv->fbc_work = NULL; 296 } 297 298 void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 299 { 300 struct intel_fbc_work *work; 301 struct drm_device *dev = crtc->dev; 302 struct drm_i915_private *dev_priv = dev->dev_private; 303 304 if (!dev_priv->display.enable_fbc) 305 return; 306 307 intel_cancel_fbc_work(dev_priv); 308 309 work = kmalloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO); 310 work->crtc = crtc; 311 work->fb = crtc->fb; 312 work->interval = interval; 313 TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn, 314 work); 315 316 dev_priv->fbc_work = work; 317 318 DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); 319 320 /* Delay the actual enabling to let pageflipping cease and the 321 * display to settle before starting the compression. Note that 322 * this delay also serves a second purpose: it allows for a 323 * vblank to pass after disabling the FBC before we attempt 324 * to modify the control registers. 325 * 326 * A more complicated solution would involve tracking vblanks 327 * following the termination of the page-flipping sequence 328 * and indeed performing the enable as a co-routine and not 329 * waiting synchronously upon the vblank. 330 */ 331 taskqueue_enqueue_timeout(dev_priv->tq, &work->task, 332 msecs_to_jiffies(50)); 333 } 334 335 void intel_disable_fbc(struct drm_device *dev) 336 { 337 struct drm_i915_private *dev_priv = dev->dev_private; 338 339 intel_cancel_fbc_work(dev_priv); 340 341 if (!dev_priv->display.disable_fbc) 342 return; 343 344 dev_priv->display.disable_fbc(dev); 345 dev_priv->cfb_plane = -1; 346 } 347 348 /** 349 * intel_update_fbc - enable/disable FBC as needed 350 * @dev: the drm_device 351 * 352 * Set up the framebuffer compression hardware at mode set time. We 353 * enable it if possible: 354 * - plane A only (on pre-965) 355 * - no pixel mulitply/line duplication 356 * - no alpha buffer discard 357 * - no dual wide 358 * - framebuffer <= 2048 in width, 1536 in height 359 * 360 * We can't assume that any compression will take place (worst case), 361 * so the compressed buffer has to be the same size as the uncompressed 362 * one. It also must reside (along with the line length buffer) in 363 * stolen memory. 364 * 365 * We need to enable/disable FBC on a global basis. 366 */ 367 void intel_update_fbc(struct drm_device *dev) 368 { 369 struct drm_i915_private *dev_priv = dev->dev_private; 370 struct drm_crtc *crtc = NULL, *tmp_crtc; 371 struct intel_crtc *intel_crtc; 372 struct drm_framebuffer *fb; 373 struct intel_framebuffer *intel_fb; 374 struct drm_i915_gem_object *obj; 375 int enable_fbc; 376 377 DRM_DEBUG_KMS("\n"); 378 379 if (!i915_powersave) 380 return; 381 382 if (!I915_HAS_FBC(dev)) 383 return; 384 385 /* 386 * If FBC is already on, we just have to verify that we can 387 * keep it that way... 388 * Need to disable if: 389 * - more than one pipe is active 390 * - changing FBC params (stride, fence, mode) 391 * - new fb is too large to fit in compressed buffer 392 * - going to an unsupported config (interlace, pixel multiply, etc.) 393 */ 394 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 395 if (tmp_crtc->enabled && tmp_crtc->fb) { 396 if (crtc) { 397 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 398 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; 399 goto out_disable; 400 } 401 crtc = tmp_crtc; 402 } 403 } 404 405 if (!crtc || crtc->fb == NULL) { 406 DRM_DEBUG_KMS("no output, disabling\n"); 407 dev_priv->no_fbc_reason = FBC_NO_OUTPUT; 408 goto out_disable; 409 } 410 411 intel_crtc = to_intel_crtc(crtc); 412 fb = crtc->fb; 413 intel_fb = to_intel_framebuffer(fb); 414 obj = intel_fb->obj; 415 416 enable_fbc = i915_enable_fbc; 417 if (enable_fbc < 0) { 418 DRM_DEBUG_KMS("fbc set to per-chip default\n"); 419 enable_fbc = 1; 420 if (INTEL_INFO(dev)->gen <= 6) 421 enable_fbc = 0; 422 } 423 if (!enable_fbc) { 424 DRM_DEBUG_KMS("fbc disabled per module param\n"); 425 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 426 goto out_disable; 427 } 428 if (intel_fb->obj->base.size > dev_priv->cfb_size) { 429 DRM_DEBUG_KMS("framebuffer too large, disabling " 430 "compression\n"); 431 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 432 goto out_disable; 433 } 434 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || 435 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { 436 DRM_DEBUG_KMS("mode incompatible with compression, " 437 "disabling\n"); 438 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; 439 goto out_disable; 440 } 441 if ((crtc->mode.hdisplay > 2048) || 442 (crtc->mode.vdisplay > 1536)) { 443 DRM_DEBUG_KMS("mode too large for compression, disabling\n"); 444 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; 445 goto out_disable; 446 } 447 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { 448 DRM_DEBUG_KMS("plane not 0, disabling compression\n"); 449 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 450 goto out_disable; 451 } 452 if (obj->tiling_mode != I915_TILING_X || 453 obj->fence_reg == I915_FENCE_REG_NONE) { 454 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); 455 dev_priv->no_fbc_reason = FBC_NOT_TILED; 456 goto out_disable; 457 } 458 459 #ifdef DDB 460 /* If the kernel debugger is active, always disable compression */ 461 if (db_active) 462 goto out_disable; 463 #endif 464 465 /* If the scanout has not changed, don't modify the FBC settings. 466 * Note that we make the fundamental assumption that the fb->obj 467 * cannot be unpinned (and have its GTT offset and fence revoked) 468 * without first being decoupled from the scanout and FBC disabled. 469 */ 470 if (dev_priv->cfb_plane == intel_crtc->plane && 471 dev_priv->cfb_fb == fb->base.id && 472 dev_priv->cfb_y == crtc->y) 473 return; 474 475 if (intel_fbc_enabled(dev)) { 476 /* We update FBC along two paths, after changing fb/crtc 477 * configuration (modeswitching) and after page-flipping 478 * finishes. For the latter, we know that not only did 479 * we disable the FBC at the start of the page-flip 480 * sequence, but also more than one vblank has passed. 481 * 482 * For the former case of modeswitching, it is possible 483 * to switch between two FBC valid configurations 484 * instantaneously so we do need to disable the FBC 485 * before we can modify its control registers. We also 486 * have to wait for the next vblank for that to take 487 * effect. However, since we delay enabling FBC we can 488 * assume that a vblank has passed since disabling and 489 * that we can safely alter the registers in the deferred 490 * callback. 491 * 492 * In the scenario that we go from a valid to invalid 493 * and then back to valid FBC configuration we have 494 * no strict enforcement that a vblank occurred since 495 * disabling the FBC. However, along all current pipe 496 * disabling paths we do need to wait for a vblank at 497 * some point. And we wait before enabling FBC anyway. 498 */ 499 DRM_DEBUG_KMS("disabling active FBC for update\n"); 500 intel_disable_fbc(dev); 501 } 502 503 intel_enable_fbc(crtc, 500); 504 return; 505 506 out_disable: 507 /* Multiple disables should be harmless */ 508 if (intel_fbc_enabled(dev)) { 509 DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 510 intel_disable_fbc(dev); 511 } 512 } 513 514 void i915_ironlake_get_mem_freq(struct drm_device *dev); 515 void i915_pineview_get_mem_freq(struct drm_device *dev) 516 { 517 drm_i915_private_t *dev_priv = dev->dev_private; 518 u32 tmp; 519 520 tmp = I915_READ(CLKCFG); 521 522 switch (tmp & CLKCFG_FSB_MASK) { 523 case CLKCFG_FSB_533: 524 dev_priv->fsb_freq = 533; /* 133*4 */ 525 break; 526 case CLKCFG_FSB_800: 527 dev_priv->fsb_freq = 800; /* 200*4 */ 528 break; 529 case CLKCFG_FSB_667: 530 dev_priv->fsb_freq = 667; /* 167*4 */ 531 break; 532 case CLKCFG_FSB_400: 533 dev_priv->fsb_freq = 400; /* 100*4 */ 534 break; 535 } 536 537 switch (tmp & CLKCFG_MEM_MASK) { 538 case CLKCFG_MEM_533: 539 dev_priv->mem_freq = 533; 540 break; 541 case CLKCFG_MEM_667: 542 dev_priv->mem_freq = 667; 543 break; 544 case CLKCFG_MEM_800: 545 dev_priv->mem_freq = 800; 546 break; 547 } 548 549 /* detect pineview DDR3 setting */ 550 tmp = I915_READ(CSHRDDR3CTL); 551 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; 552 } 553 554 void i915_ironlake_get_mem_freq(struct drm_device *dev) 555 { 556 drm_i915_private_t *dev_priv = dev->dev_private; 557 u16 ddrpll, csipll; 558 559 ddrpll = I915_READ16(DDRMPLL1); 560 csipll = I915_READ16(CSIPLL0); 561 562 switch (ddrpll & 0xff) { 563 case 0xc: 564 dev_priv->mem_freq = 800; 565 break; 566 case 0x10: 567 dev_priv->mem_freq = 1066; 568 break; 569 case 0x14: 570 dev_priv->mem_freq = 1333; 571 break; 572 case 0x18: 573 dev_priv->mem_freq = 1600; 574 break; 575 default: 576 DRM_DEBUG("unknown memory frequency 0x%02x\n", 577 ddrpll & 0xff); 578 dev_priv->mem_freq = 0; 579 break; 580 } 581 582 dev_priv->r_t = dev_priv->mem_freq; 583 584 switch (csipll & 0x3ff) { 585 case 0x00c: 586 dev_priv->fsb_freq = 3200; 587 break; 588 case 0x00e: 589 dev_priv->fsb_freq = 3733; 590 break; 591 case 0x010: 592 dev_priv->fsb_freq = 4266; 593 break; 594 case 0x012: 595 dev_priv->fsb_freq = 4800; 596 break; 597 case 0x014: 598 dev_priv->fsb_freq = 5333; 599 break; 600 case 0x016: 601 dev_priv->fsb_freq = 5866; 602 break; 603 case 0x018: 604 dev_priv->fsb_freq = 6400; 605 break; 606 default: 607 DRM_DEBUG("unknown fsb frequency 0x%04x\n", 608 csipll & 0x3ff); 609 dev_priv->fsb_freq = 0; 610 break; 611 } 612 613 if (dev_priv->fsb_freq == 3200) { 614 dev_priv->c_m = 0; 615 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { 616 dev_priv->c_m = 1; 617 } else { 618 dev_priv->c_m = 2; 619 } 620 } 621 622 /* Pineview has different values for various configs */ 623 static const struct intel_watermark_params pineview_display_wm = { 624 PINEVIEW_DISPLAY_FIFO, 625 PINEVIEW_MAX_WM, 626 PINEVIEW_DFT_WM, 627 PINEVIEW_GUARD_WM, 628 PINEVIEW_FIFO_LINE_SIZE 629 }; 630 static const struct intel_watermark_params pineview_display_hplloff_wm = { 631 PINEVIEW_DISPLAY_FIFO, 632 PINEVIEW_MAX_WM, 633 PINEVIEW_DFT_HPLLOFF_WM, 634 PINEVIEW_GUARD_WM, 635 PINEVIEW_FIFO_LINE_SIZE 636 }; 637 static const struct intel_watermark_params pineview_cursor_wm = { 638 PINEVIEW_CURSOR_FIFO, 639 PINEVIEW_CURSOR_MAX_WM, 640 PINEVIEW_CURSOR_DFT_WM, 641 PINEVIEW_CURSOR_GUARD_WM, 642 PINEVIEW_FIFO_LINE_SIZE, 643 }; 644 static const struct intel_watermark_params pineview_cursor_hplloff_wm = { 645 PINEVIEW_CURSOR_FIFO, 646 PINEVIEW_CURSOR_MAX_WM, 647 PINEVIEW_CURSOR_DFT_WM, 648 PINEVIEW_CURSOR_GUARD_WM, 649 PINEVIEW_FIFO_LINE_SIZE 650 }; 651 static const struct intel_watermark_params g4x_wm_info = { 652 G4X_FIFO_SIZE, 653 G4X_MAX_WM, 654 G4X_MAX_WM, 655 2, 656 G4X_FIFO_LINE_SIZE, 657 }; 658 static const struct intel_watermark_params g4x_cursor_wm_info = { 659 I965_CURSOR_FIFO, 660 I965_CURSOR_MAX_WM, 661 I965_CURSOR_DFT_WM, 662 2, 663 G4X_FIFO_LINE_SIZE, 664 }; 665 static const struct intel_watermark_params i965_cursor_wm_info = { 666 I965_CURSOR_FIFO, 667 I965_CURSOR_MAX_WM, 668 I965_CURSOR_DFT_WM, 669 2, 670 I915_FIFO_LINE_SIZE, 671 }; 672 static const struct intel_watermark_params i945_wm_info = { 673 I945_FIFO_SIZE, 674 I915_MAX_WM, 675 1, 676 2, 677 I915_FIFO_LINE_SIZE 678 }; 679 static const struct intel_watermark_params i915_wm_info = { 680 I915_FIFO_SIZE, 681 I915_MAX_WM, 682 1, 683 2, 684 I915_FIFO_LINE_SIZE 685 }; 686 static const struct intel_watermark_params i855_wm_info = { 687 I855GM_FIFO_SIZE, 688 I915_MAX_WM, 689 1, 690 2, 691 I830_FIFO_LINE_SIZE 692 }; 693 static const struct intel_watermark_params i830_wm_info = { 694 I830_FIFO_SIZE, 695 I915_MAX_WM, 696 1, 697 2, 698 I830_FIFO_LINE_SIZE 699 }; 700 701 static const struct intel_watermark_params ironlake_display_wm_info = { 702 ILK_DISPLAY_FIFO, 703 ILK_DISPLAY_MAXWM, 704 ILK_DISPLAY_DFTWM, 705 2, 706 ILK_FIFO_LINE_SIZE 707 }; 708 static const struct intel_watermark_params ironlake_cursor_wm_info = { 709 ILK_CURSOR_FIFO, 710 ILK_CURSOR_MAXWM, 711 ILK_CURSOR_DFTWM, 712 2, 713 ILK_FIFO_LINE_SIZE 714 }; 715 static const struct intel_watermark_params ironlake_display_srwm_info = { 716 ILK_DISPLAY_SR_FIFO, 717 ILK_DISPLAY_MAX_SRWM, 718 ILK_DISPLAY_DFT_SRWM, 719 2, 720 ILK_FIFO_LINE_SIZE 721 }; 722 static const struct intel_watermark_params ironlake_cursor_srwm_info = { 723 ILK_CURSOR_SR_FIFO, 724 ILK_CURSOR_MAX_SRWM, 725 ILK_CURSOR_DFT_SRWM, 726 2, 727 ILK_FIFO_LINE_SIZE 728 }; 729 730 static const struct intel_watermark_params sandybridge_display_wm_info = { 731 SNB_DISPLAY_FIFO, 732 SNB_DISPLAY_MAXWM, 733 SNB_DISPLAY_DFTWM, 734 2, 735 SNB_FIFO_LINE_SIZE 736 }; 737 static const struct intel_watermark_params sandybridge_cursor_wm_info = { 738 SNB_CURSOR_FIFO, 739 SNB_CURSOR_MAXWM, 740 SNB_CURSOR_DFTWM, 741 2, 742 SNB_FIFO_LINE_SIZE 743 }; 744 static const struct intel_watermark_params sandybridge_display_srwm_info = { 745 SNB_DISPLAY_SR_FIFO, 746 SNB_DISPLAY_MAX_SRWM, 747 SNB_DISPLAY_DFT_SRWM, 748 2, 749 SNB_FIFO_LINE_SIZE 750 }; 751 static const struct intel_watermark_params sandybridge_cursor_srwm_info = { 752 SNB_CURSOR_SR_FIFO, 753 SNB_CURSOR_MAX_SRWM, 754 SNB_CURSOR_DFT_SRWM, 755 2, 756 SNB_FIFO_LINE_SIZE 757 }; 758 759 760 /** 761 * intel_calculate_wm - calculate watermark level 762 * @clock_in_khz: pixel clock 763 * @wm: chip FIFO params 764 * @pixel_size: display pixel size 765 * @latency_ns: memory latency for the platform 766 * 767 * Calculate the watermark level (the level at which the display plane will 768 * start fetching from memory again). Each chip has a different display 769 * FIFO size and allocation, so the caller needs to figure that out and pass 770 * in the correct intel_watermark_params structure. 771 * 772 * As the pixel clock runs, the FIFO will be drained at a rate that depends 773 * on the pixel size. When it reaches the watermark level, it'll start 774 * fetching FIFO line sized based chunks from memory until the FIFO fills 775 * past the watermark point. If the FIFO drains completely, a FIFO underrun 776 * will occur, and a display engine hang could result. 777 */ 778 static unsigned long intel_calculate_wm(unsigned long clock_in_khz, 779 const struct intel_watermark_params *wm, 780 int fifo_size, 781 int pixel_size, 782 unsigned long latency_ns) 783 { 784 long entries_required, wm_size; 785 786 /* 787 * Note: we need to make sure we don't overflow for various clock & 788 * latency values. 789 * clocks go from a few thousand to several hundred thousand. 790 * latency is usually a few thousand 791 */ 792 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 793 1000; 794 entries_required = howmany(entries_required, wm->cacheline_size); 795 796 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); 797 798 wm_size = fifo_size - (entries_required + wm->guard_size); 799 800 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); 801 802 /* Don't promote wm_size to unsigned... */ 803 if (wm_size > (long)wm->max_wm) 804 wm_size = wm->max_wm; 805 if (wm_size <= 0) 806 wm_size = wm->default_wm; 807 return wm_size; 808 } 809 810 struct cxsr_latency { 811 int is_desktop; 812 int is_ddr3; 813 unsigned long fsb_freq; 814 unsigned long mem_freq; 815 unsigned long display_sr; 816 unsigned long display_hpll_disable; 817 unsigned long cursor_sr; 818 unsigned long cursor_hpll_disable; 819 }; 820 821 static const struct cxsr_latency cxsr_latency_table[] = { 822 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ 823 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ 824 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ 825 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ 826 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ 827 828 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ 829 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ 830 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ 831 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ 832 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ 833 834 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ 835 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ 836 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ 837 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ 838 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ 839 840 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ 841 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ 842 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ 843 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ 844 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ 845 846 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ 847 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ 848 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ 849 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ 850 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ 851 852 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ 853 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ 854 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ 855 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ 856 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ 857 }; 858 859 const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, 860 int is_ddr3, 861 int fsb, 862 int mem) 863 { 864 const struct cxsr_latency *latency; 865 int i; 866 867 if (fsb == 0 || mem == 0) 868 return NULL; 869 870 for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) { 871 latency = &cxsr_latency_table[i]; 872 if (is_desktop == latency->is_desktop && 873 is_ddr3 == latency->is_ddr3 && 874 fsb == latency->fsb_freq && mem == latency->mem_freq) 875 return latency; 876 } 877 878 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 879 880 return NULL; 881 } 882 883 void pineview_disable_cxsr(struct drm_device *dev) 884 { 885 struct drm_i915_private *dev_priv = dev->dev_private; 886 887 /* deactivate cxsr */ 888 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); 889 } 890 891 /* 892 * Latency for FIFO fetches is dependent on several factors: 893 * - memory configuration (speed, channels) 894 * - chipset 895 * - current MCH state 896 * It can be fairly high in some situations, so here we assume a fairly 897 * pessimal value. It's a tradeoff between extra memory fetches (if we 898 * set this value too high, the FIFO will fetch frequently to stay full) 899 * and power consumption (set it too low to save power and we might see 900 * FIFO underruns and display "flicker"). 901 * 902 * A value of 5us seems to be a good balance; safe for very low end 903 * platforms but not overly aggressive on lower latency configs. 904 */ 905 static const int latency_ns = 5000; 906 907 int i9xx_get_fifo_size(struct drm_device *dev, int plane) 908 { 909 struct drm_i915_private *dev_priv = dev->dev_private; 910 uint32_t dsparb = I915_READ(DSPARB); 911 int size; 912 913 size = dsparb & 0x7f; 914 if (plane) 915 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; 916 917 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 918 plane ? "B" : "A", size); 919 920 return size; 921 } 922 923 int i85x_get_fifo_size(struct drm_device *dev, int plane) 924 { 925 struct drm_i915_private *dev_priv = dev->dev_private; 926 uint32_t dsparb = I915_READ(DSPARB); 927 int size; 928 929 size = dsparb & 0x1ff; 930 if (plane) 931 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; 932 size >>= 1; /* Convert to cachelines */ 933 934 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 935 plane ? "B" : "A", size); 936 937 return size; 938 } 939 940 int i845_get_fifo_size(struct drm_device *dev, int plane) 941 { 942 struct drm_i915_private *dev_priv = dev->dev_private; 943 uint32_t dsparb = I915_READ(DSPARB); 944 int size; 945 946 size = dsparb & 0x7f; 947 size >>= 2; /* Convert to cachelines */ 948 949 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 950 plane ? "B" : "A", 951 size); 952 953 return size; 954 } 955 956 int i830_get_fifo_size(struct drm_device *dev, int plane) 957 { 958 struct drm_i915_private *dev_priv = dev->dev_private; 959 uint32_t dsparb = I915_READ(DSPARB); 960 int size; 961 962 size = dsparb & 0x7f; 963 size >>= 1; /* Convert to cachelines */ 964 965 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, 966 plane ? "B" : "A", size); 967 968 return size; 969 } 970 971 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) 972 { 973 struct drm_crtc *crtc, *enabled = NULL; 974 975 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 976 if (crtc->enabled && crtc->fb) { 977 if (enabled) 978 return NULL; 979 enabled = crtc; 980 } 981 } 982 983 return enabled; 984 } 985 986 void pineview_update_wm(struct drm_device *dev) 987 { 988 struct drm_i915_private *dev_priv = dev->dev_private; 989 struct drm_crtc *crtc; 990 const struct cxsr_latency *latency; 991 u32 reg; 992 unsigned long wm; 993 994 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, 995 dev_priv->fsb_freq, dev_priv->mem_freq); 996 if (!latency) { 997 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); 998 pineview_disable_cxsr(dev); 999 return; 1000 } 1001 1002 crtc = single_enabled_crtc(dev); 1003 if (crtc) { 1004 int clock = crtc->mode.clock; 1005 int pixel_size = crtc->fb->bits_per_pixel / 8; 1006 1007 /* Display SR */ 1008 wm = intel_calculate_wm(clock, &pineview_display_wm, 1009 pineview_display_wm.fifo_size, 1010 pixel_size, latency->display_sr); 1011 reg = I915_READ(DSPFW1); 1012 reg &= ~DSPFW_SR_MASK; 1013 reg |= wm << DSPFW_SR_SHIFT; 1014 I915_WRITE(DSPFW1, reg); 1015 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); 1016 1017 /* cursor SR */ 1018 wm = intel_calculate_wm(clock, &pineview_cursor_wm, 1019 pineview_display_wm.fifo_size, 1020 pixel_size, latency->cursor_sr); 1021 reg = I915_READ(DSPFW3); 1022 reg &= ~DSPFW_CURSOR_SR_MASK; 1023 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; 1024 I915_WRITE(DSPFW3, reg); 1025 1026 /* Display HPLL off SR */ 1027 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, 1028 pineview_display_hplloff_wm.fifo_size, 1029 pixel_size, latency->display_hpll_disable); 1030 reg = I915_READ(DSPFW3); 1031 reg &= ~DSPFW_HPLL_SR_MASK; 1032 reg |= wm & DSPFW_HPLL_SR_MASK; 1033 I915_WRITE(DSPFW3, reg); 1034 1035 /* cursor HPLL off SR */ 1036 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, 1037 pineview_display_hplloff_wm.fifo_size, 1038 pixel_size, latency->cursor_hpll_disable); 1039 reg = I915_READ(DSPFW3); 1040 reg &= ~DSPFW_HPLL_CURSOR_MASK; 1041 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; 1042 I915_WRITE(DSPFW3, reg); 1043 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); 1044 1045 /* activate cxsr */ 1046 I915_WRITE(DSPFW3, 1047 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); 1048 DRM_DEBUG_KMS("Self-refresh is enabled\n"); 1049 } else { 1050 pineview_disable_cxsr(dev); 1051 DRM_DEBUG_KMS("Self-refresh is disabled\n"); 1052 } 1053 } 1054 1055 static bool g4x_compute_wm0(struct drm_device *dev, 1056 int plane, 1057 const struct intel_watermark_params *display, 1058 int display_latency_ns, 1059 const struct intel_watermark_params *cursor, 1060 int cursor_latency_ns, 1061 int *plane_wm, 1062 int *cursor_wm) 1063 { 1064 struct drm_crtc *crtc; 1065 int htotal, hdisplay, clock, pixel_size; 1066 int line_time_us, line_count; 1067 int entries, tlb_miss; 1068 1069 crtc = intel_get_crtc_for_plane(dev, plane); 1070 if (crtc->fb == NULL || !crtc->enabled) { 1071 *cursor_wm = cursor->guard_size; 1072 *plane_wm = display->guard_size; 1073 return false; 1074 } 1075 1076 htotal = crtc->mode.htotal; 1077 hdisplay = crtc->mode.hdisplay; 1078 clock = crtc->mode.clock; 1079 pixel_size = crtc->fb->bits_per_pixel / 8; 1080 1081 /* Use the small buffer method to calculate plane watermark */ 1082 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 1083 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; 1084 if (tlb_miss > 0) 1085 entries += tlb_miss; 1086 entries = howmany(entries, display->cacheline_size); 1087 *plane_wm = entries + display->guard_size; 1088 if (*plane_wm > (int)display->max_wm) 1089 *plane_wm = display->max_wm; 1090 1091 /* Use the large buffer method to calculate cursor watermark */ 1092 line_time_us = ((htotal * 1000) / clock); 1093 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; 1094 entries = line_count * 64 * pixel_size; 1095 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; 1096 if (tlb_miss > 0) 1097 entries += tlb_miss; 1098 entries = howmany(entries, cursor->cacheline_size); 1099 *cursor_wm = entries + cursor->guard_size; 1100 if (*cursor_wm > (int)cursor->max_wm) 1101 *cursor_wm = (int)cursor->max_wm; 1102 1103 return true; 1104 } 1105 1106 /* 1107 * Check the wm result. 1108 * 1109 * If any calculated watermark values is larger than the maximum value that 1110 * can be programmed into the associated watermark register, that watermark 1111 * must be disabled. 1112 */ 1113 static bool g4x_check_srwm(struct drm_device *dev, 1114 int display_wm, int cursor_wm, 1115 const struct intel_watermark_params *display, 1116 const struct intel_watermark_params *cursor) 1117 { 1118 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", 1119 display_wm, cursor_wm); 1120 1121 if (display_wm > display->max_wm) { 1122 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", 1123 display_wm, display->max_wm); 1124 return false; 1125 } 1126 1127 if (cursor_wm > cursor->max_wm) { 1128 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", 1129 cursor_wm, cursor->max_wm); 1130 return false; 1131 } 1132 1133 if (!(display_wm || cursor_wm)) { 1134 DRM_DEBUG_KMS("SR latency is 0, disabling\n"); 1135 return false; 1136 } 1137 1138 return true; 1139 } 1140 1141 static bool g4x_compute_srwm(struct drm_device *dev, 1142 int plane, 1143 int latency_ns, 1144 const struct intel_watermark_params *display, 1145 const struct intel_watermark_params *cursor, 1146 int *display_wm, int *cursor_wm) 1147 { 1148 struct drm_crtc *crtc; 1149 int hdisplay, htotal, pixel_size, clock; 1150 unsigned long line_time_us; 1151 int line_count, line_size; 1152 int small, large; 1153 int entries; 1154 1155 if (!latency_ns) { 1156 *display_wm = *cursor_wm = 0; 1157 return false; 1158 } 1159 1160 crtc = intel_get_crtc_for_plane(dev, plane); 1161 hdisplay = crtc->mode.hdisplay; 1162 htotal = crtc->mode.htotal; 1163 clock = crtc->mode.clock; 1164 pixel_size = crtc->fb->bits_per_pixel / 8; 1165 1166 line_time_us = (htotal * 1000) / clock; 1167 line_count = (latency_ns / line_time_us + 1000) / 1000; 1168 line_size = hdisplay * pixel_size; 1169 1170 /* Use the minimum of the small and large buffer method for primary */ 1171 small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 1172 large = line_count * line_size; 1173 1174 entries = howmany(min(small, large), display->cacheline_size); 1175 *display_wm = entries + display->guard_size; 1176 1177 /* calculate the self-refresh watermark for display cursor */ 1178 entries = line_count * pixel_size * 64; 1179 entries = howmany(entries, cursor->cacheline_size); 1180 *cursor_wm = entries + cursor->guard_size; 1181 1182 return g4x_check_srwm(dev, 1183 *display_wm, *cursor_wm, 1184 display, cursor); 1185 } 1186 1187 #define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask)) 1188 1189 void g4x_update_wm(struct drm_device *dev) 1190 { 1191 static const int sr_latency_ns = 12000; 1192 struct drm_i915_private *dev_priv = dev->dev_private; 1193 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1194 int plane_sr, cursor_sr; 1195 unsigned int enabled = 0; 1196 1197 if (g4x_compute_wm0(dev, 0, 1198 &g4x_wm_info, latency_ns, 1199 &g4x_cursor_wm_info, latency_ns, 1200 &planea_wm, &cursora_wm)) 1201 enabled |= 1; 1202 1203 if (g4x_compute_wm0(dev, 1, 1204 &g4x_wm_info, latency_ns, 1205 &g4x_cursor_wm_info, latency_ns, 1206 &planeb_wm, &cursorb_wm)) 1207 enabled |= 2; 1208 1209 plane_sr = cursor_sr = 0; 1210 if (single_plane_enabled(enabled) && 1211 g4x_compute_srwm(dev, ffs(enabled) - 1, 1212 sr_latency_ns, 1213 &g4x_wm_info, 1214 &g4x_cursor_wm_info, 1215 &plane_sr, &cursor_sr)) 1216 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 1217 else 1218 I915_WRITE(FW_BLC_SELF, 1219 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); 1220 1221 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1222 planea_wm, cursora_wm, 1223 planeb_wm, cursorb_wm, 1224 plane_sr, cursor_sr); 1225 1226 I915_WRITE(DSPFW1, 1227 (plane_sr << DSPFW_SR_SHIFT) | 1228 (cursorb_wm << DSPFW_CURSORB_SHIFT) | 1229 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1230 planea_wm); 1231 I915_WRITE(DSPFW2, 1232 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | 1233 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1234 /* HPLL off in SR has some issues on G4x... disable it */ 1235 I915_WRITE(DSPFW3, 1236 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | 1237 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1238 } 1239 1240 void i965_update_wm(struct drm_device *dev) 1241 { 1242 struct drm_i915_private *dev_priv = dev->dev_private; 1243 struct drm_crtc *crtc; 1244 int srwm = 1; 1245 int cursor_sr = 16; 1246 1247 /* Calc sr entries for one plane configs */ 1248 crtc = single_enabled_crtc(dev); 1249 if (crtc) { 1250 /* self-refresh has much higher latency */ 1251 static const int sr_latency_ns = 12000; 1252 int clock = crtc->mode.clock; 1253 int htotal = crtc->mode.htotal; 1254 int hdisplay = crtc->mode.hdisplay; 1255 int pixel_size = crtc->fb->bits_per_pixel / 8; 1256 unsigned long line_time_us; 1257 int entries; 1258 1259 line_time_us = ((htotal * 1000) / clock); 1260 1261 /* Use ns/us then divide to preserve precision */ 1262 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1263 pixel_size * hdisplay; 1264 entries = howmany(entries, I915_FIFO_LINE_SIZE); 1265 srwm = I965_FIFO_SIZE - entries; 1266 if (srwm < 0) 1267 srwm = 1; 1268 srwm &= 0x1ff; 1269 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", 1270 entries, srwm); 1271 1272 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1273 pixel_size * 64; 1274 entries = howmany(entries, i965_cursor_wm_info.cacheline_size); 1275 cursor_sr = i965_cursor_wm_info.fifo_size - 1276 (entries + i965_cursor_wm_info.guard_size); 1277 1278 if (cursor_sr > i965_cursor_wm_info.max_wm) 1279 cursor_sr = i965_cursor_wm_info.max_wm; 1280 1281 DRM_DEBUG_KMS("self-refresh watermark: display plane %d " 1282 "cursor %d\n", srwm, cursor_sr); 1283 1284 if (IS_CRESTLINE(dev)) 1285 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 1286 } else { 1287 /* Turn off self refresh if both pipes are enabled */ 1288 if (IS_CRESTLINE(dev)) 1289 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) 1290 & ~FW_BLC_SELF_EN); 1291 } 1292 1293 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", 1294 srwm); 1295 1296 /* 965 has limitations... */ 1297 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | 1298 (8 << 16) | (8 << 8) | (8 << 0)); 1299 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 1300 /* update cursor SR watermark */ 1301 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1302 } 1303 1304 void i9xx_update_wm(struct drm_device *dev) 1305 { 1306 struct drm_i915_private *dev_priv = dev->dev_private; 1307 const struct intel_watermark_params *wm_info; 1308 uint32_t fwater_lo; 1309 uint32_t fwater_hi; 1310 int cwm, srwm = 1; 1311 int fifo_size; 1312 int planea_wm, planeb_wm; 1313 struct drm_crtc *crtc, *enabled = NULL; 1314 1315 if (IS_I945GM(dev)) 1316 wm_info = &i945_wm_info; 1317 else if (!IS_GEN2(dev)) 1318 wm_info = &i915_wm_info; 1319 else 1320 wm_info = &i855_wm_info; 1321 1322 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1323 crtc = intel_get_crtc_for_plane(dev, 0); 1324 if (crtc->enabled && crtc->fb) { 1325 planea_wm = intel_calculate_wm(crtc->mode.clock, 1326 wm_info, fifo_size, 1327 crtc->fb->bits_per_pixel / 8, 1328 latency_ns); 1329 enabled = crtc; 1330 } else 1331 planea_wm = fifo_size - wm_info->guard_size; 1332 1333 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1334 crtc = intel_get_crtc_for_plane(dev, 1); 1335 if (crtc->enabled && crtc->fb) { 1336 planeb_wm = intel_calculate_wm(crtc->mode.clock, 1337 wm_info, fifo_size, 1338 crtc->fb->bits_per_pixel / 8, 1339 latency_ns); 1340 if (enabled == NULL) 1341 enabled = crtc; 1342 else 1343 enabled = NULL; 1344 } else 1345 planeb_wm = fifo_size - wm_info->guard_size; 1346 1347 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 1348 1349 /* 1350 * Overlay gets an aggressive default since video jitter is bad. 1351 */ 1352 cwm = 2; 1353 1354 /* Play safe and disable self-refresh before adjusting watermarks. */ 1355 if (IS_I945G(dev) || IS_I945GM(dev)) 1356 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); 1357 else if (IS_I915GM(dev)) 1358 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); 1359 1360 /* Calc sr entries for one plane configs */ 1361 if (HAS_FW_BLC(dev) && enabled) { 1362 /* self-refresh has much higher latency */ 1363 static const int sr_latency_ns = 6000; 1364 int clock = enabled->mode.clock; 1365 int htotal = enabled->mode.htotal; 1366 int hdisplay = enabled->mode.hdisplay; 1367 int pixel_size = enabled->fb->bits_per_pixel / 8; 1368 unsigned long line_time_us; 1369 int entries; 1370 1371 line_time_us = (htotal * 1000) / clock; 1372 1373 /* Use ns/us then divide to preserve precision */ 1374 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * 1375 pixel_size * hdisplay; 1376 entries = howmany(entries, wm_info->cacheline_size); 1377 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); 1378 srwm = wm_info->fifo_size - entries; 1379 if (srwm < 0) 1380 srwm = 1; 1381 1382 if (IS_I945G(dev) || IS_I945GM(dev)) 1383 I915_WRITE(FW_BLC_SELF, 1384 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); 1385 else if (IS_I915GM(dev)) 1386 I915_WRITE(FW_BLC_SELF, srwm & 0x3f); 1387 } 1388 1389 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 1390 planea_wm, planeb_wm, cwm, srwm); 1391 1392 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 1393 fwater_hi = (cwm & 0x1f); 1394 1395 /* Set request length to 8 cachelines per fetch */ 1396 fwater_lo = fwater_lo | (1 << 24) | (1 << 8); 1397 fwater_hi = fwater_hi | (1 << 8); 1398 1399 I915_WRITE(FW_BLC, fwater_lo); 1400 I915_WRITE(FW_BLC2, fwater_hi); 1401 1402 if (HAS_FW_BLC(dev)) { 1403 if (enabled) { 1404 if (IS_I945G(dev) || IS_I945GM(dev)) 1405 I915_WRITE(FW_BLC_SELF, 1406 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); 1407 else if (IS_I915GM(dev)) 1408 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); 1409 DRM_DEBUG_KMS("memory self refresh enabled\n"); 1410 } else 1411 DRM_DEBUG_KMS("memory self refresh disabled\n"); 1412 } 1413 } 1414 1415 void i830_update_wm(struct drm_device *dev) 1416 { 1417 struct drm_i915_private *dev_priv = dev->dev_private; 1418 struct drm_crtc *crtc; 1419 uint32_t fwater_lo; 1420 int planea_wm; 1421 1422 crtc = single_enabled_crtc(dev); 1423 if (crtc == NULL) 1424 return; 1425 1426 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, 1427 dev_priv->display.get_fifo_size(dev, 0), 1428 crtc->fb->bits_per_pixel / 8, 1429 latency_ns); 1430 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1431 fwater_lo |= (3<<8) | planea_wm; 1432 1433 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); 1434 1435 I915_WRITE(FW_BLC, fwater_lo); 1436 } 1437 1438 #define ILK_LP0_PLANE_LATENCY 700 1439 #define ILK_LP0_CURSOR_LATENCY 1300 1440 1441 /* 1442 * Check the wm result. 1443 * 1444 * If any calculated watermark values is larger than the maximum value that 1445 * can be programmed into the associated watermark register, that watermark 1446 * must be disabled. 1447 */ 1448 static bool ironlake_check_srwm(struct drm_device *dev, int level, 1449 int fbc_wm, int display_wm, int cursor_wm, 1450 const struct intel_watermark_params *display, 1451 const struct intel_watermark_params *cursor) 1452 { 1453 struct drm_i915_private *dev_priv = dev->dev_private; 1454 1455 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," 1456 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); 1457 1458 if (fbc_wm > SNB_FBC_MAX_SRWM) { 1459 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", 1460 fbc_wm, SNB_FBC_MAX_SRWM, level); 1461 1462 /* fbc has it's own way to disable FBC WM */ 1463 I915_WRITE(DISP_ARB_CTL, 1464 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); 1465 return false; 1466 } 1467 1468 if (display_wm > display->max_wm) { 1469 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", 1470 display_wm, SNB_DISPLAY_MAX_SRWM, level); 1471 return false; 1472 } 1473 1474 if (cursor_wm > cursor->max_wm) { 1475 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", 1476 cursor_wm, SNB_CURSOR_MAX_SRWM, level); 1477 return false; 1478 } 1479 1480 if (!(fbc_wm || display_wm || cursor_wm)) { 1481 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); 1482 return false; 1483 } 1484 1485 return true; 1486 } 1487 1488 /* 1489 * Compute watermark values of WM[1-3], 1490 */ 1491 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, 1492 int latency_ns, 1493 const struct intel_watermark_params *display, 1494 const struct intel_watermark_params *cursor, 1495 int *fbc_wm, int *display_wm, int *cursor_wm) 1496 { 1497 struct drm_crtc *crtc; 1498 unsigned long line_time_us; 1499 int hdisplay, htotal, pixel_size, clock; 1500 int line_count, line_size; 1501 int small, large; 1502 int entries; 1503 1504 if (!latency_ns) { 1505 *fbc_wm = *display_wm = *cursor_wm = 0; 1506 return false; 1507 } 1508 1509 crtc = intel_get_crtc_for_plane(dev, plane); 1510 hdisplay = crtc->mode.hdisplay; 1511 htotal = crtc->mode.htotal; 1512 clock = crtc->mode.clock; 1513 pixel_size = crtc->fb->bits_per_pixel / 8; 1514 1515 line_time_us = (htotal * 1000) / clock; 1516 line_count = (latency_ns / line_time_us + 1000) / 1000; 1517 line_size = hdisplay * pixel_size; 1518 1519 /* Use the minimum of the small and large buffer method for primary */ 1520 small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 1521 large = line_count * line_size; 1522 1523 entries = howmany(min(small, large), display->cacheline_size); 1524 *display_wm = entries + display->guard_size; 1525 1526 /* 1527 * Spec says: 1528 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 1529 */ 1530 *fbc_wm = howmany(*display_wm * 64, line_size) + 2; 1531 1532 /* calculate the self-refresh watermark for display cursor */ 1533 entries = line_count * pixel_size * 64; 1534 entries = howmany(entries, cursor->cacheline_size); 1535 *cursor_wm = entries + cursor->guard_size; 1536 1537 return ironlake_check_srwm(dev, level, 1538 *fbc_wm, *display_wm, *cursor_wm, 1539 display, cursor); 1540 } 1541 1542 void ironlake_update_wm(struct drm_device *dev) 1543 { 1544 struct drm_i915_private *dev_priv = dev->dev_private; 1545 int fbc_wm, plane_wm, cursor_wm; 1546 unsigned int enabled; 1547 1548 enabled = 0; 1549 if (g4x_compute_wm0(dev, 0, 1550 &ironlake_display_wm_info, 1551 ILK_LP0_PLANE_LATENCY, 1552 &ironlake_cursor_wm_info, 1553 ILK_LP0_CURSOR_LATENCY, 1554 &plane_wm, &cursor_wm)) { 1555 I915_WRITE(WM0_PIPEA_ILK, 1556 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 1557 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1558 " plane %d, " "cursor: %d\n", 1559 plane_wm, cursor_wm); 1560 enabled |= 1; 1561 } 1562 1563 if (g4x_compute_wm0(dev, 1, 1564 &ironlake_display_wm_info, 1565 ILK_LP0_PLANE_LATENCY, 1566 &ironlake_cursor_wm_info, 1567 ILK_LP0_CURSOR_LATENCY, 1568 &plane_wm, &cursor_wm)) { 1569 I915_WRITE(WM0_PIPEB_ILK, 1570 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 1571 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1572 " plane %d, cursor: %d\n", 1573 plane_wm, cursor_wm); 1574 enabled |= 2; 1575 } 1576 1577 /* 1578 * Calculate and update the self-refresh watermark only when one 1579 * display plane is used. 1580 */ 1581 I915_WRITE(WM3_LP_ILK, 0); 1582 I915_WRITE(WM2_LP_ILK, 0); 1583 I915_WRITE(WM1_LP_ILK, 0); 1584 1585 if (!single_plane_enabled(enabled)) 1586 return; 1587 enabled = ffs(enabled) - 1; 1588 1589 /* WM1 */ 1590 if (!ironlake_compute_srwm(dev, 1, enabled, 1591 ILK_READ_WM1_LATENCY() * 500, 1592 &ironlake_display_srwm_info, 1593 &ironlake_cursor_srwm_info, 1594 &fbc_wm, &plane_wm, &cursor_wm)) 1595 return; 1596 1597 I915_WRITE(WM1_LP_ILK, 1598 WM1_LP_SR_EN | 1599 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1600 (fbc_wm << WM1_LP_FBC_SHIFT) | 1601 (plane_wm << WM1_LP_SR_SHIFT) | 1602 cursor_wm); 1603 1604 /* WM2 */ 1605 if (!ironlake_compute_srwm(dev, 2, enabled, 1606 ILK_READ_WM2_LATENCY() * 500, 1607 &ironlake_display_srwm_info, 1608 &ironlake_cursor_srwm_info, 1609 &fbc_wm, &plane_wm, &cursor_wm)) 1610 return; 1611 1612 I915_WRITE(WM2_LP_ILK, 1613 WM2_LP_EN | 1614 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1615 (fbc_wm << WM1_LP_FBC_SHIFT) | 1616 (plane_wm << WM1_LP_SR_SHIFT) | 1617 cursor_wm); 1618 1619 /* 1620 * WM3 is unsupported on ILK, probably because we don't have latency 1621 * data for that power state 1622 */ 1623 } 1624 1625 void sandybridge_update_wm(struct drm_device *dev) 1626 { 1627 struct drm_i915_private *dev_priv = dev->dev_private; 1628 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ 1629 u32 val; 1630 int fbc_wm, plane_wm, cursor_wm; 1631 unsigned int enabled; 1632 1633 enabled = 0; 1634 if (g4x_compute_wm0(dev, 0, 1635 &sandybridge_display_wm_info, latency, 1636 &sandybridge_cursor_wm_info, latency, 1637 &plane_wm, &cursor_wm)) { 1638 val = I915_READ(WM0_PIPEA_ILK); 1639 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); 1640 I915_WRITE(WM0_PIPEA_ILK, val | 1641 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); 1642 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1643 " plane %d, " "cursor: %d\n", 1644 plane_wm, cursor_wm); 1645 enabled |= 1; 1646 } 1647 1648 if (g4x_compute_wm0(dev, 1, 1649 &sandybridge_display_wm_info, latency, 1650 &sandybridge_cursor_wm_info, latency, 1651 &plane_wm, &cursor_wm)) { 1652 val = I915_READ(WM0_PIPEB_ILK); 1653 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); 1654 I915_WRITE(WM0_PIPEB_ILK, val | 1655 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); 1656 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1657 " plane %d, cursor: %d\n", 1658 plane_wm, cursor_wm); 1659 enabled |= 2; 1660 } 1661 1662 /* IVB has 3 pipes */ 1663 if (IS_IVYBRIDGE(dev) && 1664 g4x_compute_wm0(dev, 2, 1665 &sandybridge_display_wm_info, latency, 1666 &sandybridge_cursor_wm_info, latency, 1667 &plane_wm, &cursor_wm)) { 1668 val = I915_READ(WM0_PIPEC_IVB); 1669 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); 1670 I915_WRITE(WM0_PIPEC_IVB, val | 1671 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm)); 1672 DRM_DEBUG_KMS("FIFO watermarks For pipe C -" 1673 " plane %d, cursor: %d\n", 1674 plane_wm, cursor_wm); 1675 enabled |= 3; 1676 } 1677 1678 /* 1679 * Calculate and update the self-refresh watermark only when one 1680 * display plane is used. 1681 * 1682 * SNB support 3 levels of watermark. 1683 * 1684 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, 1685 * and disabled in the descending order 1686 * 1687 */ 1688 I915_WRITE(WM3_LP_ILK, 0); 1689 I915_WRITE(WM2_LP_ILK, 0); 1690 I915_WRITE(WM1_LP_ILK, 0); 1691 1692 if (!single_plane_enabled(enabled) || 1693 dev_priv->sprite_scaling_enabled) 1694 return; 1695 enabled = ffs(enabled) - 1; 1696 1697 /* WM1 */ 1698 if (!ironlake_compute_srwm(dev, 1, enabled, 1699 SNB_READ_WM1_LATENCY() * 500, 1700 &sandybridge_display_srwm_info, 1701 &sandybridge_cursor_srwm_info, 1702 &fbc_wm, &plane_wm, &cursor_wm)) 1703 return; 1704 1705 I915_WRITE(WM1_LP_ILK, 1706 WM1_LP_SR_EN | 1707 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1708 (fbc_wm << WM1_LP_FBC_SHIFT) | 1709 (plane_wm << WM1_LP_SR_SHIFT) | 1710 cursor_wm); 1711 1712 /* WM2 */ 1713 if (!ironlake_compute_srwm(dev, 2, enabled, 1714 SNB_READ_WM2_LATENCY() * 500, 1715 &sandybridge_display_srwm_info, 1716 &sandybridge_cursor_srwm_info, 1717 &fbc_wm, &plane_wm, &cursor_wm)) 1718 return; 1719 1720 I915_WRITE(WM2_LP_ILK, 1721 WM2_LP_EN | 1722 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1723 (fbc_wm << WM1_LP_FBC_SHIFT) | 1724 (plane_wm << WM1_LP_SR_SHIFT) | 1725 cursor_wm); 1726 1727 /* WM3 */ 1728 if (!ironlake_compute_srwm(dev, 3, enabled, 1729 SNB_READ_WM3_LATENCY() * 500, 1730 &sandybridge_display_srwm_info, 1731 &sandybridge_cursor_srwm_info, 1732 &fbc_wm, &plane_wm, &cursor_wm)) 1733 return; 1734 1735 I915_WRITE(WM3_LP_ILK, 1736 WM3_LP_EN | 1737 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | 1738 (fbc_wm << WM1_LP_FBC_SHIFT) | 1739 (plane_wm << WM1_LP_SR_SHIFT) | 1740 cursor_wm); 1741 } 1742 1743 static bool 1744 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane, 1745 uint32_t sprite_width, int pixel_size, 1746 const struct intel_watermark_params *display, 1747 int display_latency_ns, int *sprite_wm) 1748 { 1749 struct drm_crtc *crtc; 1750 int clock; 1751 int entries, tlb_miss; 1752 1753 crtc = intel_get_crtc_for_plane(dev, plane); 1754 if (crtc->fb == NULL || !crtc->enabled) { 1755 *sprite_wm = display->guard_size; 1756 return false; 1757 } 1758 1759 clock = crtc->mode.clock; 1760 1761 /* Use the small buffer method to calculate the sprite watermark */ 1762 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; 1763 tlb_miss = display->fifo_size*display->cacheline_size - 1764 sprite_width * 8; 1765 if (tlb_miss > 0) 1766 entries += tlb_miss; 1767 entries = howmany(entries, display->cacheline_size); 1768 *sprite_wm = entries + display->guard_size; 1769 if (*sprite_wm > (int)display->max_wm) 1770 *sprite_wm = display->max_wm; 1771 1772 return true; 1773 } 1774 1775 static bool 1776 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane, 1777 uint32_t sprite_width, int pixel_size, 1778 const struct intel_watermark_params *display, 1779 int latency_ns, int *sprite_wm) 1780 { 1781 struct drm_crtc *crtc; 1782 unsigned long line_time_us; 1783 int clock; 1784 int line_count, line_size; 1785 int small, large; 1786 int entries; 1787 1788 if (!latency_ns) { 1789 *sprite_wm = 0; 1790 return false; 1791 } 1792 1793 crtc = intel_get_crtc_for_plane(dev, plane); 1794 clock = crtc->mode.clock; 1795 if (!clock) { 1796 *sprite_wm = 0; 1797 return false; 1798 } 1799 1800 line_time_us = (sprite_width * 1000) / clock; 1801 if (!line_time_us) { 1802 *sprite_wm = 0; 1803 return false; 1804 } 1805 1806 line_count = (latency_ns / line_time_us + 1000) / 1000; 1807 line_size = sprite_width * pixel_size; 1808 1809 /* Use the minimum of the small and large buffer method for primary */ 1810 small = ((clock * pixel_size / 1000) * latency_ns) / 1000; 1811 large = line_count * line_size; 1812 1813 entries = howmany(min(small, large), display->cacheline_size); 1814 *sprite_wm = entries + display->guard_size; 1815 1816 return *sprite_wm > 0x3ff ? false : true; 1817 } 1818 1819 void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe, 1820 uint32_t sprite_width, int pixel_size) 1821 { 1822 struct drm_i915_private *dev_priv = dev->dev_private; 1823 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ 1824 u32 val; 1825 int sprite_wm, reg; 1826 int ret; 1827 1828 switch (pipe) { 1829 case 0: 1830 reg = WM0_PIPEA_ILK; 1831 break; 1832 case 1: 1833 reg = WM0_PIPEB_ILK; 1834 break; 1835 case 2: 1836 reg = WM0_PIPEC_IVB; 1837 break; 1838 default: 1839 return; /* bad pipe */ 1840 } 1841 1842 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size, 1843 &sandybridge_display_wm_info, 1844 latency, &sprite_wm); 1845 if (!ret) { 1846 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n", 1847 pipe); 1848 return; 1849 } 1850 1851 val = I915_READ(reg); 1852 val &= ~WM0_PIPE_SPRITE_MASK; 1853 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT)); 1854 DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm); 1855 1856 1857 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, 1858 pixel_size, 1859 &sandybridge_display_srwm_info, 1860 SNB_READ_WM1_LATENCY() * 500, 1861 &sprite_wm); 1862 if (!ret) { 1863 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n", 1864 pipe); 1865 return; 1866 } 1867 I915_WRITE(WM1S_LP_ILK, sprite_wm); 1868 1869 /* Only IVB has two more LP watermarks for sprite */ 1870 if (!IS_IVYBRIDGE(dev)) 1871 return; 1872 1873 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, 1874 pixel_size, 1875 &sandybridge_display_srwm_info, 1876 SNB_READ_WM2_LATENCY() * 500, 1877 &sprite_wm); 1878 if (!ret) { 1879 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n", 1880 pipe); 1881 return; 1882 } 1883 I915_WRITE(WM2S_LP_IVB, sprite_wm); 1884 1885 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width, 1886 pixel_size, 1887 &sandybridge_display_srwm_info, 1888 SNB_READ_WM3_LATENCY() * 500, 1889 &sprite_wm); 1890 if (!ret) { 1891 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n", 1892 pipe); 1893 return; 1894 } 1895 I915_WRITE(WM3S_LP_IVB, sprite_wm); 1896 } 1897 1898 /** 1899 * intel_update_watermarks - update FIFO watermark values based on current modes 1900 * 1901 * Calculate watermark values for the various WM regs based on current mode 1902 * and plane configuration. 1903 * 1904 * There are several cases to deal with here: 1905 * - normal (i.e. non-self-refresh) 1906 * - self-refresh (SR) mode 1907 * - lines are large relative to FIFO size (buffer can hold up to 2) 1908 * - lines are small relative to FIFO size (buffer can hold more than 2 1909 * lines), so need to account for TLB latency 1910 * 1911 * The normal calculation is: 1912 * watermark = dotclock * bytes per pixel * latency 1913 * where latency is platform & configuration dependent (we assume pessimal 1914 * values here). 1915 * 1916 * The SR calculation is: 1917 * watermark = (trunc(latency/line time)+1) * surface width * 1918 * bytes per pixel 1919 * where 1920 * line time = htotal / dotclock 1921 * surface width = hdisplay for normal plane and 64 for cursor 1922 * and latency is assumed to be high, as above. 1923 * 1924 * The final value programmed to the register should always be rounded up, 1925 * and include an extra 2 entries to account for clock crossings. 1926 * 1927 * We don't use the sprite, so we can ignore that. And on Crestline we have 1928 * to set the non-SR watermarks to 8. 1929 */ 1930 void intel_update_watermarks(struct drm_device *dev) 1931 { 1932 struct drm_i915_private *dev_priv = dev->dev_private; 1933 1934 if (dev_priv->display.update_wm) 1935 dev_priv->display.update_wm(dev); 1936 } 1937 1938 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe, 1939 uint32_t sprite_width, int pixel_size) 1940 { 1941 struct drm_i915_private *dev_priv = dev->dev_private; 1942 1943 if (dev_priv->display.update_sprite_wm) 1944 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width, 1945 pixel_size); 1946 } 1947 1948 static struct drm_i915_gem_object * 1949 intel_alloc_context_page(struct drm_device *dev) 1950 { 1951 struct drm_i915_gem_object *ctx; 1952 int ret; 1953 1954 DRM_LOCK_ASSERT(dev); 1955 1956 ctx = i915_gem_alloc_object(dev, 4096); 1957 if (!ctx) { 1958 DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 1959 return NULL; 1960 } 1961 1962 ret = i915_gem_object_pin(ctx, 4096, true); 1963 if (ret) { 1964 DRM_ERROR("failed to pin power context: %d\n", ret); 1965 goto err_unref; 1966 } 1967 1968 ret = i915_gem_object_set_to_gtt_domain(ctx, 1); 1969 if (ret) { 1970 DRM_ERROR("failed to set-domain on power context: %d\n", ret); 1971 goto err_unpin; 1972 } 1973 1974 return ctx; 1975 1976 err_unpin: 1977 i915_gem_object_unpin(ctx); 1978 err_unref: 1979 drm_gem_object_unreference(&ctx->base); 1980 DRM_UNLOCK(dev); 1981 return NULL; 1982 } 1983 1984 /** 1985 * Lock protecting IPS related data structures 1986 */ 1987 struct lock mchdev_lock; 1988 LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE); 1989 1990 /* Global for IPS driver to get at the current i915 device. Protected by 1991 * mchdev_lock. */ 1992 struct drm_i915_private *i915_mch_dev; 1993 1994 bool ironlake_set_drps(struct drm_device *dev, u8 val) 1995 { 1996 struct drm_i915_private *dev_priv = dev->dev_private; 1997 u16 rgvswctl; 1998 1999 rgvswctl = I915_READ16(MEMSWCTL); 2000 if (rgvswctl & MEMCTL_CMD_STS) { 2001 DRM_DEBUG("gpu busy, RCS change rejected\n"); 2002 return false; /* still busy with another command */ 2003 } 2004 2005 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | 2006 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; 2007 I915_WRITE16(MEMSWCTL, rgvswctl); 2008 POSTING_READ16(MEMSWCTL); 2009 2010 rgvswctl |= MEMCTL_CMD_STS; 2011 I915_WRITE16(MEMSWCTL, rgvswctl); 2012 2013 return true; 2014 } 2015 2016 void ironlake_enable_drps(struct drm_device *dev) 2017 { 2018 struct drm_i915_private *dev_priv = dev->dev_private; 2019 u32 rgvmodectl = I915_READ(MEMMODECTL); 2020 u8 fmax, fmin, fstart, vstart; 2021 2022 /* Enable temp reporting */ 2023 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); 2024 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); 2025 2026 /* 100ms RC evaluation intervals */ 2027 I915_WRITE(RCUPEI, 100000); 2028 I915_WRITE(RCDNEI, 100000); 2029 2030 /* Set max/min thresholds to 90ms and 80ms respectively */ 2031 I915_WRITE(RCBMAXAVG, 90000); 2032 I915_WRITE(RCBMINAVG, 80000); 2033 2034 I915_WRITE(MEMIHYST, 1); 2035 2036 /* Set up min, max, and cur for interrupt handling */ 2037 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; 2038 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 2039 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 2040 MEMMODE_FSTART_SHIFT; 2041 2042 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 2043 PXVFREQ_PX_SHIFT; 2044 2045 dev_priv->fmax = fmax; /* IPS callback will increase this */ 2046 dev_priv->fstart = fstart; 2047 2048 dev_priv->max_delay = fstart; 2049 dev_priv->min_delay = fmin; 2050 dev_priv->cur_delay = fstart; 2051 2052 DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n", 2053 fmax, fmin, fstart); 2054 2055 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 2056 2057 /* 2058 * Interrupts will be enabled in ironlake_irq_postinstall 2059 */ 2060 2061 I915_WRITE(VIDSTART, vstart); 2062 POSTING_READ(VIDSTART); 2063 2064 rgvmodectl |= MEMMODE_SWMODE_EN; 2065 I915_WRITE(MEMMODECTL, rgvmodectl); 2066 2067 if (_intel_wait_for(dev, 2068 (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10, 2069 1, "915per")) 2070 DRM_ERROR("stuck trying to change perf mode\n"); 2071 DELAY(1000); 2072 2073 ironlake_set_drps(dev, fstart); 2074 2075 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + 2076 I915_READ(0x112e0); 2077 dev_priv->last_time1 = jiffies_to_msecs(jiffies); 2078 dev_priv->last_count2 = I915_READ(0x112f4); 2079 nanotime(&dev_priv->last_time2); 2080 } 2081 2082 void ironlake_disable_drps(struct drm_device *dev) 2083 { 2084 struct drm_i915_private *dev_priv = dev->dev_private; 2085 u16 rgvswctl = I915_READ16(MEMSWCTL); 2086 2087 /* Ack interrupts, disable EFC interrupt */ 2088 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); 2089 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); 2090 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); 2091 I915_WRITE(DEIIR, DE_PCU_EVENT); 2092 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 2093 2094 /* Go back to the starting frequency */ 2095 ironlake_set_drps(dev, dev_priv->fstart); 2096 DELAY(1000); 2097 rgvswctl |= MEMCTL_CMD_STS; 2098 I915_WRITE(MEMSWCTL, rgvswctl); 2099 DELAY(1000); 2100 2101 } 2102 2103 void gen6_set_rps(struct drm_device *dev, u8 val) 2104 { 2105 struct drm_i915_private *dev_priv = dev->dev_private; 2106 u32 swreq; 2107 2108 swreq = (val & 0x3ff) << 25; 2109 I915_WRITE(GEN6_RPNSWREQ, swreq); 2110 } 2111 2112 void gen6_disable_rps(struct drm_device *dev) 2113 { 2114 struct drm_i915_private *dev_priv = dev->dev_private; 2115 2116 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 2117 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 2118 I915_WRITE(GEN6_PMIER, 0); 2119 /* Complete PM interrupt masking here doesn't race with the rps work 2120 * item again unmasking PM interrupts because that is using a different 2121 * register (PMIMR) to mask PM interrupts. The only risk is in leaving 2122 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ 2123 2124 lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); 2125 dev_priv->pm_iir = 0; 2126 lockmgr(&dev_priv->rps_lock, LK_RELEASE); 2127 2128 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2129 } 2130 2131 static unsigned long intel_pxfreq(u32 vidfreq) 2132 { 2133 unsigned long freq; 2134 int div = (vidfreq & 0x3f0000) >> 16; 2135 int post = (vidfreq & 0x3000) >> 12; 2136 int pre = (vidfreq & 0x7); 2137 2138 if (!pre) 2139 return 0; 2140 2141 freq = ((div * 133333) / ((1<<post) * pre)); 2142 2143 return freq; 2144 } 2145 2146 static const struct cparams { 2147 u16 i; 2148 u16 t; 2149 u16 m; 2150 u16 c; 2151 } cparams[] = { 2152 { 1, 1333, 301, 28664 }, 2153 { 1, 1066, 294, 24460 }, 2154 { 1, 800, 294, 25192 }, 2155 { 0, 1333, 276, 27605 }, 2156 { 0, 1066, 276, 27605 }, 2157 { 0, 800, 231, 23784 }, 2158 }; 2159 2160 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 2161 { 2162 u64 total_count, diff, ret; 2163 u32 count1, count2, count3, m = 0, c = 0; 2164 unsigned long now = jiffies_to_msecs(jiffies), diff1; 2165 int i; 2166 2167 diff1 = now - dev_priv->last_time1; 2168 /* 2169 * sysctl(8) reads the value of sysctl twice in rapid 2170 * succession. There is high chance that it happens in the 2171 * same timer tick. Use the cached value to not divide by 2172 * zero and give the hw a chance to gather more samples. 2173 */ 2174 if (diff1 <= 10) 2175 return (dev_priv->chipset_power); 2176 2177 count1 = I915_READ(DMIEC); 2178 count2 = I915_READ(DDREC); 2179 count3 = I915_READ(CSIEC); 2180 2181 total_count = count1 + count2 + count3; 2182 2183 /* FIXME: handle per-counter overflow */ 2184 if (total_count < dev_priv->last_count1) { 2185 diff = ~0UL - dev_priv->last_count1; 2186 diff += total_count; 2187 } else { 2188 diff = total_count - dev_priv->last_count1; 2189 } 2190 2191 for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) { 2192 if (cparams[i].i == dev_priv->c_m && 2193 cparams[i].t == dev_priv->r_t) { 2194 m = cparams[i].m; 2195 c = cparams[i].c; 2196 break; 2197 } 2198 } 2199 2200 diff = diff / diff1; 2201 ret = ((m * diff) + c); 2202 ret = ret / 10; 2203 2204 dev_priv->last_count1 = total_count; 2205 dev_priv->last_time1 = now; 2206 2207 dev_priv->chipset_power = ret; 2208 return (ret); 2209 } 2210 2211 unsigned long i915_mch_val(struct drm_i915_private *dev_priv) 2212 { 2213 unsigned long m, x, b; 2214 u32 tsfs; 2215 2216 tsfs = I915_READ(TSFS); 2217 2218 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); 2219 x = I915_READ8(I915_TR1); 2220 2221 b = tsfs & TSFS_INTR_MASK; 2222 2223 return ((m * x) / 127) - b; 2224 } 2225 2226 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 2227 { 2228 static const struct v_table { 2229 u16 vd; /* in .1 mil */ 2230 u16 vm; /* in .1 mil */ 2231 } v_table[] = { 2232 { 0, 0, }, 2233 { 375, 0, }, 2234 { 500, 0, }, 2235 { 625, 0, }, 2236 { 750, 0, }, 2237 { 875, 0, }, 2238 { 1000, 0, }, 2239 { 1125, 0, }, 2240 { 4125, 3000, }, 2241 { 4125, 3000, }, 2242 { 4125, 3000, }, 2243 { 4125, 3000, }, 2244 { 4125, 3000, }, 2245 { 4125, 3000, }, 2246 { 4125, 3000, }, 2247 { 4125, 3000, }, 2248 { 4125, 3000, }, 2249 { 4125, 3000, }, 2250 { 4125, 3000, }, 2251 { 4125, 3000, }, 2252 { 4125, 3000, }, 2253 { 4125, 3000, }, 2254 { 4125, 3000, }, 2255 { 4125, 3000, }, 2256 { 4125, 3000, }, 2257 { 4125, 3000, }, 2258 { 4125, 3000, }, 2259 { 4125, 3000, }, 2260 { 4125, 3000, }, 2261 { 4125, 3000, }, 2262 { 4125, 3000, }, 2263 { 4125, 3000, }, 2264 { 4250, 3125, }, 2265 { 4375, 3250, }, 2266 { 4500, 3375, }, 2267 { 4625, 3500, }, 2268 { 4750, 3625, }, 2269 { 4875, 3750, }, 2270 { 5000, 3875, }, 2271 { 5125, 4000, }, 2272 { 5250, 4125, }, 2273 { 5375, 4250, }, 2274 { 5500, 4375, }, 2275 { 5625, 4500, }, 2276 { 5750, 4625, }, 2277 { 5875, 4750, }, 2278 { 6000, 4875, }, 2279 { 6125, 5000, }, 2280 { 6250, 5125, }, 2281 { 6375, 5250, }, 2282 { 6500, 5375, }, 2283 { 6625, 5500, }, 2284 { 6750, 5625, }, 2285 { 6875, 5750, }, 2286 { 7000, 5875, }, 2287 { 7125, 6000, }, 2288 { 7250, 6125, }, 2289 { 7375, 6250, }, 2290 { 7500, 6375, }, 2291 { 7625, 6500, }, 2292 { 7750, 6625, }, 2293 { 7875, 6750, }, 2294 { 8000, 6875, }, 2295 { 8125, 7000, }, 2296 { 8250, 7125, }, 2297 { 8375, 7250, }, 2298 { 8500, 7375, }, 2299 { 8625, 7500, }, 2300 { 8750, 7625, }, 2301 { 8875, 7750, }, 2302 { 9000, 7875, }, 2303 { 9125, 8000, }, 2304 { 9250, 8125, }, 2305 { 9375, 8250, }, 2306 { 9500, 8375, }, 2307 { 9625, 8500, }, 2308 { 9750, 8625, }, 2309 { 9875, 8750, }, 2310 { 10000, 8875, }, 2311 { 10125, 9000, }, 2312 { 10250, 9125, }, 2313 { 10375, 9250, }, 2314 { 10500, 9375, }, 2315 { 10625, 9500, }, 2316 { 10750, 9625, }, 2317 { 10875, 9750, }, 2318 { 11000, 9875, }, 2319 { 11125, 10000, }, 2320 { 11250, 10125, }, 2321 { 11375, 10250, }, 2322 { 11500, 10375, }, 2323 { 11625, 10500, }, 2324 { 11750, 10625, }, 2325 { 11875, 10750, }, 2326 { 12000, 10875, }, 2327 { 12125, 11000, }, 2328 { 12250, 11125, }, 2329 { 12375, 11250, }, 2330 { 12500, 11375, }, 2331 { 12625, 11500, }, 2332 { 12750, 11625, }, 2333 { 12875, 11750, }, 2334 { 13000, 11875, }, 2335 { 13125, 12000, }, 2336 { 13250, 12125, }, 2337 { 13375, 12250, }, 2338 { 13500, 12375, }, 2339 { 13625, 12500, }, 2340 { 13750, 12625, }, 2341 { 13875, 12750, }, 2342 { 14000, 12875, }, 2343 { 14125, 13000, }, 2344 { 14250, 13125, }, 2345 { 14375, 13250, }, 2346 { 14500, 13375, }, 2347 { 14625, 13500, }, 2348 { 14750, 13625, }, 2349 { 14875, 13750, }, 2350 { 15000, 13875, }, 2351 { 15125, 14000, }, 2352 { 15250, 14125, }, 2353 { 15375, 14250, }, 2354 { 15500, 14375, }, 2355 { 15625, 14500, }, 2356 { 15750, 14625, }, 2357 { 15875, 14750, }, 2358 { 16000, 14875, }, 2359 { 16125, 15000, }, 2360 }; 2361 if (dev_priv->info->is_mobile) 2362 return v_table[pxvid].vm; 2363 else 2364 return v_table[pxvid].vd; 2365 } 2366 2367 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 2368 { 2369 struct timespec now, diff1; 2370 u64 diff; 2371 unsigned long diffms; 2372 u32 count; 2373 2374 if (dev_priv->info->gen != 5) 2375 return; 2376 2377 nanotime(&now); 2378 diff1 = now; 2379 timespecsub(&diff1, &dev_priv->last_time2); 2380 2381 /* Don't divide by 0 */ 2382 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000; 2383 if (!diffms) 2384 return; 2385 2386 count = I915_READ(GFXEC); 2387 2388 if (count < dev_priv->last_count2) { 2389 diff = ~0UL - dev_priv->last_count2; 2390 diff += count; 2391 } else { 2392 diff = count - dev_priv->last_count2; 2393 } 2394 2395 dev_priv->last_count2 = count; 2396 dev_priv->last_time2 = now; 2397 2398 /* More magic constants... */ 2399 diff = diff * 1181; 2400 diff = diff / (diffms * 10); 2401 dev_priv->gfx_power = diff; 2402 } 2403 2404 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 2405 { 2406 unsigned long t, corr, state1, corr2, state2; 2407 u32 pxvid, ext_v; 2408 2409 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4)); 2410 pxvid = (pxvid >> 24) & 0x7f; 2411 ext_v = pvid_to_extvid(dev_priv, pxvid); 2412 2413 state1 = ext_v; 2414 2415 t = i915_mch_val(dev_priv); 2416 2417 /* Revel in the empirically derived constants */ 2418 2419 /* Correction factor in 1/100000 units */ 2420 if (t > 80) 2421 corr = ((t * 2349) + 135940); 2422 else if (t >= 50) 2423 corr = ((t * 964) + 29317); 2424 else /* < 50 */ 2425 corr = ((t * 301) + 1004); 2426 2427 corr = corr * ((150142 * state1) / 10000 - 78642); 2428 corr /= 100000; 2429 corr2 = (corr * dev_priv->corr); 2430 2431 state2 = (corr2 * state1) / 10000; 2432 state2 /= 100; /* convert to mW */ 2433 2434 i915_update_gfx_val(dev_priv); 2435 2436 return dev_priv->gfx_power + state2; 2437 } 2438 2439 /** 2440 * i915_read_mch_val - return value for IPS use 2441 * 2442 * Calculate and return a value for the IPS driver to use when deciding whether 2443 * we have thermal and power headroom to increase CPU or GPU power budget. 2444 */ 2445 unsigned long i915_read_mch_val(void) 2446 { 2447 struct drm_i915_private *dev_priv; 2448 unsigned long chipset_val, graphics_val, ret = 0; 2449 2450 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2451 if (!i915_mch_dev) 2452 goto out_unlock; 2453 dev_priv = i915_mch_dev; 2454 2455 chipset_val = i915_chipset_val(dev_priv); 2456 graphics_val = i915_gfx_val(dev_priv); 2457 2458 ret = chipset_val + graphics_val; 2459 2460 out_unlock: 2461 lockmgr(&mchdev_lock, LK_RELEASE); 2462 2463 return ret; 2464 } 2465 2466 /** 2467 * i915_gpu_raise - raise GPU frequency limit 2468 * 2469 * Raise the limit; IPS indicates we have thermal headroom. 2470 */ 2471 bool i915_gpu_raise(void) 2472 { 2473 struct drm_i915_private *dev_priv; 2474 bool ret = true; 2475 2476 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2477 if (!i915_mch_dev) { 2478 ret = false; 2479 goto out_unlock; 2480 } 2481 dev_priv = i915_mch_dev; 2482 2483 if (dev_priv->max_delay > dev_priv->fmax) 2484 dev_priv->max_delay--; 2485 2486 out_unlock: 2487 lockmgr(&mchdev_lock, LK_RELEASE); 2488 2489 return ret; 2490 } 2491 2492 /** 2493 * i915_gpu_lower - lower GPU frequency limit 2494 * 2495 * IPS indicates we're close to a thermal limit, so throttle back the GPU 2496 * frequency maximum. 2497 */ 2498 bool i915_gpu_lower(void) 2499 { 2500 struct drm_i915_private *dev_priv; 2501 bool ret = true; 2502 2503 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2504 if (!i915_mch_dev) { 2505 ret = false; 2506 goto out_unlock; 2507 } 2508 dev_priv = i915_mch_dev; 2509 2510 if (dev_priv->max_delay < dev_priv->min_delay) 2511 dev_priv->max_delay++; 2512 2513 out_unlock: 2514 lockmgr(&mchdev_lock, LK_RELEASE); 2515 2516 return ret; 2517 } 2518 2519 /** 2520 * i915_gpu_busy - indicate GPU business to IPS 2521 * 2522 * Tell the IPS driver whether or not the GPU is busy. 2523 */ 2524 bool i915_gpu_busy(void) 2525 { 2526 struct drm_i915_private *dev_priv; 2527 bool ret = false; 2528 2529 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2530 if (!i915_mch_dev) 2531 goto out_unlock; 2532 dev_priv = i915_mch_dev; 2533 2534 ret = dev_priv->busy; 2535 2536 out_unlock: 2537 lockmgr(&mchdev_lock, LK_RELEASE); 2538 2539 return ret; 2540 } 2541 2542 /** 2543 * i915_gpu_turbo_disable - disable graphics turbo 2544 * 2545 * Disable graphics turbo by resetting the max frequency and setting the 2546 * current frequency to the default. 2547 */ 2548 bool i915_gpu_turbo_disable(void) 2549 { 2550 struct drm_i915_private *dev_priv; 2551 bool ret = true; 2552 2553 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 2554 if (!i915_mch_dev) { 2555 ret = false; 2556 goto out_unlock; 2557 } 2558 dev_priv = i915_mch_dev; 2559 2560 dev_priv->max_delay = dev_priv->fstart; 2561 2562 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart)) 2563 ret = false; 2564 2565 out_unlock: 2566 lockmgr(&mchdev_lock, LK_RELEASE); 2567 2568 return ret; 2569 } 2570 2571 void intel_init_emon(struct drm_device *dev) 2572 { 2573 struct drm_i915_private *dev_priv = dev->dev_private; 2574 u32 lcfuse; 2575 u8 pxw[16]; 2576 int i; 2577 2578 /* Disable to program */ 2579 I915_WRITE(ECR, 0); 2580 POSTING_READ(ECR); 2581 2582 /* Program energy weights for various events */ 2583 I915_WRITE(SDEW, 0x15040d00); 2584 I915_WRITE(CSIEW0, 0x007f0000); 2585 I915_WRITE(CSIEW1, 0x1e220004); 2586 I915_WRITE(CSIEW2, 0x04000004); 2587 2588 for (i = 0; i < 5; i++) 2589 I915_WRITE(PEW + (i * 4), 0); 2590 for (i = 0; i < 3; i++) 2591 I915_WRITE(DEW + (i * 4), 0); 2592 2593 /* Program P-state weights to account for frequency power adjustment */ 2594 for (i = 0; i < 16; i++) { 2595 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); 2596 unsigned long freq = intel_pxfreq(pxvidfreq); 2597 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> 2598 PXVFREQ_PX_SHIFT; 2599 unsigned long val; 2600 2601 val = vid * vid; 2602 val *= (freq / 1000); 2603 val *= 255; 2604 val /= (127*127*900); 2605 if (val > 0xff) 2606 DRM_ERROR("bad pxval: %ld\n", val); 2607 pxw[i] = val; 2608 } 2609 /* Render standby states get 0 weight */ 2610 pxw[14] = 0; 2611 pxw[15] = 0; 2612 2613 for (i = 0; i < 4; i++) { 2614 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | 2615 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); 2616 I915_WRITE(PXW + (i * 4), val); 2617 } 2618 2619 /* Adjust magic regs to magic values (more experimental results) */ 2620 I915_WRITE(OGW0, 0); 2621 I915_WRITE(OGW1, 0); 2622 I915_WRITE(EG0, 0x00007f00); 2623 I915_WRITE(EG1, 0x0000000e); 2624 I915_WRITE(EG2, 0x000e0000); 2625 I915_WRITE(EG3, 0x68000300); 2626 I915_WRITE(EG4, 0x42000000); 2627 I915_WRITE(EG5, 0x00140031); 2628 I915_WRITE(EG6, 0); 2629 I915_WRITE(EG7, 0); 2630 2631 for (i = 0; i < 8; i++) 2632 I915_WRITE(PXWL + (i * 4), 0); 2633 2634 /* Enable PMON + select events */ 2635 I915_WRITE(ECR, 0x80000019); 2636 2637 lcfuse = I915_READ(LCFUSE02); 2638 2639 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); 2640 } 2641 2642 static int intel_enable_rc6(struct drm_device *dev) 2643 { 2644 /* 2645 * Respect the kernel parameter if it is set 2646 */ 2647 if (i915_enable_rc6 >= 0) 2648 return i915_enable_rc6; 2649 2650 /* 2651 * Disable RC6 on Ironlake 2652 */ 2653 if (INTEL_INFO(dev)->gen == 5) 2654 return 0; 2655 2656 /* 2657 * Enable rc6 on Sandybridge if DMA remapping is disabled 2658 */ 2659 if (INTEL_INFO(dev)->gen == 6) { 2660 DRM_DEBUG_DRIVER( 2661 "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n", 2662 intel_iommu_enabled ? "true" : "false", 2663 !intel_iommu_enabled ? "en" : "dis"); 2664 return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE); 2665 } 2666 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n"); 2667 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 2668 } 2669 2670 void gen6_enable_rps(struct drm_i915_private *dev_priv) 2671 { 2672 struct drm_device *dev = dev_priv->dev; 2673 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 2674 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 2675 u32 pcu_mbox, rc6_mask = 0; 2676 u32 gtfifodbg; 2677 int cur_freq, min_freq, max_freq; 2678 int rc6_mode; 2679 int i; 2680 2681 /* Here begins a magic sequence of register writes to enable 2682 * auto-downclocking. 2683 * 2684 * Perhaps there might be some value in exposing these to 2685 * userspace... 2686 */ 2687 I915_WRITE(GEN6_RC_STATE, 0); 2688 DRM_LOCK(dev); 2689 2690 /* Clear the DBG now so we don't confuse earlier errors */ 2691 if ((gtfifodbg = I915_READ(GTFIFODBG))) { 2692 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); 2693 I915_WRITE(GTFIFODBG, gtfifodbg); 2694 } 2695 2696 gen6_gt_force_wake_get(dev_priv); 2697 2698 /* disable the counters and set deterministic thresholds */ 2699 I915_WRITE(GEN6_RC_CONTROL, 0); 2700 2701 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); 2702 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); 2703 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); 2704 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); 2705 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); 2706 2707 for (i = 0; i < I915_NUM_RINGS; i++) 2708 I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10); 2709 2710 I915_WRITE(GEN6_RC_SLEEP, 0); 2711 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 2712 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 2713 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); 2714 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 2715 2716 rc6_mode = intel_enable_rc6(dev_priv->dev); 2717 if (rc6_mode & INTEL_RC6_ENABLE) 2718 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 2719 2720 if (rc6_mode & INTEL_RC6p_ENABLE) 2721 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 2722 2723 if (rc6_mode & INTEL_RC6pp_ENABLE) 2724 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 2725 2726 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n", 2727 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off", 2728 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off", 2729 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off"); 2730 2731 I915_WRITE(GEN6_RC_CONTROL, 2732 rc6_mask | 2733 GEN6_RC_CTL_EI_MODE(1) | 2734 GEN6_RC_CTL_HW_ENABLE); 2735 2736 I915_WRITE(GEN6_RPNSWREQ, 2737 GEN6_FREQUENCY(10) | 2738 GEN6_OFFSET(0) | 2739 GEN6_AGGRESSIVE_TURBO); 2740 I915_WRITE(GEN6_RC_VIDEO_FREQ, 2741 GEN6_FREQUENCY(12)); 2742 2743 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); 2744 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 2745 18 << 24 | 2746 6 << 16); 2747 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); 2748 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); 2749 I915_WRITE(GEN6_RP_UP_EI, 100000); 2750 I915_WRITE(GEN6_RP_DOWN_EI, 5000000); 2751 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); 2752 I915_WRITE(GEN6_RP_CONTROL, 2753 GEN6_RP_MEDIA_TURBO | 2754 GEN6_RP_MEDIA_HW_MODE | 2755 GEN6_RP_MEDIA_IS_GFX | 2756 GEN6_RP_ENABLE | 2757 GEN6_RP_UP_BUSY_AVG | 2758 GEN6_RP_DOWN_IDLE_CONT); 2759 2760 if (_intel_wait_for(dev, 2761 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, 2762 1, "915pr1")) 2763 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 2764 2765 I915_WRITE(GEN6_PCODE_DATA, 0); 2766 I915_WRITE(GEN6_PCODE_MAILBOX, 2767 GEN6_PCODE_READY | 2768 GEN6_PCODE_WRITE_MIN_FREQ_TABLE); 2769 if (_intel_wait_for(dev, 2770 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, 2771 1, "915pr2")) 2772 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 2773 2774 min_freq = (rp_state_cap & 0xff0000) >> 16; 2775 max_freq = rp_state_cap & 0xff; 2776 cur_freq = (gt_perf_status & 0xff00) >> 8; 2777 2778 /* Check for overclock support */ 2779 if (_intel_wait_for(dev, 2780 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, 2781 1, "915pr3")) 2782 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 2783 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); 2784 pcu_mbox = I915_READ(GEN6_PCODE_DATA); 2785 if (_intel_wait_for(dev, 2786 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500, 2787 1, "915pr4")) 2788 DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); 2789 if (pcu_mbox & (1<<31)) { /* OC supported */ 2790 max_freq = pcu_mbox & 0xff; 2791 DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); 2792 } 2793 2794 /* In units of 100MHz */ 2795 dev_priv->max_delay = max_freq; 2796 dev_priv->min_delay = min_freq; 2797 dev_priv->cur_delay = cur_freq; 2798 2799 /* requires MSI enabled */ 2800 I915_WRITE(GEN6_PMIER, 2801 GEN6_PM_MBOX_EVENT | 2802 GEN6_PM_THERMAL_EVENT | 2803 GEN6_PM_RP_DOWN_TIMEOUT | 2804 GEN6_PM_RP_UP_THRESHOLD | 2805 GEN6_PM_RP_DOWN_THRESHOLD | 2806 GEN6_PM_RP_UP_EI_EXPIRED | 2807 GEN6_PM_RP_DOWN_EI_EXPIRED); 2808 lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE); 2809 if (dev_priv->pm_iir != 0) 2810 kprintf("pm_iir %x\n", dev_priv->pm_iir); 2811 I915_WRITE(GEN6_PMIMR, 0); 2812 lockmgr(&dev_priv->rps_lock, LK_RELEASE); 2813 /* enable all PM interrupts */ 2814 I915_WRITE(GEN6_PMINTRMSK, 0); 2815 2816 gen6_gt_force_wake_put(dev_priv); 2817 DRM_UNLOCK(dev); 2818 } 2819 2820 void gen6_update_ring_freq(struct drm_i915_private *dev_priv) 2821 { 2822 struct drm_device *dev; 2823 int min_freq = 15; 2824 int gpu_freq, ia_freq, max_ia_freq; 2825 int scaling_factor = 180; 2826 uint64_t tsc_freq; 2827 2828 dev = dev_priv->dev; 2829 #if 0 2830 max_ia_freq = cpufreq_quick_get_max(0); 2831 /* 2832 * Default to measured freq if none found, PCU will ensure we don't go 2833 * over 2834 */ 2835 if (!max_ia_freq) 2836 max_ia_freq = tsc_freq; 2837 2838 /* Convert from Hz to MHz */ 2839 max_ia_freq /= 1000; 2840 #else 2841 tsc_freq = atomic_load_acq_64(&tsc_freq); 2842 max_ia_freq = tsc_freq / 1000 / 1000; 2843 #endif 2844 2845 DRM_LOCK(dev); 2846 2847 /* 2848 * For each potential GPU frequency, load a ring frequency we'd like 2849 * to use for memory access. We do this by specifying the IA frequency 2850 * the PCU should use as a reference to determine the ring frequency. 2851 */ 2852 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; 2853 gpu_freq--) { 2854 int diff = dev_priv->max_delay - gpu_freq; 2855 int d; 2856 2857 /* 2858 * For GPU frequencies less than 750MHz, just use the lowest 2859 * ring freq. 2860 */ 2861 if (gpu_freq < min_freq) 2862 ia_freq = 800; 2863 else 2864 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 2865 d = 100; 2866 ia_freq = (ia_freq + d / 2) / d; 2867 2868 I915_WRITE(GEN6_PCODE_DATA, 2869 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | 2870 gpu_freq); 2871 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 2872 GEN6_PCODE_WRITE_MIN_FREQ_TABLE); 2873 if (_intel_wait_for(dev, 2874 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 2875 10, 1, "915frq")) { 2876 DRM_ERROR("pcode write of freq table timed out\n"); 2877 continue; 2878 } 2879 } 2880 2881 DRM_UNLOCK(dev); 2882 } 2883 2884 void ironlake_init_clock_gating(struct drm_device *dev) 2885 { 2886 struct drm_i915_private *dev_priv = dev->dev_private; 2887 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 2888 2889 /* Required for FBC */ 2890 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | 2891 DPFCRUNIT_CLOCK_GATE_DISABLE | 2892 DPFDUNIT_CLOCK_GATE_DISABLE; 2893 /* Required for CxSR */ 2894 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; 2895 2896 I915_WRITE(PCH_3DCGDIS0, 2897 MARIUNIT_CLOCK_GATE_DISABLE | 2898 SVSMUNIT_CLOCK_GATE_DISABLE); 2899 I915_WRITE(PCH_3DCGDIS1, 2900 VFMUNIT_CLOCK_GATE_DISABLE); 2901 2902 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 2903 2904 /* 2905 * According to the spec the following bits should be set in 2906 * order to enable memory self-refresh 2907 * The bit 22/21 of 0x42004 2908 * The bit 5 of 0x42020 2909 * The bit 15 of 0x45000 2910 */ 2911 I915_WRITE(ILK_DISPLAY_CHICKEN2, 2912 (I915_READ(ILK_DISPLAY_CHICKEN2) | 2913 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 2914 I915_WRITE(ILK_DSPCLK_GATE, 2915 (I915_READ(ILK_DSPCLK_GATE) | 2916 ILK_DPARB_CLK_GATE)); 2917 I915_WRITE(DISP_ARB_CTL, 2918 (I915_READ(DISP_ARB_CTL) | 2919 DISP_FBC_WM_DIS)); 2920 I915_WRITE(WM3_LP_ILK, 0); 2921 I915_WRITE(WM2_LP_ILK, 0); 2922 I915_WRITE(WM1_LP_ILK, 0); 2923 2924 /* 2925 * Based on the document from hardware guys the following bits 2926 * should be set unconditionally in order to enable FBC. 2927 * The bit 22 of 0x42000 2928 * The bit 22 of 0x42004 2929 * The bit 7,8,9 of 0x42020. 2930 */ 2931 if (IS_IRONLAKE_M(dev)) { 2932 I915_WRITE(ILK_DISPLAY_CHICKEN1, 2933 I915_READ(ILK_DISPLAY_CHICKEN1) | 2934 ILK_FBCQ_DIS); 2935 I915_WRITE(ILK_DISPLAY_CHICKEN2, 2936 I915_READ(ILK_DISPLAY_CHICKEN2) | 2937 ILK_DPARB_GATE); 2938 I915_WRITE(ILK_DSPCLK_GATE, 2939 I915_READ(ILK_DSPCLK_GATE) | 2940 ILK_DPFC_DIS1 | 2941 ILK_DPFC_DIS2 | 2942 ILK_CLK_FBC); 2943 } 2944 2945 I915_WRITE(ILK_DISPLAY_CHICKEN2, 2946 I915_READ(ILK_DISPLAY_CHICKEN2) | 2947 ILK_ELPIN_409_SELECT); 2948 I915_WRITE(_3D_CHICKEN2, 2949 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 2950 _3D_CHICKEN2_WM_READ_PIPELINED); 2951 } 2952 2953 void gen6_init_clock_gating(struct drm_device *dev) 2954 { 2955 struct drm_i915_private *dev_priv = dev->dev_private; 2956 int pipe; 2957 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 2958 2959 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 2960 2961 I915_WRITE(ILK_DISPLAY_CHICKEN2, 2962 I915_READ(ILK_DISPLAY_CHICKEN2) | 2963 ILK_ELPIN_409_SELECT); 2964 2965 I915_WRITE(WM3_LP_ILK, 0); 2966 I915_WRITE(WM2_LP_ILK, 0); 2967 I915_WRITE(WM1_LP_ILK, 0); 2968 2969 I915_WRITE(GEN6_UCGCTL1, 2970 I915_READ(GEN6_UCGCTL1) | 2971 GEN6_BLBUNIT_CLOCK_GATE_DISABLE); 2972 2973 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 2974 * gating disable must be set. Failure to set it results in 2975 * flickering pixels due to Z write ordering failures after 2976 * some amount of runtime in the Mesa "fire" demo, and Unigine 2977 * Sanctuary and Tropics, and apparently anything else with 2978 * alpha test or pixel discard. 2979 * 2980 * According to the spec, bit 11 (RCCUNIT) must also be set, 2981 * but we didn't debug actual testcases to find it out. 2982 */ 2983 I915_WRITE(GEN6_UCGCTL2, 2984 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | 2985 GEN6_RCCUNIT_CLOCK_GATE_DISABLE); 2986 2987 /* 2988 * According to the spec the following bits should be 2989 * set in order to enable memory self-refresh and fbc: 2990 * The bit21 and bit22 of 0x42000 2991 * The bit21 and bit22 of 0x42004 2992 * The bit5 and bit7 of 0x42020 2993 * The bit14 of 0x70180 2994 * The bit14 of 0x71180 2995 */ 2996 I915_WRITE(ILK_DISPLAY_CHICKEN1, 2997 I915_READ(ILK_DISPLAY_CHICKEN1) | 2998 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); 2999 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3000 I915_READ(ILK_DISPLAY_CHICKEN2) | 3001 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 3002 I915_WRITE(ILK_DSPCLK_GATE, 3003 I915_READ(ILK_DSPCLK_GATE) | 3004 ILK_DPARB_CLK_GATE | 3005 ILK_DPFD_CLK_GATE); 3006 3007 for_each_pipe(pipe) { 3008 I915_WRITE(DSPCNTR(pipe), 3009 I915_READ(DSPCNTR(pipe)) | 3010 DISPPLANE_TRICKLE_FEED_DISABLE); 3011 intel_flush_display_plane(dev_priv, pipe); 3012 } 3013 } 3014 3015 void ivybridge_init_clock_gating(struct drm_device *dev) 3016 { 3017 struct drm_i915_private *dev_priv = dev->dev_private; 3018 int pipe; 3019 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 3020 3021 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 3022 3023 I915_WRITE(WM3_LP_ILK, 0); 3024 I915_WRITE(WM2_LP_ILK, 0); 3025 I915_WRITE(WM1_LP_ILK, 0); 3026 3027 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB. 3028 * This implements the WaDisableRCZUnitClockGating workaround. 3029 */ 3030 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 3031 3032 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); 3033 3034 I915_WRITE(IVB_CHICKEN3, 3035 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 3036 CHICKEN3_DGMG_DONE_FIX_DISABLE); 3037 3038 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3039 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3040 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 3041 3042 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ 3043 I915_WRITE(GEN7_L3CNTLREG1, 3044 GEN7_WA_FOR_GEN7_L3_CONTROL); 3045 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 3046 GEN7_WA_L3_CHICKEN_MODE); 3047 3048 /* This is required by WaCatErrorRejectionIssue */ 3049 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 3050 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 3051 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 3052 3053 for_each_pipe(pipe) { 3054 I915_WRITE(DSPCNTR(pipe), 3055 I915_READ(DSPCNTR(pipe)) | 3056 DISPPLANE_TRICKLE_FEED_DISABLE); 3057 intel_flush_display_plane(dev_priv, pipe); 3058 } 3059 } 3060 3061 void g4x_init_clock_gating(struct drm_device *dev) 3062 { 3063 struct drm_i915_private *dev_priv = dev->dev_private; 3064 uint32_t dspclk_gate; 3065 3066 I915_WRITE(RENCLK_GATE_D1, 0); 3067 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | 3068 GS_UNIT_CLOCK_GATE_DISABLE | 3069 CL_UNIT_CLOCK_GATE_DISABLE); 3070 I915_WRITE(RAMCLK_GATE_D, 0); 3071 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | 3072 OVRUNIT_CLOCK_GATE_DISABLE | 3073 OVCUNIT_CLOCK_GATE_DISABLE; 3074 if (IS_GM45(dev)) 3075 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 3076 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 3077 } 3078 3079 void crestline_init_clock_gating(struct drm_device *dev) 3080 { 3081 struct drm_i915_private *dev_priv = dev->dev_private; 3082 3083 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 3084 I915_WRITE(RENCLK_GATE_D2, 0); 3085 I915_WRITE(DSPCLK_GATE_D, 0); 3086 I915_WRITE(RAMCLK_GATE_D, 0); 3087 I915_WRITE16(DEUC, 0); 3088 } 3089 3090 void broadwater_init_clock_gating(struct drm_device *dev) 3091 { 3092 struct drm_i915_private *dev_priv = dev->dev_private; 3093 3094 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 3095 I965_RCC_CLOCK_GATE_DISABLE | 3096 I965_RCPB_CLOCK_GATE_DISABLE | 3097 I965_ISC_CLOCK_GATE_DISABLE | 3098 I965_FBC_CLOCK_GATE_DISABLE); 3099 I915_WRITE(RENCLK_GATE_D2, 0); 3100 } 3101 3102 void gen3_init_clock_gating(struct drm_device *dev) 3103 { 3104 struct drm_i915_private *dev_priv = dev->dev_private; 3105 u32 dstate = I915_READ(D_STATE); 3106 3107 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 3108 DSTATE_DOT_CLOCK_GATING; 3109 I915_WRITE(D_STATE, dstate); 3110 } 3111 3112 void i85x_init_clock_gating(struct drm_device *dev) 3113 { 3114 struct drm_i915_private *dev_priv = dev->dev_private; 3115 3116 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 3117 } 3118 3119 void i830_init_clock_gating(struct drm_device *dev) 3120 { 3121 struct drm_i915_private *dev_priv = dev->dev_private; 3122 3123 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 3124 } 3125 3126 void ibx_init_clock_gating(struct drm_device *dev) 3127 { 3128 struct drm_i915_private *dev_priv = dev->dev_private; 3129 3130 /* 3131 * On Ibex Peak and Cougar Point, we need to disable clock 3132 * gating for the panel power sequencer or it will fail to 3133 * start up when no ports are active. 3134 */ 3135 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 3136 } 3137 3138 void cpt_init_clock_gating(struct drm_device *dev) 3139 { 3140 struct drm_i915_private *dev_priv = dev->dev_private; 3141 int pipe; 3142 3143 /* 3144 * On Ibex Peak and Cougar Point, we need to disable clock 3145 * gating for the panel power sequencer or it will fail to 3146 * start up when no ports are active. 3147 */ 3148 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); 3149 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | 3150 DPLS_EDP_PPS_FIX_DIS); 3151 /* Without this, mode sets may fail silently on FDI */ 3152 for_each_pipe(pipe) 3153 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); 3154 } 3155 3156 static void ironlake_teardown_rc6(struct drm_device *dev) 3157 { 3158 struct drm_i915_private *dev_priv = dev->dev_private; 3159 3160 if (dev_priv->renderctx) { 3161 i915_gem_object_unpin(dev_priv->renderctx); 3162 drm_gem_object_unreference(&dev_priv->renderctx->base); 3163 dev_priv->renderctx = NULL; 3164 } 3165 3166 if (dev_priv->pwrctx) { 3167 i915_gem_object_unpin(dev_priv->pwrctx); 3168 drm_gem_object_unreference(&dev_priv->pwrctx->base); 3169 dev_priv->pwrctx = NULL; 3170 } 3171 } 3172 3173 void ironlake_disable_rc6(struct drm_device *dev) 3174 { 3175 struct drm_i915_private *dev_priv = dev->dev_private; 3176 3177 if (I915_READ(PWRCTXA)) { 3178 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ 3179 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); 3180 (void)_intel_wait_for(dev, 3181 ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), 3182 50, 1, "915pro"); 3183 3184 I915_WRITE(PWRCTXA, 0); 3185 POSTING_READ(PWRCTXA); 3186 3187 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 3188 POSTING_READ(RSTDBYCTL); 3189 } 3190 3191 ironlake_teardown_rc6(dev); 3192 } 3193 3194 static int ironlake_setup_rc6(struct drm_device *dev) 3195 { 3196 struct drm_i915_private *dev_priv = dev->dev_private; 3197 3198 if (dev_priv->renderctx == NULL) 3199 dev_priv->renderctx = intel_alloc_context_page(dev); 3200 if (!dev_priv->renderctx) 3201 return -ENOMEM; 3202 3203 if (dev_priv->pwrctx == NULL) 3204 dev_priv->pwrctx = intel_alloc_context_page(dev); 3205 if (!dev_priv->pwrctx) { 3206 ironlake_teardown_rc6(dev); 3207 return -ENOMEM; 3208 } 3209 3210 return 0; 3211 } 3212 3213 void ironlake_enable_rc6(struct drm_device *dev) 3214 { 3215 struct drm_i915_private *dev_priv = dev->dev_private; 3216 int ret; 3217 3218 /* rc6 disabled by default due to repeated reports of hanging during 3219 * boot and resume. 3220 */ 3221 if (!intel_enable_rc6(dev)) 3222 return; 3223 3224 DRM_LOCK(dev); 3225 ret = ironlake_setup_rc6(dev); 3226 if (ret) { 3227 DRM_UNLOCK(dev); 3228 return; 3229 } 3230 3231 /* 3232 * GPU can automatically power down the render unit if given a page 3233 * to save state. 3234 */ 3235 ret = BEGIN_LP_RING(6); 3236 if (ret) { 3237 ironlake_teardown_rc6(dev); 3238 DRM_UNLOCK(dev); 3239 return; 3240 } 3241 3242 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 3243 OUT_RING(MI_SET_CONTEXT); 3244 OUT_RING(dev_priv->renderctx->gtt_offset | 3245 MI_MM_SPACE_GTT | 3246 MI_SAVE_EXT_STATE_EN | 3247 MI_RESTORE_EXT_STATE_EN | 3248 MI_RESTORE_INHIBIT); 3249 OUT_RING(MI_SUSPEND_FLUSH); 3250 OUT_RING(MI_NOOP); 3251 OUT_RING(MI_FLUSH); 3252 ADVANCE_LP_RING(); 3253 3254 /* 3255 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW 3256 * does an implicit flush, combined with MI_FLUSH above, it should be 3257 * safe to assume that renderctx is valid 3258 */ 3259 ret = intel_wait_ring_idle(LP_RING(dev_priv)); 3260 if (ret) { 3261 DRM_ERROR("failed to enable ironlake power savings\n"); 3262 ironlake_teardown_rc6(dev); 3263 DRM_UNLOCK(dev); 3264 return; 3265 } 3266 3267 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); 3268 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 3269 DRM_UNLOCK(dev); 3270 } 3271 3272 void intel_init_clock_gating(struct drm_device *dev) 3273 { 3274 struct drm_i915_private *dev_priv = dev->dev_private; 3275 3276 dev_priv->display.init_clock_gating(dev); 3277 3278 if (dev_priv->display.init_pch_clock_gating) 3279 dev_priv->display.init_pch_clock_gating(dev); 3280 } 3281