/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) DRM_READ8(dev_priv__->mmio_map, reg__)
#define __raw_i915_write8(dev_priv__, reg__, val__) DRM_WRITE8(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read16(dev_priv__, reg__) DRM_READ16(dev_priv__->mmio_map, reg__)
#define __raw_i915_write16(dev_priv__, reg__, val__) DRM_WRITE16(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)
#define __raw_i915_write32(dev_priv__, reg__, val__) DRM_WRITE32(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read64(dev_priv__, reg__) DRM_READ64(dev_priv__->mmio_map, reg__)
#define __raw_i915_write64(dev_priv__, reg__, val__) DRM_WRITE64(dev_priv__->mmio_map, reg__, val__)

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
	     "Device suspended\n");
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
				     int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
					int fw_engine)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (IS_GEN7(dev_priv->dev))
		gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, FIFO will be shared by both SW and HW.
	 * So, we need to read the FREE_ENTRIES every time */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count =
			__raw_i915_read32(dev_priv, GTFIFOCTL) &
						GT_FIFO_FREE_ENTRIES_MASK;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GTFIFOCTL) & GT_FIFO_FREE_ENTRIES_MASK;
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for Render to ack.\n");
	}

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine) {
		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL) == 0,
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");

		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

		if (wait_for_atomic((__raw_i915_read32(dev_priv,
						       FORCEWAKE_ACK_MEDIA_VLV) &
				     FORCEWAKE_KERNEL),
				    FORCEWAKE_ACK_TIMEOUT_MS))
			DRM_ERROR("Timed out: waiting for media to ack.\n");
	}

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
				 int fw_engine)
{
	/* Check for Render Engine */
	if (FORCEWAKE_RENDER & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* Check for Media Engine */
	if (FORCEWAKE_MEDIA & fw_engine)
		__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
				   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));

	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	if (fw_engine & FORCEWAKE_RENDER &&
	    dev_priv->uncore.fw_rendercount++ != 0)
		fw_engine &= ~FORCEWAKE_RENDER;
	if (fw_engine & FORCEWAKE_MEDIA &&
	    dev_priv->uncore.fw_mediacount++ != 0)
		fw_engine &= ~FORCEWAKE_MEDIA;

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_engine);

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

static void
vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	if (fw_engine & FORCEWAKE_RENDER) {
		WARN_ON(!dev_priv->uncore.fw_rendercount);
		if (--dev_priv->uncore.fw_rendercount != 0)
			fw_engine &= ~FORCEWAKE_RENDER;
	}

	if (fw_engine & FORCEWAKE_MEDIA) {
		WARN_ON(!dev_priv->uncore.fw_mediacount);
		if (--dev_priv->uncore.fw_mediacount != 0)
			fw_engine &= ~FORCEWAKE_MEDIA;
	}

	if (fw_engine)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw_engine);

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

static void gen6_force_wake_timer(unsigned long arg)
{
	struct drm_i915_private *dev_priv = (void *)arg;

	assert_device_not_suspended(dev_priv);

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	intel_runtime_pm_put(dev_priv);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
		gen6_force_wake_timer((unsigned long)dev_priv);

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	if (IS_VALLEYVIEW(dev))
		vlv_force_wake_reset(dev_priv);
	else if (IS_GEN6(dev) || IS_GEN7(dev))
		__gen6_gt_force_wake_reset(dev_priv);

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_GEN8(dev))
		__gen7_gt_force_wake_mt_reset(dev_priv);

	if (restore) { /* If reset with a user forcewake, try to restore */
		unsigned fw = 0;

		if (IS_VALLEYVIEW(dev)) {
			if (dev_priv->uncore.fw_rendercount)
				fw |= FORCEWAKE_RENDER;

			if (dev_priv->uncore.fw_mediacount)
				fw |= FORCEWAKE_MEDIA;
		} else {
			if (dev_priv->uncore.forcewake_count)
				fw = FORCEWAKE_ALL;
		}

		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				__raw_i915_read32(dev_priv, GTFIFOCTL) &
				GT_FIFO_FREE_ENTRIES_MASK;
	} else {
		dev_priv->uncore.forcewake_count = 0;
		dev_priv->uncore.fw_rendercount = 0;
		dev_priv->uncore.fw_mediacount = 0;
	}

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev)) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	intel_uncore_forcewake_reset(dev, false);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	intel_runtime_pm_get(dev_priv);

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev))
		return vlv_force_wake_get(dev_priv, fw_engine);

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
	bool delayed = false;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	/* Redirect to VLV specific routine */
	if (IS_VALLEYVIEW(dev_priv->dev)) {
		vlv_force_wake_put(dev_priv, fw_engine);
		goto out;
	}

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	WARN_ON(!dev_priv->uncore.forcewake_count);

	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		delayed = true;
		mod_timer_pinned(&dev_priv->uncore.force_wake_timer,
				 jiffies + 1);
	}
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

out:
	if (!delayed)
		intel_runtime_pm_put(dev_priv);
}

void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->uncore.forcewake_count > 0);
}

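/*
 * Illustrative only, not driver logic: a minimal sketch of the usage
 * pattern described in the gen6_gt_force_wake_get() comment above, for a
 * caller that must keep the GT awake across several register accesses.
 * The access sequence in the middle is a hypothetical placeholder.
 *
 *	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 *	... a series of I915_READ()/I915_WRITE() accesses that must not
 *	    observe the GT powering down between them ...
 *	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 *
 * Every get must be balanced by a put; note that the put may be deferred
 * briefly by force_wake_timer before the last reference is really dropped.
 */
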
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(((reg) >= 0x2000 && (reg) < 0x4000) || \
	 ((reg) >= 0x5000 && (reg) < 0x8000) || \
	 ((reg) >= 0xB000 && (reg) < 0x12000) || \
	 ((reg) >= 0x2E000 && (reg) < 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(((reg) >= 0x12000 && (reg) < 0x14000) || \
	 ((reg) >= 0x22000 && (reg) < 0x24000) || \
	 ((reg) >= 0x30000 && (reg) < 0x40000))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define REG_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define REG_READ_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (dev_priv->uncore.forcewake_count == 0 && \
	    NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, \
						      FORCEWAKE_ALL); \
		val = __raw_i915_read##x(dev_priv, reg); \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, \
						      FORCEWAKE_ALL); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	unsigned fwengine = 0; \
	REG_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_rendercount == 0) \
			fwengine = FORCEWAKE_RENDER; \
	} else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) { \
		if (dev_priv->uncore.fw_mediacount == 0) \
			fwengine = FORCEWAKE_MEDIA; \
	} \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	if (fwengine) \
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
	REG_READ_FOOTER; \
}

__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __vlv_read
#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define REG_WRITE_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	REG_WRITE_FOOTER; \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	REG_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	REG_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv, \
							      FORCEWAKE_ALL); \
		__raw_i915_write##x(dev_priv, reg, val); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv, \
							      FORCEWAKE_ALL); \
	} else { \
		__raw_i915_write##x(dev_priv, reg, val); \
	} \
	REG_WRITE_FOOTER; \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_FOOTER
#undef REG_WRITE_HEADER

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	setup_timer(&dev_priv->uncore.force_wake_timer,
		    gen6_force_wake_timer, (unsigned long)dev_priv);

	intel_uncore_early_sanitize(dev);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen7_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen7_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
		}

		if (IS_VALLEYVIEW(dev)) {
			dev_priv->uncore.funcs.mmio_readb = vlv_read8;
			dev_priv->uncore.funcs.mmio_readw = vlv_read16;
			dev_priv->uncore.funcs.mmio_readl = vlv_read32;
			dev_priv->uncore.funcs.mmio_readq = vlv_read64;
		} else {
			dev_priv->uncore.funcs.mmio_readb = gen6_read8;
			dev_priv->uncore.funcs.mmio_readw = gen6_read16;
			dev_priv->uncore.funcs.mmio_readl = gen6_read32;
			dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		}
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) },
};

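/*
 * For reference, a worked example of the gen_bitmask encoding noted above
 * (illustrative only, not new driver logic): GEN_RANGE(4, 8) expands to
 * GENMASK(8, 4), i.e. bits 4..8 set, or 0x1f0, so the whitelist entry above
 * matches gen 4 through gen 8 when tested against
 * (1 << INTEL_INFO(dev)->gen) in i915_reg_read_ioctl() below.
 */
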
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	args->reset_count = i915_reset_count(&dev_priv->gpu_error);

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
	return -ENODEV;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 8:
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4:
		if (IS_G4X(dev))
			return g4x_do_reset(dev);
		else
			return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}