/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_vgpu.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) DRM_READ8(dev_priv__->mmio_map, reg__)
#define __raw_i915_write8(dev_priv__, reg__, val__) DRM_WRITE8(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read16(dev_priv__, reg__) DRM_READ16(dev_priv__->mmio_map, reg__)
#define __raw_i915_write16(dev_priv__, reg__, val__) DRM_WRITE16(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)
#define __raw_i915_write32(dev_priv__, reg__, val__) DRM_WRITE32(dev_priv__->mmio_map, reg__, val__)

#define __raw_i915_read64(dev_priv__, reg__) DRM_READ64(dev_priv__->mmio_map, reg__)
#define __raw_i915_write64(dev_priv__, reg__, val__) DRM_WRITE64(dev_priv__->mmio_map, reg__, val__)

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static const char * const forcewake_domain_names[] = {
	"render",
	"blitter",
	"media",
};

const char *
intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
{
	BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
		     FW_DOMAIN_ID_COUNT);

	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
		return forcewake_domain_names[id];

	WARN_ON(id);

	return "unknown";
}

static void
assert_device_not_suspended(struct drm_i915_private *dev_priv)
{
	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
		  "Device suspended\n");
}

static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
	WARN_ON(d->reg_set == 0);
	__raw_i915_write32(d->i915, d->reg_set, d->val_reset);
}

static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{
	mod_timer_pinned(&d->timer, jiffies + 1);
}

static inline void
fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}

static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
	if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
			  intel_uncore_forcewake_domain_to_str(d->id));
}

static inline void
fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
	__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
}

static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{
	/* read something from the same cacheline, but not from the set register */
	if (d->reg_post)
		__raw_posting_read(d->i915, d->reg_post);
}

static void
fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_wait_ack_clear(d);
		fw_domain_get(d);
		fw_domain_wait_ack(d);
	}
}

static void
fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
		fw_domain_put(d);
		fw_domain_posting_read(d);
	}
}

static void
fw_domains_posting_read(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	/* No need to do this for all domains, just the first one found */
	for_each_fw_domain(d, dev_priv, id) {
		fw_domain_posting_read(d);
		break;
	}
}

static void
fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *d;
	enum forcewake_domain_id id;

	if (dev_priv->uncore.fw_domains == 0)
		return;

	for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
		fw_domain_reset(d);

	fw_domains_posting_read(dev_priv);
}

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
				GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
					      enum forcewake_domains fw_domains)
{
	fw_domains_get(dev_priv, fw_domains);

	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg, "GT wake FIFO error 0x%x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
}

static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
				     enum forcewake_domains fw_domains)
{
	fw_domains_put(dev_priv, fw_domains);
	gen6_gt_check_fifodbg(dev_priv);
}

static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
{
	u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);

	return count & GT_FIFO_FREE_ENTRIES_MASK;
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	/* On VLV, the FIFO is shared by both SW and HW, so we need to read
	 * FREE_ENTRIES every time.
	 */
	if (IS_VALLEYVIEW(dev_priv->dev))
		dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv);

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = fifo_free_entries(dev_priv);

		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = fifo_free_entries(dev_priv);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void intel_uncore_fw_release_timer(unsigned long arg)
{
	struct intel_uncore_forcewake_domain *domain = (void *)arg;

	assert_device_not_suspended(domain->i915);

	lockmgr(&domain->i915->uncore.lock, LK_EXCLUSIVE);
	if (WARN_ON(domain->wake_count == 0))
		domain->wake_count++;

	if (--domain->wake_count == 0)
		domain->i915->uncore.funcs.force_wake_put(domain->i915,
							  1 << domain->id);

	lockmgr(&domain->i915->uncore.lock, LK_RELEASE);
}
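
/*
 * intel_uncore_forcewake_reset - reset the forcewake state of the hardware
 * @dev: drm device
 * @restore: re-acquire forcewake for domains that still hold references
 *
 * Flushes any pending forcewake release timers, drops whatever kernel
 * forcewake the hardware still holds and clears the forcewake registers.
 * When @restore is set, forcewake is re-asserted for every domain that still
 * has an outstanding reference and the GT FIFO count is refreshed on
 * gen6/gen7; otherwise all domains are expected to be idle.
 */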
void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_uncore_forcewake_domain *domain;
	int retry_count = 100;
	enum forcewake_domain_id id;
	enum forcewake_domains fw = 0, active_domains;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly. Wait until all pending
	 * timers are run before holding.
	 */
	while (1) {
		active_domains = 0;

		for_each_fw_domain(domain, dev_priv, id) {
			if (del_timer_sync(&domain->timer) == 0)
				continue;

			intel_uncore_fw_release_timer((unsigned long)domain);
		}

		lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

		for_each_fw_domain(domain, dev_priv, id) {
			if (timer_pending(&domain->timer))
				active_domains |= (1 << id);
		}

		if (active_domains == 0)
			break;

		if (--retry_count == 0) {
			DRM_ERROR("Timed out waiting for forcewake timers to finish\n");
			break;
		}

		lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
#if 0
		cond_resched();
#endif
	}

	WARN_ON(active_domains);

	for_each_fw_domain(domain, dev_priv, id)
		if (domain->wake_count)
			fw |= 1 << id;

	if (fw)
		dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);

	fw_domains_reset(dev_priv, FORCEWAKE_ALL);

	if (restore) { /* If reset with a user forcewake, try to restore */
		if (fw)
			dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);

		if (IS_GEN6(dev) || IS_GEN7(dev))
			dev_priv->uncore.fifo_count =
				fifo_free_entries(dev_priv);
	}

	if (!restore)
		assert_forcewakes_inactive(dev_priv);

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

static void intel_uncore_ellc_detect(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if ((IS_HASWELL(dev) || IS_BROADWELL(dev) ||
	     INTEL_INFO(dev)->gen >= 9) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) & EDRAM_ENABLED)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void __intel_uncore_early_sanitize(struct drm_device *dev,
					  bool restore_forcewake)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	/* clear out old GT FIFO errors */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		__raw_i915_write32(dev_priv, GTFIFODBG,
				   __raw_i915_read32(dev_priv, GTFIFODBG));

	/* WaDisableShadowRegForCpd:chv */
	if (IS_CHERRYVIEW(dev)) {
		__raw_i915_write32(dev_priv, GTFIFOCTL,
				   __raw_i915_read32(dev_priv, GTFIFOCTL) |
				   GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
				   GT_FIFO_CTL_RC6_POLICY_STALL);
	}

	intel_uncore_forcewake_reset(dev, restore_forcewake);
}

void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
{
	__intel_uncore_early_sanitize(dev, restore_forcewake);
	i915_check_and_clear_faults(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count++)
			fw_domains &= ~(1 << id);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

/**
 * intel_uncore_forcewake_get - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * This function can be used to get the GT's forcewake domain references.
 * Normal register access will handle the forcewake domains automatically.
 * However, if some sequence requires the GT to not power down a particular
 * forcewake domain, this function should be called at the beginning of the
 * sequence, and the reference should subsequently be dropped by a symmetric
 * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
 * to be kept awake, in which case @fw_domains would be FORCEWAKE_ALL.
 */
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	WARN_ON(dev_priv->pm.suspended);

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	__intel_uncore_forcewake_get(dev_priv, fw_domains);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}

/**
 * intel_uncore_forcewake_get__locked - grab forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to get reference on
 *
 * See intel_uncore_forcewake_get(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	__intel_uncore_forcewake_get(dev_priv, fw_domains);
}

static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
					 enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	fw_domains &= dev_priv->uncore.fw_domains;

	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (WARN_ON(domain->wake_count == 0))
			continue;

		if (--domain->wake_count)
			continue;

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}
}

/**
 * intel_uncore_forcewake_put - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * This function drops the device-level forcewake references for the
 * specified domains obtained by intel_uncore_forcewake_get().
 */
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains fw_domains)
{
	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	__intel_uncore_forcewake_put(dev_priv, fw_domains);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);
}
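
/*
 * Typical usage of the explicit forcewake API (illustrative sketch only,
 * not a sequence taken from this file):
 *
 *	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 *	... register sequence that must not see the GT power down ...
 *	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 *
 * The __locked variants can be used instead when the caller already holds
 * dev_priv->uncore.lock.
 */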

/**
 * intel_uncore_forcewake_put__locked - release forcewake domain references
 * @dev_priv: i915 device instance
 * @fw_domains: forcewake domains to put references on
 *
 * See intel_uncore_forcewake_put(). This variant places the onus
 * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
 */
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains fw_domains)
{
	assert_spin_locked(&dev_priv->uncore.lock);

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	__intel_uncore_forcewake_put(dev_priv, fw_domains);
}

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	for_each_fw_domain(domain, dev_priv, id)
		WARN_ON(domain->wake_count);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && (reg) != FORCEWAKE)

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5000, 0x8000) || \
	 REG_RANGE((reg), 0xB000, 0x12000) || \
	 REG_RANGE((reg), 0x2E000, 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x22000, 0x24000) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE800))

#define FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8800, 0x8900) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1C000) || \
	 REG_RANGE((reg), 0x1E800, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x38000))

#define FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x4000, 0x5000) || \
	 REG_RANGE((reg), 0x8000, 0x8300) || \
	 REG_RANGE((reg), 0x8500, 0x8600) || \
	 REG_RANGE((reg), 0x9000, 0xB000) || \
	 REG_RANGE((reg), 0xF000, 0x10000))

#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0xB00, 0x2000)

#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x2000, 0x2700) || \
	 REG_RANGE((reg), 0x3000, 0x4000) || \
	 REG_RANGE((reg), 0x5200, 0x8000) || \
	 REG_RANGE((reg), 0x8140, 0x8160) || \
	 REG_RANGE((reg), 0x8300, 0x8500) || \
	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
	 REG_RANGE((reg), 0xB000, 0xB480) || \
	 REG_RANGE((reg), 0xE000, 0xE900) || \
	 REG_RANGE((reg), 0x24400, 0x24800))

#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
	(REG_RANGE((reg), 0x8130, 0x8140) || \
	 REG_RANGE((reg), 0x8800, 0x8A00) || \
	 REG_RANGE((reg), 0xD000, 0xD800) || \
	 REG_RANGE((reg), 0x12000, 0x14000) || \
	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
	 REG_RANGE((reg), 0x30000, 0x40000))

#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
	REG_RANGE((reg), 0x9400, 0x9800)

#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
	((reg) < 0x40000 && \
	 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
	 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
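
/*
 * The range macros above map an MMIO offset to the forcewake domain(s) that
 * must be held while the register is accessed; the per-platform read/write
 * functions further down consult them before touching a register.
 */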

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read,
			bool before)
{
	const char *op = read ? "reading" : "writing to";
	const char *when = before ? "before" : "after";

	if (!i915.mmio_debug)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
		     when, op, reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug--; /* Only report the first N failures */
	}
}

static void
hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
{
	static bool mmio_debug_once = true;

	if (i915.mmio_debug || !mmio_debug_once)
		return;

	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_DEBUG("Unclaimed register detected, "
			  "enabling oneshot unclaimed register reporting. "
			  "Please use i915.mmio_debug=N for more information.\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
		i915.mmio_debug = mmio_debug_once--;
	}
}

#define GEN2_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv);

#define GEN2_READ_FOOTER \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen2_read(x) \
static u##x \
gen2_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN2_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN2_READ_FOOTER; \
}

__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen2_read(8)
__gen2_read(16)
__gen2_read(32)
__gen2_read(64)

#undef __gen5_read
#undef __gen2_read

#undef GEN2_READ_FOOTER
#undef GEN2_READ_HEADER

#define GEN6_READ_HEADER(x) \
	u##x val = 0; \
	assert_device_not_suspended(dev_priv); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define GEN6_READ_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

static inline void __force_wake_get(struct drm_i915_private *dev_priv,
				    enum forcewake_domains fw_domains)
{
	struct intel_uncore_forcewake_domain *domain;
	enum forcewake_domain_id id;

	if (WARN_ON(!fw_domains))
		return;

	/* Ideally GCC would constant-fold and eliminate this loop */
	for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
		if (domain->wake_count) {
			fw_domains &= ~(1 << id);
			continue;
		}

		domain->wake_count++;
		fw_domain_arm_timer(domain);
	}

	if (fw_domains)
		dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
}

#define __vgpu_read(x) \
static u##x \
vgpu_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}
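
/*
 * The __gen6_read/__vlv_read/__chv_read/__gen9_read macros below expand into
 * per-width MMIO read functions. Each one works out which forcewake
 * domain(s), if any, cover the register offset and takes a temporary
 * reference on them via __force_wake_get() before doing the raw read.
 */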

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	val = __raw_i915_read##x(dev_priv, reg); \
	hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
	GEN6_READ_FOOTER; \
}

#define __vlv_read(x) \
static u##x \
vlv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define __chv_read(x) \
static u##x \
chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	GEN6_READ_HEADER(x); \
	if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
	else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
		__force_wake_get(dev_priv, \
				 FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
	((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))

#define __gen9_read(x) \
static u##x \
gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_READ_HEADER(x); \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	val = __raw_i915_read##x(dev_priv, reg); \
	GEN6_READ_FOOTER; \
}

__vgpu_read(8)
__vgpu_read(16)
__vgpu_read(32)
__vgpu_read(64)
__gen9_read(8)
__gen9_read(16)
__gen9_read(32)
__gen9_read(64)
__chv_read(8)
__chv_read(16)
__chv_read(32)
__chv_read(64)
__vlv_read(8)
__vlv_read(16)
__vlv_read(32)
__vlv_read(64)
__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)

#undef __gen9_read
#undef __chv_read
#undef __vlv_read
#undef __gen6_read
#undef __vgpu_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER

#define GEN2_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \

#define GEN2_WRITE_FOOTER

#define __gen2_write(x) \
static void \
gen2_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN2_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN2_WRITE_FOOTER; \
}

__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen2_write(8)
__gen2_write(16)
__gen2_write(32)
__gen2_write(64)

#undef __gen5_write
#undef __gen2_write

#undef GEN2_WRITE_FOOTER
#undef GEN2_WRITE_HEADER

#define GEN6_WRITE_HEADER \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	assert_device_not_suspended(dev_priv); \
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE)

#define GEN6_WRITE_FOOTER \
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE)

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	GEN6_WRITE_FOOTER; \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	GEN6_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __vgpu_write(x) \
static void vgpu_write##x(struct drm_i915_private *dev_priv, \
			  off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	GEN6_WRITE_HEADER; \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
	if (reg < 0x40000 && !is_gen8_shadowed(dev_priv, reg)) \
		__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
	__raw_i915_write##x(dev_priv, reg, val); \
	hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
	hsw_unclaimed_reg_detect(dev_priv); \
	GEN6_WRITE_FOOTER; \
}

#define __chv_write(x) \
static void \
chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool shadowed = is_gen8_shadowed(dev_priv, reg); \
	GEN6_WRITE_HEADER; \
	if (!shadowed) { \
		if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER); \
		else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_MEDIA); \
		else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(reg)) \
			__force_wake_get(dev_priv, FORCEWAKE_RENDER | FORCEWAKE_MEDIA); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}
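
/*
 * Registers listed here are treated as shadowed on gen9: gen9_write*() below
 * skips the forcewake handling for them, mirroring the gen8 list above.
 */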
static const u32 gen9_shadowed_regs[] = {
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	FORCEWAKE_BLITTER_GEN9,
	FORCEWAKE_RENDER_GEN9,
	FORCEWAKE_MEDIA_GEN9,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	/* TODO: Other registers are not yet used */
};

static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
		if (reg == gen9_shadowed_regs[i])
			return true;

	return false;
}

#define __gen9_write(x) \
static void \
gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
	      bool trace) { \
	enum forcewake_domains fw_engine; \
	GEN6_WRITE_HEADER; \
	if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
	    is_gen9_shadowed(dev_priv, reg)) \
		fw_engine = 0; \
	else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER; \
	else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_MEDIA; \
	else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) \
		fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \
	else \
		fw_engine = FORCEWAKE_BLITTER; \
	if (fw_engine) \
		__force_wake_get(dev_priv, fw_engine); \
	__raw_i915_write##x(dev_priv, reg, val); \
	GEN6_WRITE_FOOTER; \
}

__gen9_write(8)
__gen9_write(16)
__gen9_write(32)
__gen9_write(64)
__chv_write(8)
__chv_write(16)
__chv_write(32)
__chv_write(64)
__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__vgpu_write(8)
__vgpu_write(16)
__vgpu_write(32)
__vgpu_write(64)

#undef __gen9_write
#undef __chv_write
#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __vgpu_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER

#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
	dev_priv->uncore.funcs.mmio_writew = x##_write16; \
	dev_priv->uncore.funcs.mmio_writel = x##_write32; \
	dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
} while (0)

#define ASSIGN_READ_MMIO_VFUNCS(x) \
do { \
	dev_priv->uncore.funcs.mmio_readb = x##_read8; \
	dev_priv->uncore.funcs.mmio_readw = x##_read16; \
	dev_priv->uncore.funcs.mmio_readl = x##_read32; \
	dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0)
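
/*
 * fw_domain_init - set up a single forcewake domain
 *
 * Records the set/ack (and, where needed, posting-read) registers for the
 * domain, picks the set/clear/reset values (plain FORCEWAKE_KERNEL on gen6,
 * masked-bit writes elsewhere), arms the release timer callback and leaves
 * the hardware with forcewake cleared via fw_domain_reset().
 */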
static void fw_domain_init(struct drm_i915_private *dev_priv,
			   enum forcewake_domain_id domain_id,
			   u32 reg_set, u32 reg_ack)
{
	struct intel_uncore_forcewake_domain *d;

	if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
		return;

	d = &dev_priv->uncore.fw_domain[domain_id];

	WARN_ON(d->wake_count);

	d->wake_count = 0;
	d->reg_set = reg_set;
	d->reg_ack = reg_ack;

	if (IS_GEN6(dev_priv)) {
		d->val_reset = 0;
		d->val_set = FORCEWAKE_KERNEL;
		d->val_clear = 0;
	} else {
		/* WaRsClearFWBitsAtReset:bdw,skl */
		d->val_reset = _MASKED_BIT_DISABLE(0xffff);
		d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
		d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
	}

	if (IS_VALLEYVIEW(dev_priv))
		d->reg_post = FORCEWAKE_ACK_VLV;
	else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
		d->reg_post = ECOBUS;
	else
		d->reg_post = 0;

	d->i915 = dev_priv;
	d->id = domain_id;

	setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d);

	dev_priv->uncore.fw_domains |= (1 << domain_id);

	fw_domain_reset(d);
}

static void intel_uncore_fw_domains_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev_priv->dev)->gen <= 5)
		return;

	if (IS_GEN9(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_RENDER_GEN9,
			       FORCEWAKE_ACK_RENDER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
			       FORCEWAKE_BLITTER_GEN9,
			       FORCEWAKE_ACK_BLITTER_GEN9);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
		if (!IS_CHERRYVIEW(dev))
			dev_priv->uncore.funcs.force_wake_put =
				fw_domains_put_with_fifo;
		else
			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
		fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero, which will be (correctly)
		 * interpreted by the test below as MT forcewake being
		 * disabled.
		 */
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;

		/* We need to init first for ECOBUS access and then
		 * determine later if we want to reinit, in case MT access is
		 * not working. At this stage we don't know which flavour this
		 * ivb is, so it is better to reset also the gen6 fw registers
		 * before the ecobus check.
		 */

		__raw_i915_write32(dev_priv, FORCEWAKE, 0);
		__raw_posting_read(dev_priv, ECOBUS);

		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE_MT, FORCEWAKE_MT_ACK);

		mutex_lock(&dev->struct_mutex);
		fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
		mutex_unlock(&dev->struct_mutex);

		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
				       FORCEWAKE, FORCEWAKE_ACK);
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			fw_domains_get_with_thread_status;
		dev_priv->uncore.funcs.force_wake_put =
			fw_domains_put_with_fifo;
		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
			       FORCEWAKE, FORCEWAKE_ACK);
	}

	/* All future platforms are expected to require complex power gating */
	WARN_ON(dev_priv->uncore.fw_domains == 0);
}

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_check_vgpu(dev);

	intel_uncore_ellc_detect(dev);
	intel_uncore_fw_domains_init(dev);
	__intel_uncore_early_sanitize(dev, false);

	switch (INTEL_INFO(dev)->gen) {
	default:
		MISSING_CASE(INTEL_INFO(dev)->gen);
		return;
	case 9:
		ASSIGN_WRITE_MMIO_VFUNCS(gen9);
		ASSIGN_READ_MMIO_VFUNCS(gen9);
		break;
	case 8:
		if (IS_CHERRYVIEW(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(chv);
			ASSIGN_READ_MMIO_VFUNCS(chv);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen8);
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			ASSIGN_WRITE_MMIO_VFUNCS(hsw);
		} else {
			ASSIGN_WRITE_MMIO_VFUNCS(gen6);
		}

		if (IS_VALLEYVIEW(dev)) {
			ASSIGN_READ_MMIO_VFUNCS(vlv);
		} else {
			ASSIGN_READ_MMIO_VFUNCS(gen6);
		}
		break;
	case 5:
		ASSIGN_WRITE_MMIO_VFUNCS(gen5);
		ASSIGN_READ_MMIO_VFUNCS(gen5);
		break;
	case 4:
	case 3:
	case 2:
		ASSIGN_WRITE_MMIO_VFUNCS(gen2);
		ASSIGN_READ_MMIO_VFUNCS(gen2);
		break;
	}

	if (intel_vgpu_active(dev)) {
		ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
		ASSIGN_READ_MMIO_VFUNCS(vgpu);
	}

	i915_check_and_clear_faults(dev);
}
#undef ASSIGN_WRITE_MMIO_VFUNCS
#undef ASSIGN_READ_MMIO_VFUNCS

void intel_uncore_fini(struct drm_device *dev)
{
	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
	intel_uncore_forcewake_reset(dev, false);
}

#define GEN_RANGE(l, h) GENMASK(h, l)
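
/*
 * Whitelist of registers that userspace may read through
 * i915_reg_read_ioctl(), together with their size and the gens on which
 * reading them is allowed.
 */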
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	/* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
	uint32_t gen_bitmask;
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	unsigned size;
	u64 offset;
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == (reg->offset & -entry->size) &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	/* We use the low bits to encode extra flags as the register should
	 * be naturally aligned (and those that are not so aligned merely
	 * limit the available flags for that register).
	 */
	offset = entry->offset;
	size = entry->size;
	size |= reg->offset ^ offset;

	intel_runtime_pm_get(dev_priv);

	switch (size) {
	case 8 | 1:
		reg->val = I915_READ64_2x32(offset, offset+4);
		break;
	case 8:
		reg->val = I915_READ64(offset);
		break;
	case 4:
		reg->val = I915_READ(offset);
		break;
	case 2:
		reg->val = I915_READ16(offset);
		break;
	case 1:
		reg->val = I915_READ8(offset);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

int i915_get_reset_stats_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reset_stats *args = data;
	struct i915_ctx_hang_stats *hs;
	struct intel_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR(ctx);
	}
	hs = &ctx->hang_stats;

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = hs->batch_active;
	args->batch_pending = hs->batch_pending;

	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static int i915_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_STATUS) == 0;
}

static int i915_do_reset(struct drm_device *dev)
{
	/* assert reset for at least 20 usec */
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(20);
	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return wait_for(i915_reset_complete(dev), 500);
}

static int g4x_reset_complete(struct drm_device *dev)
{
	u8 gdrst;

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct drm_device *dev)
{
	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for(g4x_reset_complete(dev), 500);
}
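
/*
 * g4x resets the render and media domains one after the other; VCP unit
 * clock gating is temporarily disabled around the media reset
 * (WaVcpClkGateDisableForMediaReset).
 */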
static int g4x_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(g4x_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
	POSTING_READ(VDECCLK_GATE_D);

	pci_write_config_byte(dev->pdev, I915_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);

	return 0;
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev, true);

	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 6)
		return gen6_do_reset(dev);
	else if (IS_GEN5(dev))
		return ironlake_do_reset(dev);
	else if (IS_G4X(dev))
		return g4x_do_reset(dev);
	else if (IS_G33(dev))
		return g33_do_reset(dev);
	else if (INTEL_INFO(dev)->gen >= 3)
		return i915_do_reset(dev);
	else
		return -ENODEV;
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}