/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifdef __DragonFly__
#include "opt_drm.h"	/* for VGA_SWITCHEROO */
#endif

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
#include <acpi/video.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_drv.h"

static struct drm_driver driver;

static unsigned int i915_load_fail_count;

bool __i915_inject_load_failure(const char *func, int line)
{
	if (i915_load_fail_count >= i915.inject_load_failure)
		return false;

	if (++i915_load_fail_count == i915.inject_load_failure) {
		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
			 i915.inject_load_failure, func, line);
		return true;
	}

	return false;
}
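/*
 * Usage note: setting the i915.inject_load_failure module parameter to N
 * makes the Nth i915_inject_load_failure() checkpoint report a failure, so
 * the driver-load error paths below can be exercised on healthy hardware.
 * While a failure is being injected, i915_load_error() demotes the error
 * message to KERN_DEBUG (see i915_error_injected() below).
 */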
#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
		    "providing the dmesg log by booting with drm.debug=0xf"

void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...)
{
	static bool shown_bug_once;
	struct device *dev = dev_priv->drm.dev;
	bool is_error = level[1] <= KERN_ERR[1];
	bool is_debug = level[1] == KERN_DEBUG[1];
	struct va_format vaf;
	va_list args;

	if (is_debug && !(drm_debug & DRM_UT_DRIVER))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
		   __builtin_return_address(0), &vaf);

	if (is_error && !shown_bug_once) {
		dev_notice(dev, "%s", FDO_BUG_MSG);
		shown_bug_once = true;
	}

	va_end(args);
}

static bool i915_error_injected(struct drm_i915_private *dev_priv)
{
	return i915.inject_load_failure &&
	       i915_load_fail_count == i915.inject_load_failure;
}

#define i915_load_error(dev_priv, fmt, ...)				      \
	__i915_printk(dev_priv,						      \
		      i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)


static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
{
	enum intel_pch ret = PCH_NOP;

	/*
	 * In a virtualized passthrough environment we can be in a
	 * setup where the ISA bridge is not able to be passed through.
	 * In this case, a south bridge can be emulated and we have to
	 * make an educated guess as to which PCH is really there.
	 */

	if (IS_GEN5(dev)) {
		ret = PCH_IBX;
		DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
	} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
		ret = PCH_CPT;
		DRM_DEBUG_KMS("Assuming CougarPoint PCH\n");
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = PCH_LPT;
		DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		ret = PCH_SPT;
		DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
	}

	return ret;
}

static void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	device_t pch = NULL;
	struct pci_devinfo *di = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/* XXX The ISA bridge probe causes some old Core2 machines to hang */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	/*
	 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for the VMM, which then
	 * only needs to expose the ISA bridge to let the driver know the real
	 * hardware underneath. This is a requirement from the virtualization
	 * team.
	 *
	 * In some virtualized environments (e.g. XEN), there is an irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
177 */ 178 while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) { 179 if (pci_get_vendor(pch) == PCI_VENDOR_ID_INTEL) { 180 unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK; 181 dev_priv->pch_id = id; 182 183 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 184 dev_priv->pch_type = PCH_IBX; 185 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 186 WARN_ON(!IS_GEN5(dev)); 187 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 188 dev_priv->pch_type = PCH_CPT; 189 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 190 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 191 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 192 /* PantherPoint is CPT compatible */ 193 dev_priv->pch_type = PCH_CPT; 194 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 195 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 196 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 197 dev_priv->pch_type = PCH_LPT; 198 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 199 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 200 WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); 201 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 202 dev_priv->pch_type = PCH_LPT; 203 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 204 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 205 WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); 206 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 207 dev_priv->pch_type = PCH_SPT; 208 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 209 WARN_ON(!IS_SKYLAKE(dev) && 210 !IS_KABYLAKE(dev)); 211 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 212 dev_priv->pch_type = PCH_SPT; 213 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 214 WARN_ON(!IS_SKYLAKE(dev) && 215 !IS_KABYLAKE(dev)); 216 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 217 dev_priv->pch_type = PCH_KBP; 218 DRM_DEBUG_KMS("Found KabyPoint PCH\n"); 219 WARN_ON(!IS_KABYLAKE(dev)); 220 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 221 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 222 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 223 1)) { 224 dev_priv->pch_type = intel_virt_detect_pch(dev); 225 } else 226 continue; 227 228 break; 229 } 230 } 231 if (!pch) 232 DRM_DEBUG_KMS("No PCH found.\n"); 233 234 #if 0 235 pci_dev_put(pch); 236 #endif 237 } 238 239 static int i915_getparam(struct drm_device *dev, void *data, 240 struct drm_file *file_priv) 241 { 242 struct drm_i915_private *dev_priv = to_i915(dev); 243 drm_i915_getparam_t *param = data; 244 int value; 245 246 switch (param->param) { 247 case I915_PARAM_IRQ_ACTIVE: 248 case I915_PARAM_ALLOW_BATCHBUFFER: 249 case I915_PARAM_LAST_DISPATCH: 250 /* Reject all old ums/dri params. */ 251 return -ENODEV; 252 case I915_PARAM_CHIPSET_ID: 253 value = dev->pdev->device; 254 break; 255 case I915_PARAM_REVISION: 256 value = dev->pdev->revision; 257 break; 258 case I915_PARAM_HAS_GEM: 259 value = 1; 260 break; 261 case I915_PARAM_NUM_FENCES_AVAIL: 262 value = dev_priv->num_fence_regs; 263 break; 264 case I915_PARAM_HAS_OVERLAY: 265 value = dev_priv->overlay ? 
1 : 0; 266 break; 267 case I915_PARAM_HAS_PAGEFLIPPING: 268 value = 1; 269 break; 270 case I915_PARAM_HAS_EXECBUF2: 271 /* depends on GEM */ 272 value = 1; 273 break; 274 case I915_PARAM_HAS_BSD: 275 value = intel_engine_initialized(&dev_priv->engine[VCS]); 276 break; 277 case I915_PARAM_HAS_BLT: 278 value = intel_engine_initialized(&dev_priv->engine[BCS]); 279 break; 280 case I915_PARAM_HAS_VEBOX: 281 value = intel_engine_initialized(&dev_priv->engine[VECS]); 282 break; 283 case I915_PARAM_HAS_BSD2: 284 value = intel_engine_initialized(&dev_priv->engine[VCS2]); 285 break; 286 case I915_PARAM_HAS_RELAXED_FENCING: 287 value = 1; 288 break; 289 case I915_PARAM_HAS_COHERENT_RINGS: 290 value = 1; 291 break; 292 case I915_PARAM_HAS_EXEC_CONSTANTS: 293 value = INTEL_INFO(dev)->gen >= 4; 294 break; 295 case I915_PARAM_HAS_RELAXED_DELTA: 296 value = 1; 297 break; 298 case I915_PARAM_HAS_GEN7_SOL_RESET: 299 value = 1; 300 break; 301 case I915_PARAM_HAS_LLC: 302 value = HAS_LLC(dev); 303 break; 304 case I915_PARAM_HAS_WT: 305 value = HAS_WT(dev); 306 break; 307 case I915_PARAM_HAS_ALIASING_PPGTT: 308 value = USES_PPGTT(dev); 309 break; 310 case I915_PARAM_HAS_WAIT_TIMEOUT: 311 value = 1; 312 break; 313 case I915_PARAM_HAS_SEMAPHORES: 314 value = i915.semaphores; 315 break; 316 case I915_PARAM_HAS_PINNED_BATCHES: 317 value = 1; 318 break; 319 case I915_PARAM_HAS_EXEC_NO_RELOC: 320 value = 1; 321 break; 322 case I915_PARAM_HAS_EXEC_HANDLE_LUT: 323 value = 1; 324 break; 325 case I915_PARAM_CMD_PARSER_VERSION: 326 value = i915_cmd_parser_get_version(dev_priv); 327 break; 328 case I915_PARAM_HAS_COHERENT_PHYS_GTT: 329 value = 1; 330 break; 331 case I915_PARAM_SUBSLICE_TOTAL: 332 value = INTEL_INFO(dev)->subslice_total; 333 if (!value) 334 return -ENODEV; 335 break; 336 case I915_PARAM_EU_TOTAL: 337 value = INTEL_INFO(dev)->eu_total; 338 if (!value) 339 return -ENODEV; 340 break; 341 case I915_PARAM_HAS_GPU_RESET: 342 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv); 343 break; 344 case I915_PARAM_HAS_RESOURCE_STREAMER: 345 value = HAS_RESOURCE_STREAMER(dev); 346 break; 347 case I915_PARAM_HAS_EXEC_SOFTPIN: 348 value = 1; 349 break; 350 case I915_PARAM_HAS_POOLED_EU: 351 value = HAS_POOLED_EU(dev); 352 break; 353 case I915_PARAM_MIN_EU_IN_POOL: 354 value = INTEL_INFO(dev)->min_eu_in_pool; 355 break; 356 default: 357 DRM_DEBUG("Unknown parameter %d\n", param->param); 358 return -EINVAL; 359 } 360 361 if (put_user(value, param->value)) 362 return -EFAULT; 363 364 return 0; 365 } 366 367 static int i915_get_bridge_dev(struct drm_device *dev) 368 { 369 struct drm_i915_private *dev_priv = to_i915(dev); 370 static struct pci_dev i915_bridge_dev; 371 372 i915_bridge_dev.dev.bsddev = pci_find_dbsf(0, 0, 0, 0); 373 if (!i915_bridge_dev.dev.bsddev) { 374 DRM_ERROR("bridge device not found\n"); 375 return -1; 376 } 377 378 dev_priv->bridge_dev = &i915_bridge_dev; 379 return 0; 380 } 381 382 /* Allocate space for the MCH regs if needed, return nonzero on error */ 383 static int 384 intel_alloc_mchbar_resource(struct drm_device *dev) 385 { 386 struct drm_i915_private *dev_priv = to_i915(dev); 387 int reg = INTEL_INFO(dev)->gen >= 4 ? 
MCHBAR_I965 : MCHBAR_I915; 388 u32 temp_lo, temp_hi = 0; 389 u64 mchbar_addr; 390 device_t vga; 391 392 if (INTEL_INFO(dev)->gen >= 4) 393 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); 394 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); 395 mchbar_addr = ((u64)temp_hi << 32) | temp_lo; 396 397 /* If ACPI doesn't have it, assume we need to allocate it ourselves */ 398 #ifdef CONFIG_PNP 399 if (mchbar_addr && 400 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) 401 return 0; 402 #endif 403 404 /* Get some space for it */ 405 vga = device_get_parent(dev->dev->bsddev); 406 dev_priv->mch_res_rid = 0x100; 407 dev_priv->mch_res = BUS_ALLOC_RESOURCE(device_get_parent(vga), 408 dev->dev->bsddev, SYS_RES_MEMORY, &dev_priv->mch_res_rid, 0, ~0UL, 409 MCHBAR_SIZE, RF_ACTIVE | RF_SHAREABLE, -1); 410 if (dev_priv->mch_res == NULL) { 411 DRM_ERROR("failed mchbar resource alloc\n"); 412 return (-ENOMEM); 413 } 414 415 if (INTEL_INFO(dev)->gen >= 4) 416 pci_write_config_dword(dev_priv->bridge_dev, reg + 4, 417 upper_32_bits(rman_get_start(dev_priv->mch_res))); 418 419 pci_write_config_dword(dev_priv->bridge_dev, reg, 420 lower_32_bits(rman_get_start(dev_priv->mch_res))); 421 return 0; 422 } 423 424 /* Setup MCHBAR if possible, return true if we should disable it again */ 425 static void 426 intel_setup_mchbar(struct drm_device *dev) 427 { 428 struct drm_i915_private *dev_priv = to_i915(dev); 429 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915; 430 u32 temp; 431 bool enabled; 432 433 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 434 return; 435 436 dev_priv->mchbar_need_disable = false; 437 438 if (IS_I915G(dev) || IS_I915GM(dev)) { 439 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp); 440 enabled = !!(temp & DEVEN_MCHBAR_EN); 441 } else { 442 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 443 enabled = temp & 1; 444 } 445 446 /* If it's already enabled, don't have to do anything */ 447 if (enabled) 448 return; 449 450 if (intel_alloc_mchbar_resource(dev)) 451 return; 452 453 dev_priv->mchbar_need_disable = true; 454 455 /* Space is allocated or reserved, so enable it. */ 456 if (IS_I915G(dev) || IS_I915GM(dev)) { 457 pci_write_config_dword(dev_priv->bridge_dev, DEVEN, 458 temp | DEVEN_MCHBAR_EN); 459 } else { 460 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); 461 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); 462 } 463 } 464 465 static void 466 intel_teardown_mchbar(struct drm_device *dev) 467 { 468 struct drm_i915_private *dev_priv = to_i915(dev); 469 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? 
						   MCHBAR_I965 : MCHBAR_I915;
	device_t vga;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res != NULL) {
		vga = device_get_parent(dev->dev->bsddev);
		BUS_DEACTIVATE_RESOURCE(device_get_parent(vga), dev->dev->bsddev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		BUS_RELEASE_RESOURCE(device_get_parent(vga), dev->dev->bsddev,
		    SYS_RES_MEMORY, dev_priv->mch_res_rid, dev_priv->mch_res);
		dev_priv->mch_res = NULL;
	}
}

#if 0
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
#endif
static void i915_gem_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Neither the BIOS, ourselves nor any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload. Afterwards we then clean up the
	 * GEM state tracking, flushing off the requests and leaving the
	 * system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

	mutex_lock(&dev->struct_mutex);
	i915_gem_reset(dev);
	i915_gem_cleanup_engines(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);

	WARN_ON(!list_empty(&to_i915(dev)->context_list));
}

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	if (i915_inject_load_failure())
		return -ENODEV;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have more than one VGA card, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
#if 0
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;
#endif

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(dev_priv);

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	drm_kms_helper_poll_init(dev);

#ifdef __DragonFly__
	/*
	 * If we are dealing with dual GPU machines the vga_switcheroo module
	 * has been loaded. Machines with dual GPUs have an integrated graphics
	 * device (IGD), which we assume is an Intel device. The other, the
	 * discrete device (DIS), is either an NVidia or a Radeon device. For
	 * now we will force switch the gmux so the intel driver outputs
	 * both to the laptop panel and the external monitor.
	 *
	 * DragonFly does not have an nvidia native driver yet. In the future,
	 * we will check for the radeon device: if present, we will leave
	 * the gmux switch as it is, so the user can choose between the IGD and
	 * the DIS using the /dev/vga_switcheroo device.
673 */ 674 if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) { 675 ret = vga_switcheroo_force_migd(); 676 if (ret) { 677 DRM_INFO("could not switch gmux to IGD\n"); 678 } 679 } 680 #endif 681 682 return 0; 683 684 cleanup_gem: 685 i915_gem_fini(dev); 686 cleanup_irq: 687 intel_guc_fini(dev); 688 drm_irq_uninstall(dev); 689 intel_teardown_gmbus(dev); 690 cleanup_csr: 691 intel_csr_ucode_fini(dev_priv); 692 intel_power_domains_fini(dev_priv); 693 #if 0 694 vga_switcheroo_unregister_client(dev->pdev); 695 cleanup_vga_client: 696 vga_client_register(dev->pdev, NULL, NULL, NULL); 697 out: 698 #endif 699 return ret; 700 } 701 702 #if IS_ENABLED(CONFIG_FB) 703 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 704 { 705 struct apertures_struct *ap; 706 struct pci_dev *pdev = dev_priv->drm.pdev; 707 struct i915_ggtt *ggtt = &dev_priv->ggtt; 708 bool primary; 709 int ret; 710 711 ap = alloc_apertures(1); 712 if (!ap) 713 return -ENOMEM; 714 715 ap->ranges[0].base = ggtt->mappable_base; 716 ap->ranges[0].size = ggtt->mappable_end; 717 718 primary = 719 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 720 721 ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary); 722 723 kfree(ap); 724 725 return ret; 726 } 727 #else 728 static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 729 { 730 return 0; 731 } 732 #endif 733 734 #if !defined(CONFIG_VGA_CONSOLE) 735 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 736 { 737 return 0; 738 } 739 #elif !defined(CONFIG_DUMMY_CONSOLE) 740 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 741 { 742 return -ENODEV; 743 } 744 #else 745 static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 746 { 747 int ret = 0; 748 749 DRM_INFO("Replacing VGA console driver\n"); 750 751 console_lock(); 752 if (con_is_bound(&vga_con)) 753 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); 754 if (ret == 0) { 755 ret = do_unregister_con_driver(&vga_con); 756 757 /* Ignore "already unregistered". */ 758 if (ret == -ENODEV) 759 ret = 0; 760 } 761 console_unlock(); 762 763 return ret; 764 } 765 #endif 766 767 static void intel_init_dpio(struct drm_i915_private *dev_priv) 768 { 769 /* 770 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), 771 * CHV x1 PHY (DP/HDMI D) 772 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) 773 */ 774 if (IS_CHERRYVIEW(dev_priv)) { 775 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; 776 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; 777 } else if (IS_VALLEYVIEW(dev_priv)) { 778 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 779 } 780 } 781 782 static int i915_workqueues_init(struct drm_i915_private *dev_priv) 783 { 784 /* 785 * The i915 workqueue is primarily used for batched retirement of 786 * requests (and thus managing bo) once the task has been completed 787 * by the GPU. i915_gem_retire_requests() is called directly when we 788 * need high-priority retirement, such as waiting for an explicit 789 * bo. 790 * 791 * It is also used for periodic low-priority events, such as 792 * idle-timers and recording error state. 793 * 794 * All tasks on the workqueue are expected to acquire the dev mutex 795 * so there is no point in running more than one instance of the 796 * workqueue at any time. Use an ordered one. 
797 */ 798 dev_priv->wq = alloc_ordered_workqueue("i915", 0); 799 if (dev_priv->wq == NULL) 800 goto out_err; 801 802 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0); 803 if (dev_priv->hotplug.dp_wq == NULL) 804 goto out_free_wq; 805 806 return 0; 807 808 out_free_wq: 809 destroy_workqueue(dev_priv->wq); 810 out_err: 811 DRM_ERROR("Failed to allocate workqueues.\n"); 812 813 return -ENOMEM; 814 } 815 816 static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) 817 { 818 destroy_workqueue(dev_priv->hotplug.dp_wq); 819 destroy_workqueue(dev_priv->wq); 820 } 821 822 /** 823 * i915_driver_init_early - setup state not requiring device access 824 * @dev_priv: device private 825 * 826 * Initialize everything that is a "SW-only" state, that is state not 827 * requiring accessing the device or exposing the driver via kernel internal 828 * or userspace interfaces. Example steps belonging here: lock initialization, 829 * system memory allocation, setting up device specific attributes and 830 * function hooks not requiring accessing the device. 831 */ 832 static int i915_driver_init_early(struct drm_i915_private *dev_priv, 833 const struct pci_device_id *ent) 834 { 835 const struct intel_device_info *match_info = 836 (struct intel_device_info *)ent->driver_data; 837 struct intel_device_info *device_info; 838 int ret = 0; 839 840 if (i915_inject_load_failure()) 841 return -ENODEV; 842 843 /* Setup the write-once "constant" device info */ 844 device_info = mkwrite_device_info(dev_priv); 845 memcpy(device_info, match_info, sizeof(*device_info)); 846 device_info->device_id = dev_priv->drm.pdev->device; 847 848 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); 849 device_info->gen_mask = BIT(device_info->gen - 1); 850 851 lockinit(&dev_priv->irq_lock, "userirq", 0, 0); 852 lockinit(&dev_priv->gpu_error.lock, "915err", 0, 0); 853 lockinit(&dev_priv->backlight_lock, "i915bl", 0, LK_CANRECURSE); 854 lockinit(&dev_priv->uncore.lock, "915gt", 0, 0); 855 lockinit(&dev_priv->mm.object_stat_lock, "i915osl", 0, 0); 856 lockinit(&dev_priv->mmio_flip_lock, "i915mfl", 0, 0); 857 lockinit(&dev_priv->sb_lock, "i915sbl", 0, LK_CANRECURSE); 858 lockinit(&dev_priv->modeset_restore_lock, "i915mrl", 0, LK_CANRECURSE); 859 lockinit(&dev_priv->av_mutex, "i915am", 0, LK_CANRECURSE); 860 lockinit(&dev_priv->wm.wm_mutex, "i915wm", 0, LK_CANRECURSE); 861 lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE); 862 863 ret = i915_workqueues_init(dev_priv); 864 if (ret < 0) 865 return ret; 866 867 ret = intel_gvt_init(dev_priv); 868 if (ret < 0) 869 goto err_workqueues; 870 871 /* This must be called before any calls to HAS_PCH_* */ 872 intel_detect_pch(&dev_priv->drm); 873 874 intel_pm_setup(&dev_priv->drm); 875 intel_init_dpio(dev_priv); 876 intel_power_domains_init(dev_priv); 877 intel_irq_init(dev_priv); 878 intel_init_display_hooks(dev_priv); 879 intel_init_clock_gating_hooks(dev_priv); 880 intel_init_audio_hooks(dev_priv); 881 i915_gem_load_init(&dev_priv->drm); 882 883 intel_display_crc_init(&dev_priv->drm); 884 885 intel_device_info_dump(dev_priv); 886 887 /* Not all pre-production machines fall into this category, only the 888 * very first ones. Almost everything should work, except for maybe 889 * suspend/resume. And we don't implement workarounds that affect only 890 * pre-production machines. */ 891 if (IS_HSW_EARLY_SDV(dev_priv)) 892 DRM_INFO("This is an early pre-production Haswell machine. 
" 893 "It may not be fully functional.\n"); 894 895 return 0; 896 897 err_workqueues: 898 i915_workqueues_cleanup(dev_priv); 899 return ret; 900 } 901 902 /** 903 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() 904 * @dev_priv: device private 905 */ 906 static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) 907 { 908 i915_gem_load_cleanup(&dev_priv->drm); 909 i915_workqueues_cleanup(dev_priv); 910 } 911 912 static int i915_mmio_setup(struct drm_device *dev) 913 { 914 struct drm_i915_private *dev_priv = to_i915(dev); 915 int mmio_bar; 916 int mmio_size; 917 918 mmio_bar = IS_GEN2(dev) ? 1 : 0; 919 /* 920 * Before gen4, the registers and the GTT are behind different BARs. 921 * However, from gen4 onwards, the registers and the GTT are shared 922 * in the same BAR, so we want to restrict this ioremap from 923 * clobbering the GTT which we want ioremap_wc instead. Fortunately, 924 * the register BAR remains the same size for all the earlier 925 * generations up to Ironlake. 926 */ 927 if (INTEL_INFO(dev)->gen < 5) 928 mmio_size = 512 * 1024; 929 else 930 mmio_size = 2 * 1024 * 1024; 931 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); 932 if (dev_priv->regs == NULL) { 933 DRM_ERROR("failed to map registers\n"); 934 935 return -EIO; 936 } 937 938 /* Try to make sure MCHBAR is enabled before poking at it */ 939 intel_setup_mchbar(dev); 940 941 return 0; 942 } 943 944 static void i915_mmio_cleanup(struct drm_device *dev) 945 { 946 #if 0 947 struct drm_i915_private *dev_priv = to_i915(dev); 948 #endif 949 950 intel_teardown_mchbar(dev); 951 #if 0 952 pci_iounmap(dev->pdev, dev_priv->regs); 953 #endif 954 } 955 956 /** 957 * i915_driver_init_mmio - setup device MMIO 958 * @dev_priv: device private 959 * 960 * Setup minimal device state necessary for MMIO accesses later in the 961 * initialization sequence. The setup here should avoid any other device-wide 962 * side effects or exposing the driver via kernel internal or user space 963 * interfaces. 964 */ 965 static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) 966 { 967 struct drm_device *dev = &dev_priv->drm; 968 int ret; 969 970 if (i915_inject_load_failure()) 971 return -ENODEV; 972 973 if (i915_get_bridge_dev(dev)) 974 return -EIO; 975 976 ret = i915_mmio_setup(dev); 977 if (ret < 0) 978 goto put_bridge; 979 980 intel_uncore_init(dev_priv); 981 982 return 0; 983 984 put_bridge: 985 pci_dev_put(dev_priv->bridge_dev); 986 987 return ret; 988 } 989 990 /** 991 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() 992 * @dev_priv: device private 993 */ 994 static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) 995 { 996 struct drm_device *dev = &dev_priv->drm; 997 998 intel_uncore_fini(dev_priv); 999 i915_mmio_cleanup(dev); 1000 pci_dev_put(dev_priv->bridge_dev); 1001 } 1002 1003 static void intel_sanitize_options(struct drm_i915_private *dev_priv) 1004 { 1005 i915.enable_execlists = 1006 intel_sanitize_enable_execlists(dev_priv, 1007 i915.enable_execlists); 1008 1009 /* 1010 * i915.enable_ppgtt is read-only, so do an early pass to validate the 1011 * user's requested state against the hardware/driver capabilities. We 1012 * do this now so that we can print out any log messages once rather 1013 * than every time we check intel_enable_ppgtt(). 
1014 */ 1015 i915.enable_ppgtt = 1016 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt); 1017 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); 1018 1019 i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores); 1020 DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores)); 1021 } 1022 1023 /** 1024 * i915_driver_init_hw - setup state requiring device access 1025 * @dev_priv: device private 1026 * 1027 * Setup state that requires accessing the device, but doesn't require 1028 * exposing the driver via kernel internal or userspace interfaces. 1029 */ 1030 static int i915_driver_init_hw(struct drm_i915_private *dev_priv) 1031 { 1032 struct drm_device *dev = &dev_priv->drm; 1033 int ret; 1034 1035 if (i915_inject_load_failure()) 1036 return -ENODEV; 1037 1038 intel_device_info_runtime_init(dev_priv); 1039 1040 intel_sanitize_options(dev_priv); 1041 1042 ret = i915_ggtt_probe_hw(dev_priv); 1043 if (ret) 1044 return ret; 1045 1046 /* WARNING: Apparently we must kick fbdev drivers before vgacon, 1047 * otherwise the vga fbdev driver falls over. */ 1048 ret = i915_kick_out_firmware_fb(dev_priv); 1049 if (ret) { 1050 DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); 1051 goto out_ggtt; 1052 } 1053 1054 ret = i915_kick_out_vgacon(dev_priv); 1055 if (ret) { 1056 DRM_ERROR("failed to remove conflicting VGA console\n"); 1057 goto out_ggtt; 1058 } 1059 1060 ret = i915_ggtt_init_hw(dev_priv); 1061 if (ret) 1062 return ret; 1063 1064 ret = i915_ggtt_enable_hw(dev_priv); 1065 if (ret) { 1066 DRM_ERROR("failed to enable GGTT\n"); 1067 goto out_ggtt; 1068 } 1069 1070 pci_set_master(dev->pdev); 1071 1072 #if 0 1073 /* overlay on gen2 is broken and can't address above 1G */ 1074 if (IS_GEN2(dev)) { 1075 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1076 if (ret) { 1077 DRM_ERROR("failed to set DMA mask\n"); 1078 1079 goto out_ggtt; 1080 } 1081 } 1082 1083 /* 965GM sometimes incorrectly writes to hardware status page (HWS) 1084 * using 32bit addressing, overwriting memory if HWS is located 1085 * above 4GB. 1086 * 1087 * The documentation also mentions an issue with undefined 1088 * behaviour if any general state is accessed within a page above 4GB, 1089 * which also needs to be handled carefully. 1090 */ 1091 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) { 1092 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); 1093 1094 if (ret) { 1095 DRM_ERROR("failed to set DMA mask\n"); 1096 1097 goto out_ggtt; 1098 } 1099 } 1100 #endif 1101 1102 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, 1103 PM_QOS_DEFAULT_VALUE); 1104 1105 intel_uncore_sanitize(dev_priv); 1106 1107 intel_opregion_setup(dev_priv); 1108 1109 i915_gem_load_init_fences(dev_priv); 1110 1111 /* On the 945G/GM, the chipset reports the MSI capability on the 1112 * integrated graphics even though the support isn't actually there 1113 * according to the published specs. It doesn't appear to function 1114 * correctly in testing on 945G. 1115 * This may be a side effect of MSI having been made available for PEG 1116 * and the registers being closely associated. 1117 * 1118 * According to chipset errata, on the 965GM, MSI interrupts may 1119 * be lost or delayed, but we use them anyways to avoid 1120 * stuck interrupts on some machines. 
1121 */ 1122 #if 0 1123 if (!IS_I945G(dev) && !IS_I945GM(dev)) { 1124 if (pci_enable_msi(dev->pdev) < 0) 1125 DRM_DEBUG_DRIVER("can't enable MSI"); 1126 } 1127 #endif 1128 1129 return 0; 1130 1131 out_ggtt: 1132 i915_ggtt_cleanup_hw(dev_priv); 1133 1134 return ret; 1135 } 1136 1137 /** 1138 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() 1139 * @dev_priv: device private 1140 */ 1141 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) 1142 { 1143 #if 0 1144 struct drm_device *dev = &dev_priv->drm; 1145 1146 if (dev->pdev->msi_enabled) 1147 pci_disable_msi(dev->pdev); 1148 #endif 1149 1150 pm_qos_remove_request(&dev_priv->pm_qos); 1151 i915_ggtt_cleanup_hw(dev_priv); 1152 } 1153 1154 /** 1155 * i915_driver_register - register the driver with the rest of the system 1156 * @dev_priv: device private 1157 * 1158 * Perform any steps necessary to make the driver available via kernel 1159 * internal or userspace interfaces. 1160 */ 1161 static void i915_driver_register(struct drm_i915_private *dev_priv) 1162 { 1163 struct drm_device *dev = &dev_priv->drm; 1164 1165 i915_gem_shrinker_init(dev_priv); 1166 1167 /* 1168 * Notify a valid surface after modesetting, 1169 * when running inside a VM. 1170 */ 1171 if (intel_vgpu_active(dev_priv)) 1172 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); 1173 1174 /* Reveal our presence to userspace */ 1175 if (drm_dev_register(dev, 0) == 0) { 1176 i915_debugfs_register(dev_priv); 1177 i915_setup_sysfs(dev); 1178 } else 1179 DRM_ERROR("Failed to register driver for userspace access!\n"); 1180 1181 if (INTEL_INFO(dev_priv)->num_pipes) { 1182 /* Must be done after probing outputs */ 1183 intel_opregion_register(dev_priv); 1184 acpi_video_register(); 1185 } 1186 1187 if (IS_GEN5(dev_priv)) 1188 intel_gpu_ips_init(dev_priv); 1189 1190 i915_audio_component_init(dev_priv); 1191 1192 /* 1193 * Some ports require correctly set-up hpd registers for detection to 1194 * work properly (leading to ghost connected connector status), e.g. VGA 1195 * on gm45. Hence we can only set up the initial fbdev config after hpd 1196 * irqs are fully enabled. We do it last so that the async config 1197 * cannot run before the connectors are registered. 
 */
	intel_fbdev_initial_config_async(dev);
}

/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	i915_audio_component_cleanup(dev_priv);

	intel_gpu_ips_teardown();
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_teardown_sysfs(&dev_priv->drm);
	i915_debugfs_unregister(dev_priv);
	drm_dev_unregister(&dev_priv->drm);

	i915_gem_shrinker_cleanup(dev_priv);
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 * - drive output discovery via intel_modeset_init()
 * - initialize the memory manager
 * - allocate initial config memory
 * - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent);
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

	ret = -ENOMEM;
	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv)
		ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
	if (ret) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "[" DRM_NAME ":%s] allocation failed\n", __func__);
		kfree(dev_priv);
		return ret;
	}

	dev_priv->drm.pdev = pdev;
	dev_priv->drm.dev_private = dev_priv;

#if 0
	ret = pci_enable_device(pdev);
	if (ret)
		goto out_free_priv;
#endif

	pci_set_drvdata(pdev, &dev_priv->drm);

	ret = i915_driver_init_early(dev_priv, ent);
	if (ret < 0)
		goto out_pci_disable;

	intel_runtime_pm_get(dev_priv);

	ret = i915_driver_init_mmio(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_init_hw(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	/*
	 * TODO: move the vblank init and parts of modeset init steps into one
	 * of the i915_driver_init_/i915_driver_register functions according
	 * to the role/effect of the given init step.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes) {
		ret = drm_vblank_init(&dev_priv->drm,
				      INTEL_INFO(dev_priv)->num_pipes);
		if (ret)
			goto out_cleanup_hw;
	}

	ret = i915_load_modeset_init(&dev_priv->drm);
	if (ret < 0)
		goto out_cleanup_vblank;

	i915_driver_register(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	intel_runtime_pm_put(dev_priv);

	/* Everything is in place, we can now relax! */
	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver.name, driver.major, driver.minor, driver.patchlevel,
		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);

	return 0;

out_cleanup_vblank:
	drm_vblank_cleanup(&dev_priv->drm);
out_cleanup_hw:
	i915_driver_cleanup_hw(dev_priv);
out_cleanup_mmio:
	i915_driver_cleanup_mmio(dev_priv);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_driver_cleanup_early(dev_priv);
out_pci_disable:
#if 0
	pci_disable_device(pdev);
out_free_priv:
#endif
	i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
	drm_dev_unref(&dev_priv->drm);
	return ret;
}
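/*
 * Note: the load sequence above is staged as i915_driver_init_early()
 * (software-only state), i915_driver_init_mmio() (bridge device and register
 * mapping), i915_driver_init_hw() (GGTT, MCHBAR and other device access),
 * then i915_load_modeset_init() and finally i915_driver_register(); the
 * error labels unwind these stages in reverse order.
 */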
void i915_driver_unload(struct drm_device *dev);
void i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_fbdev_fini(dev);

	if (i915_gem_suspend(dev))
		DRM_ERROR("failed to idle hardware; continuing to unload!\n");

	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_unregister(dev_priv);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

#if 0
	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);
#endif

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	/* Flush any outstanding unpin_work. */
	drain_workqueue(dev_priv->wq);

	intel_guc_fini(dev);
	i915_gem_fini(dev);
	intel_fbc_cleanup_cfb(dev_priv);

	intel_power_domains_fini(dev_priv);

	i915_driver_cleanup_hw(dev_priv);
	i915_driver_cleanup_mmio(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

	i915_driver_cleanup_early(dev_priv);
}

static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
1407 */ 1408 static void i915_driver_lastclose(struct drm_device *dev) 1409 { 1410 intel_fbdev_restore_mode(dev); 1411 #if 0 1412 vga_switcheroo_process_delayed_switch(); 1413 #endif 1414 } 1415 1416 static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) 1417 { 1418 mutex_lock(&dev->struct_mutex); 1419 i915_gem_context_close(dev, file); 1420 i915_gem_release(dev, file); 1421 mutex_unlock(&dev->struct_mutex); 1422 } 1423 1424 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) 1425 { 1426 struct drm_i915_file_private *file_priv = file->driver_priv; 1427 1428 kfree(file_priv); 1429 } 1430 1431 static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 1432 { 1433 struct drm_device *dev = &dev_priv->drm; 1434 struct intel_encoder *encoder; 1435 1436 drm_modeset_lock_all(dev); 1437 for_each_intel_encoder(dev, encoder) 1438 if (encoder->suspend) 1439 encoder->suspend(encoder); 1440 drm_modeset_unlock_all(dev); 1441 } 1442 1443 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 1444 bool rpm_resume); 1445 static int vlv_suspend_complete(struct drm_i915_private *dev_priv); 1446 1447 static bool suspend_to_idle(struct drm_i915_private *dev_priv) 1448 { 1449 #if IS_ENABLED(CONFIG_ACPI_SLEEP) 1450 if (acpi_target_system_state() < ACPI_STATE_S3) 1451 return true; 1452 #endif 1453 return false; 1454 } 1455 1456 static int i915_drm_suspend(struct drm_device *dev) 1457 { 1458 struct drm_i915_private *dev_priv = to_i915(dev); 1459 pci_power_t opregion_target_state; 1460 int error; 1461 1462 /* ignore lid events during suspend */ 1463 mutex_lock(&dev_priv->modeset_restore_lock); 1464 dev_priv->modeset_restore = MODESET_SUSPENDED; 1465 mutex_unlock(&dev_priv->modeset_restore_lock); 1466 1467 disable_rpm_wakeref_asserts(dev_priv); 1468 1469 /* We do a lot of poking in a lot of registers, make sure they work 1470 * properly. */ 1471 intel_display_set_init_power(dev_priv, true); 1472 1473 drm_kms_helper_poll_disable(dev); 1474 1475 #if 0 1476 pci_save_state(dev->pdev); 1477 #endif 1478 error = i915_gem_suspend(dev); 1479 if (error) { 1480 dev_err(&dev->pdev->dev, 1481 "GEM idle failed, resume might fail\n"); 1482 goto out; 1483 } 1484 1485 intel_guc_suspend(dev); 1486 1487 intel_display_suspend(dev); 1488 1489 intel_dp_mst_suspend(dev); 1490 1491 intel_runtime_pm_disable_interrupts(dev_priv); 1492 intel_hpd_cancel_work(dev_priv); 1493 1494 intel_suspend_encoders(dev_priv); 1495 1496 intel_suspend_hw(dev); 1497 1498 i915_gem_suspend_gtt_mappings(dev); 1499 1500 i915_save_state(dev); 1501 1502 opregion_target_state = suspend_to_idle(dev_priv) ? 
						       PCI_D1 : PCI_D3cold;
	intel_opregion_notify_adapter(dev_priv, opregion_target_state);

	intel_uncore_forcewake_reset(dev_priv, false);
	intel_opregion_unregister(dev_priv);

#if 0
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
#endif

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	intel_csr_ucode_suspend(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return error;
}
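/*
 * Note: suspend_to_idle() above reports whether the ACPI target state is
 * shallower than S3; i915_drm_suspend() uses it to pick PCI_D1 rather than
 * PCI_D3cold as the state advertised to the OpRegion, and the same helper
 * below decides whether the CSR/DMC firmware is left to manage the power
 * domains during i915_drm_suspend_late().
 */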
static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(drm_dev);
	bool fw_csr;
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	fw_csr = !IS_BROXTON(dev_priv) &&
		suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
	/*
	 * In case of firmware assisted context save/restore don't manually
	 * deinit the power domains. This also means the CSR/DMC firmware will
	 * stay active, it will power down any HW resources as required and
	 * also enable deeper system power states that would be blocked if the
	 * firmware was inactive.
	 */
	if (!fw_csr)
		intel_power_domains_suspend(dev_priv);

	ret = 0;
	if (IS_BROXTON(dev_priv))
		bxt_enable_dc9(dev_priv);
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		hsw_enable_pc8(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		if (!fw_csr)
			intel_power_domains_init_hw(dev_priv, true);

		goto out;
	}

#if 0
	pci_disable_device(drm_dev->pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 * Fujitsu FSC S7110
	 * Acer Aspire 1830T
	 */
	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
#endif

	dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);

out:
	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

int i915_suspend_switcheroo(device_t kdev)
{
	struct drm_softc *softc = device_get_softc(kdev);
	struct drm_device *dev = softc->drm_driver_data;
	int error;

	if (!dev) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

#if 0
	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;
#endif

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(dev);
	if (error)
		return error;

	return i915_drm_suspend_late(dev, false);
}
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	intel_csr_ucode_resume(dev_priv);

	i915_gem_resume(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev_priv);

	intel_init_pch_refclk(dev);
	drm_mode_config_reset(dev);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * interrupts.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	mutex_lock(&dev->struct_mutex);
	if (i915_gem_init_hw(dev)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_guc_resume(dev);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev);

	intel_display_resume(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_register(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);

	intel_autoenable_gt_powersave(dev_priv);
	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
#if 0
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
1737 */ 1738 if (pci_enable_device(dev->pdev)) { 1739 ret = -EIO; 1740 goto out; 1741 } 1742 #endif 1743 1744 pci_set_master(dev->pdev); 1745 1746 disable_rpm_wakeref_asserts(dev_priv); 1747 1748 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1749 ret = vlv_resume_prepare(dev_priv, false); 1750 if (ret) 1751 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", 1752 ret); 1753 1754 intel_uncore_early_sanitize(dev_priv, true); 1755 1756 if (IS_BROXTON(dev_priv)) { 1757 if (!dev_priv->suspended_to_idle) 1758 gen9_sanitize_dc_state(dev_priv); 1759 bxt_disable_dc9(dev_priv); 1760 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 1761 hsw_disable_pc8(dev_priv); 1762 } 1763 1764 intel_uncore_sanitize(dev_priv); 1765 1766 if (IS_BROXTON(dev_priv) || 1767 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 1768 intel_power_domains_init_hw(dev_priv, true); 1769 1770 enable_rpm_wakeref_asserts(dev_priv); 1771 1772 #if 0 1773 out: 1774 #endif 1775 dev_priv->suspended_to_idle = false; 1776 1777 return ret; 1778 } 1779 1780 int i915_resume_switcheroo(struct drm_device *dev) 1781 { 1782 int ret; 1783 1784 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1785 return 0; 1786 1787 ret = i915_drm_resume_early(dev); 1788 if (ret) 1789 return ret; 1790 1791 return i915_drm_resume(dev); 1792 } 1793 1794 /** 1795 * i915_reset - reset chip after a hang 1796 * @dev: drm device to reset 1797 * 1798 * Reset the chip. Useful if a hang is detected. Returns zero on successful 1799 * reset or otherwise an error code. 1800 * 1801 * Procedure is fairly simple: 1802 * - reset the chip using the reset reg 1803 * - re-init context state 1804 * - re-init hardware status page 1805 * - re-init ring buffer 1806 * - re-init interrupt state 1807 * - re-init display 1808 */ 1809 int i915_reset(struct drm_i915_private *dev_priv) 1810 { 1811 struct drm_device *dev = &dev_priv->drm; 1812 struct i915_gpu_error *error = &dev_priv->gpu_error; 1813 unsigned reset_counter; 1814 int ret; 1815 1816 mutex_lock(&dev->struct_mutex); 1817 1818 /* Clear any previous failed attempts at recovery. Time to try again. */ 1819 atomic_andnot(I915_WEDGED, &error->reset_counter); 1820 1821 /* Clear the reset-in-progress flag and increment the reset epoch. */ 1822 reset_counter = atomic_inc_return(&error->reset_counter); 1823 if (WARN_ON(__i915_reset_in_progress(reset_counter))) { 1824 ret = -EIO; 1825 goto error; 1826 } 1827 1828 pr_notice("drm/i915: Resetting chip after gpu hang\n"); 1829 1830 i915_gem_reset(dev); 1831 1832 ret = intel_gpu_reset(dev_priv, ALL_ENGINES); 1833 if (ret) { 1834 if (ret != -ENODEV) 1835 DRM_ERROR("Failed to reset chip: %i\n", ret); 1836 else 1837 DRM_DEBUG_DRIVER("GPU reset disabled\n"); 1838 goto error; 1839 } 1840 1841 intel_overlay_reset(dev_priv); 1842 1843 /* Ok, now get things going again... */ 1844 1845 /* 1846 * Everything depends on having the GTT running, so we need to start 1847 * there. Fortunately we don't need to do this unless we reset the 1848 * chip at a PCI level. 1849 * 1850 * Next we need to restore the context, but we don't use those 1851 * yet either... 1852 * 1853 * Ring buffer needs to be re-initialized in the KMS case, or if X 1854 * was running at the time of the reset (i.e. we weren't VT 1855 * switched away). 
 */
	ret = i915_gem_init_hw(dev);
	if (ret) {
		DRM_ERROR("Failed hw init on reset %d\n", ret);
		goto error;
	}

	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps/rc6 re-init is necessary to restore state lost after the
	 * reset and the re-install of gt irqs. Skip for ironlake per
	 * previous concerns that it doesn't respond well to some forms
	 * of re-init after reset.
	 */
	intel_autoenable_gt_powersave(dev_priv);

	return 0;

error:
	atomic_or(I915_WEDGED, &error->reset_counter);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

#if 0
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = &dev_to_i915(dev)->drm;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}

/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *dev)
{
	return i915_pm_suspend(dev);
}

static int i915_pm_freeze_late(struct device *dev)
{
	int ret;

	ret = i915_pm_suspend_late(dev);
	if (ret)
		return ret;

	ret = i915_gem_freeze_late(dev_to_i915(dev));
	if (ret)
		return ret;

	return 0;
}

/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_pm_resume_early(dev);
}

static int i915_pm_thaw(struct device *dev)
{
	return i915_pm_resume(dev);
}

/* restore: called after loading the hibernation image. */
*/ 1980 static int i915_pm_restore_early(struct device *dev) 1981 { 1982 return i915_pm_resume_early(dev); 1983 } 1984 1985 static int i915_pm_restore(struct device *dev) 1986 { 1987 return i915_pm_resume(dev); 1988 } 1989 #endif 1990 1991 /* 1992 * Save all Gunit registers that may be lost after a D3 and a subsequent 1993 * S0i[R123] transition. The list of registers needing a save/restore is 1994 * defined in the VLV2_S0IXRegs document. This document marks all Gunit 1995 * registers in the following way: 1996 * - Driver: saved/restored by the driver 1997 * - Punit : saved/restored by the Punit firmware 1998 * - No, w/o marking: no need to save/restore, since the register is R/O or 1999 * used internally by the HW in a way that doesn't depend on 2000 * keeping the content across a suspend/resume. 2001 * - Debug : used for debugging 2002 * 2003 * We save/restore all registers marked with 'Driver', with the following 2004 * exceptions: 2005 * - Registers out of use, including registers marked with 'Debug'. 2006 * These have no effect on the driver's operation, so we don't save/restore 2007 * them to reduce the overhead. 2008 * - Registers that are fully set up by an initialization function called from 2009 * the resume path. For example many clock gating and RPS/RC6 registers. 2010 * - Registers that provide the right functionality with their reset defaults. 2011 * 2012 * TODO: Except for registers that, based on the above 3 criteria, can be safely 2013 * ignored, we save/restore all others, practically treating the HW context as 2014 * a black-box for the driver. Further investigation is needed to reduce the 2015 * saved/restored registers even further, by following the same 3 criteria. 2016 */ 2017 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) 2018 { 2019 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 2020 int i; 2021 2022 /* GAM 0x4000-0x4770 */ 2023 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); 2024 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); 2025 s->arb_mode = I915_READ(ARB_MODE); 2026 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); 2027 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); 2028 2029 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 2030 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); 2031 2032 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); 2033 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); 2034 2035 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); 2036 s->ecochk = I915_READ(GAM_ECOCHK); 2037 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); 2038 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); 2039 2040 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); 2041 2042 /* MBC 0x9024-0x91D0, 0x8500 */ 2043 s->g3dctl = I915_READ(VLV_G3DCTL); 2044 s->gsckgctl = I915_READ(VLV_GSCKGCTL); 2045 s->mbctl = I915_READ(GEN6_MBCTL); 2046 2047 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 2048 s->ucgctl1 = I915_READ(GEN6_UCGCTL1); 2049 s->ucgctl3 = I915_READ(GEN6_UCGCTL3); 2050 s->rcgctl1 = I915_READ(GEN6_RCGCTL1); 2051 s->rcgctl2 = I915_READ(GEN6_RCGCTL2); 2052 s->rstctl = I915_READ(GEN6_RSTCTL); 2053 s->misccpctl = I915_READ(GEN7_MISCCPCTL); 2054 2055 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 2056 s->gfxpause = I915_READ(GEN6_GFXPAUSE); 2057 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); 2058 s->rpdeuc = I915_READ(GEN6_RPDEUC); 2059 s->ecobus = I915_READ(ECOBUS); 2060 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); 2061 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); 2062 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); 2063 s->rcubmabdtmr =
I915_READ(GEN6_RCUBMABDTMR); 2064 s->rcedata = I915_READ(VLV_RCEDATA); 2065 s->spare2gh = I915_READ(VLV_SPAREG2H); 2066 2067 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ 2068 s->gt_imr = I915_READ(GTIMR); 2069 s->gt_ier = I915_READ(GTIER); 2070 s->pm_imr = I915_READ(GEN6_PMIMR); 2071 s->pm_ier = I915_READ(GEN6_PMIER); 2072 2073 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 2074 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); 2075 2076 /* GT SA CZ domain, 0x100000-0x138124 */ 2077 s->tilectl = I915_READ(TILECTL); 2078 s->gt_fifoctl = I915_READ(GTFIFOCTL); 2079 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); 2080 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2081 s->pmwgicz = I915_READ(VLV_PMWGICZ); 2082 2083 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 2084 s->gu_ctl0 = I915_READ(VLV_GU_CTL0); 2085 s->gu_ctl1 = I915_READ(VLV_GU_CTL1); 2086 s->pcbr = I915_READ(VLV_PCBR); 2087 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); 2088 2089 /* 2090 * Not saving any of: 2091 * DFT, 0x9800-0x9EC0 2092 * SARB, 0xB000-0xB1FC 2093 * GAC, 0x5208-0x524C, 0x14000-0x14C000 2094 * PCI CFG 2095 */ 2096 } 2097 2098 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) 2099 { 2100 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 2101 u32 val; 2102 int i; 2103 2104 /* GAM 0x4000-0x4770 */ 2105 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); 2106 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); 2107 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); 2108 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); 2109 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); 2110 2111 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 2112 I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]); 2113 2114 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); 2115 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); 2116 2117 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); 2118 I915_WRITE(GAM_ECOCHK, s->ecochk); 2119 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp); 2120 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp); 2121 2122 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr); 2123 2124 /* MBC 0x9024-0x91D0, 0x8500 */ 2125 I915_WRITE(VLV_G3DCTL, s->g3dctl); 2126 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl); 2127 I915_WRITE(GEN6_MBCTL, s->mbctl); 2128 2129 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 2130 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1); 2131 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3); 2132 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1); 2133 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2); 2134 I915_WRITE(GEN6_RSTCTL, s->rstctl); 2135 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl); 2136 2137 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 2138 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause); 2139 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc); 2140 I915_WRITE(GEN6_RPDEUC, s->rpdeuc); 2141 I915_WRITE(ECOBUS, s->ecobus); 2142 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl); 2143 I915_WRITE(GEN6_RP_DOWN_TIMEOUT,s->rp_down_timeout); 2144 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw); 2145 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr); 2146 I915_WRITE(VLV_RCEDATA, s->rcedata); 2147 I915_WRITE(VLV_SPAREG2H, s->spare2gh); 2148 2149 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ 2150 I915_WRITE(GTIMR, s->gt_imr); 2151 I915_WRITE(GTIER, s->gt_ier); 2152 I915_WRITE(GEN6_PMIMR, s->pm_imr); 2153 I915_WRITE(GEN6_PMIER, s->pm_ier); 2154 2155 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 2156 I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]); 2157 2158 /* GT SA CZ domain, 0x100000-0x138124 */ 2159 I915_WRITE(TILECTL, s->tilectl); 2160 
I915_WRITE(GTFIFOCTL, s->gt_fifoctl); 2161 /* 2162 * Preserve the GT allow wake and GFX force clock bit; they must not 2163 * be restored, as they are used to control the s0ix suspend/resume 2164 * sequence by the caller. 2165 */ 2166 val = I915_READ(VLV_GTLC_WAKE_CTRL); 2167 val &= VLV_GTLC_ALLOWWAKEREQ; 2168 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; 2169 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 2170 2171 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2172 val &= VLV_GFX_CLK_FORCE_ON_BIT; 2173 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; 2174 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 2175 2176 I915_WRITE(VLV_PMWGICZ, s->pmwgicz); 2177 2178 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 2179 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); 2180 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); 2181 I915_WRITE(VLV_PCBR, s->pcbr); 2182 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); 2183 } 2184 2185 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) 2186 { 2187 u32 val; 2188 int err; 2189 2190 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2191 val &= ~VLV_GFX_CLK_FORCE_ON_BIT; 2192 if (force_on) 2193 val |= VLV_GFX_CLK_FORCE_ON_BIT; 2194 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 2195 2196 if (!force_on) 2197 return 0; 2198 2199 err = intel_wait_for_register(dev_priv, 2200 VLV_GTLC_SURVIVABILITY_REG, 2201 VLV_GFX_CLK_STATUS_BIT, 2202 VLV_GFX_CLK_STATUS_BIT, 2203 20); 2204 if (err) 2205 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", 2206 I915_READ(VLV_GTLC_SURVIVABILITY_REG)); 2207 2208 return err; 2209 } 2210 2211 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) 2212 { 2213 u32 val; 2214 int err = 0; 2215 2216 val = I915_READ(VLV_GTLC_WAKE_CTRL); 2217 val &= ~VLV_GTLC_ALLOWWAKEREQ; 2218 if (allow) 2219 val |= VLV_GTLC_ALLOWWAKEREQ; 2220 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 2221 POSTING_READ(VLV_GTLC_WAKE_CTRL); 2222 2223 err = intel_wait_for_register(dev_priv, 2224 VLV_GTLC_PW_STATUS, 2225 VLV_GTLC_ALLOWWAKEACK, 2226 allow, 2227 1); 2228 if (err) 2229 DRM_ERROR("timeout disabling GT waking\n"); 2230 2231 return err; 2232 } 2233 2234 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, 2235 bool wait_for_on) 2236 { 2237 u32 mask; 2238 u32 val; 2239 int err; 2240 2241 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; 2242 val = wait_for_on ? mask : 0; 2243 if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) 2244 return 0; 2245 2246 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", 2247 onoff(wait_for_on), 2248 I915_READ(VLV_GTLC_PW_STATUS)); 2249 2250 /* 2251 * RC6 transitioning can be delayed up to 2 msec (see 2252 * valleyview_enable_rps), use 3 msec for safety. 2253 */ 2254 err = intel_wait_for_register(dev_priv, 2255 VLV_GTLC_PW_STATUS, mask, val, 2256 3); 2257 if (err) 2258 DRM_ERROR("timeout waiting for GT wells to go %s\n", 2259 onoff(wait_for_on)); 2260 2261 return err; 2262 } 2263 2264 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) 2265 { 2266 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) 2267 return; 2268 2269 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); 2270 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); 2271 } 2272 2273 static int vlv_suspend_complete(struct drm_i915_private *dev_priv) 2274 { 2275 u32 mask; 2276 int err; 2277 2278 /* 2279 * Bspec defines the following GT well-on flags as debug only, so 2280 * don't treat them as hard failures.
2281 */ 2282 (void)vlv_wait_for_gt_wells(dev_priv, false); 2283 2284 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; 2285 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); 2286 2287 vlv_check_no_gt_access(dev_priv); 2288 2289 err = vlv_force_gfx_clock(dev_priv, true); 2290 if (err) 2291 goto err1; 2292 2293 err = vlv_allow_gt_wake(dev_priv, false); 2294 if (err) 2295 goto err2; 2296 2297 if (!IS_CHERRYVIEW(dev_priv)) 2298 vlv_save_gunit_s0ix_state(dev_priv); 2299 2300 err = vlv_force_gfx_clock(dev_priv, false); 2301 if (err) 2302 goto err2; 2303 2304 return 0; 2305 2306 err2: 2307 /* For safety always re-enable waking and disable gfx clock forcing */ 2308 vlv_allow_gt_wake(dev_priv, true); 2309 err1: 2310 vlv_force_gfx_clock(dev_priv, false); 2311 2312 return err; 2313 } 2314 2315 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 2316 bool rpm_resume) 2317 { 2318 struct drm_device *dev = &dev_priv->drm; 2319 int err; 2320 int ret; 2321 2322 /* 2323 * If any of the steps fail just try to continue, that's the best we 2324 * can do at this point. Return the first error code (which will also 2325 * leave RPM permanently disabled). 2326 */ 2327 ret = vlv_force_gfx_clock(dev_priv, true); 2328 2329 if (!IS_CHERRYVIEW(dev_priv)) 2330 vlv_restore_gunit_s0ix_state(dev_priv); 2331 2332 err = vlv_allow_gt_wake(dev_priv, true); 2333 if (!ret) 2334 ret = err; 2335 2336 err = vlv_force_gfx_clock(dev_priv, false); 2337 if (!ret) 2338 ret = err; 2339 2340 vlv_check_no_gt_access(dev_priv); 2341 2342 if (rpm_resume) { 2343 intel_init_clock_gating(dev); 2344 i915_gem_restore_fences(dev); 2345 } 2346 2347 return ret; 2348 } 2349 2350 #if 0 2351 static int intel_runtime_suspend(struct device *device) 2352 { 2353 struct pci_dev *pdev = to_pci_dev(device); 2354 struct drm_device *dev = pci_get_drvdata(pdev); 2355 struct drm_i915_private *dev_priv = to_i915(dev); 2356 int ret; 2357 2358 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6()))) 2359 return -ENODEV; 2360 2361 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 2362 return -ENODEV; 2363 2364 DRM_DEBUG_KMS("Suspending device\n"); 2365 2366 /* 2367 * We could deadlock here in case another thread holding struct_mutex 2368 * calls RPM suspend concurrently, since the RPM suspend will wait 2369 * first for this RPM suspend to finish. In this case the concurrent 2370 * RPM resume will be followed by its RPM suspend counterpart. Still 2371 * for consistency return -EAGAIN, which will reschedule this suspend. 2372 */ 2373 if (!mutex_trylock(&dev->struct_mutex)) { 2374 DRM_DEBUG_KMS("device lock contention, deferring suspend\n"); 2375 /* 2376 * Bump the expiration timestamp, otherwise the suspend won't 2377 * be rescheduled. 2378 */ 2379 pm_runtime_mark_last_busy(device); 2380 2381 return -EAGAIN; 2382 } 2383 2384 disable_rpm_wakeref_asserts(dev_priv); 2385 2386 /* 2387 * We are safe here against re-faults, since the fault handler takes 2388 * an RPM reference.
2389 */ 2390 i915_gem_release_all_mmaps(dev_priv); 2391 mutex_unlock(&dev->struct_mutex); 2392 2393 intel_guc_suspend(dev); 2394 2395 intel_runtime_pm_disable_interrupts(dev_priv); 2396 2397 ret = 0; 2398 if (IS_BROXTON(dev_priv)) { 2399 bxt_display_core_uninit(dev_priv); 2400 bxt_enable_dc9(dev_priv); 2401 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2402 hsw_enable_pc8(dev_priv); 2403 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2404 ret = vlv_suspend_complete(dev_priv); 2405 } 2406 2407 if (ret) { 2408 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 2409 intel_runtime_pm_enable_interrupts(dev_priv); 2410 2411 enable_rpm_wakeref_asserts(dev_priv); 2412 2413 return ret; 2414 } 2415 2416 intel_uncore_forcewake_reset(dev_priv, false); 2417 2418 enable_rpm_wakeref_asserts(dev_priv); 2419 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2420 2421 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) 2422 DRM_ERROR("Unclaimed access detected prior to suspending\n"); 2423 2424 dev_priv->pm.suspended = true; 2425 2426 /* 2427 * FIXME: We really should find a document that references the arguments 2428 * used below! 2429 */ 2430 if (IS_BROADWELL(dev_priv)) { 2431 /* 2432 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 2433 * being detected, and the call we do at intel_runtime_resume() 2434 * won't be able to restore them. Since PCI_D3hot matches the 2435 * actual specification and appears to be working, use it. 2436 */ 2437 intel_opregion_notify_adapter(dev_priv, PCI_D3hot); 2438 } else { 2439 /* 2440 * current versions of firmware which depend on this opregion 2441 * notification have repurposed the D1 definition to mean 2442 * "runtime suspended" vs. what you would normally expect (D3) 2443 * to distinguish it from notifications that might be sent via 2444 * the suspend path. 
2445 */ 2446 intel_opregion_notify_adapter(dev_priv, PCI_D1); 2447 } 2448 2449 assert_forcewakes_inactive(dev_priv); 2450 2451 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 2452 intel_hpd_poll_init(dev_priv); 2453 2454 DRM_DEBUG_KMS("Device suspended\n"); 2455 return 0; 2456 } 2457 2458 static int intel_runtime_resume(struct device *device) 2459 { 2460 struct pci_dev *pdev = to_pci_dev(device); 2461 struct drm_device *dev = pci_get_drvdata(pdev); 2462 struct drm_i915_private *dev_priv = to_i915(dev); 2463 int ret = 0; 2464 2465 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 2466 return -ENODEV; 2467 2468 DRM_DEBUG_KMS("Resuming device\n"); 2469 2470 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2471 disable_rpm_wakeref_asserts(dev_priv); 2472 2473 intel_opregion_notify_adapter(dev_priv, PCI_D0); 2474 dev_priv->pm.suspended = false; 2475 if (intel_uncore_unclaimed_mmio(dev_priv)) 2476 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 2477 2478 intel_guc_resume(dev); 2479 2480 if (IS_GEN6(dev_priv)) 2481 intel_init_pch_refclk(dev); 2482 2483 if (IS_BROXTON(dev)) { 2484 bxt_disable_dc9(dev_priv); 2485 bxt_display_core_init(dev_priv, true); 2486 if (dev_priv->csr.dmc_payload && 2487 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 2488 gen9_enable_dc5(dev_priv); 2489 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 2490 hsw_disable_pc8(dev_priv); 2491 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 2492 ret = vlv_resume_prepare(dev_priv, true); 2493 } 2494 2495 /* 2496 * No point of rolling back things in case of an error, as the best 2497 * we can do is to hope that things will still work (and disable RPM). 2498 */ 2499 i915_gem_init_swizzling(dev); 2500 2501 intel_runtime_pm_enable_interrupts(dev_priv); 2502 2503 /* 2504 * On VLV/CHV display interrupts are part of the display 2505 * power well, so hpd is reinitialized from there. For 2506 * everyone else do it here. 
2507 */ 2508 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 2509 intel_hpd_init(dev_priv); 2510 2511 enable_rpm_wakeref_asserts(dev_priv); 2512 2513 if (ret) 2514 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); 2515 else 2516 DRM_DEBUG_KMS("Device resumed\n"); 2517 2518 return ret; 2519 } 2520 2521 const struct dev_pm_ops i915_pm_ops = { 2522 /* 2523 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 2524 * PMSG_RESUME] 2525 */ 2526 .suspend = i915_pm_suspend, 2527 .suspend_late = i915_pm_suspend_late, 2528 .resume_early = i915_pm_resume_early, 2529 .resume = i915_pm_resume, 2530 2531 /* 2532 * S4 event handlers 2533 * @freeze, @freeze_late : called (1) before creating the 2534 * hibernation image [PMSG_FREEZE] and 2535 * (2) after rebooting, before restoring 2536 * the image [PMSG_QUIESCE] 2537 * @thaw, @thaw_early : called (1) after creating the hibernation 2538 * image, before writing it [PMSG_THAW] 2539 * and (2) after failing to create or 2540 * restore the image [PMSG_RECOVER] 2541 * @poweroff, @poweroff_late: called after writing the hibernation 2542 * image, before rebooting [PMSG_HIBERNATE] 2543 * @restore, @restore_early : called after rebooting and restoring the 2544 * hibernation image [PMSG_RESTORE] 2545 */ 2546 .freeze = i915_pm_freeze, 2547 .freeze_late = i915_pm_freeze_late, 2548 .thaw_early = i915_pm_thaw_early, 2549 .thaw = i915_pm_thaw, 2550 .poweroff = i915_pm_suspend, 2551 .poweroff_late = i915_pm_poweroff_late, 2552 .restore_early = i915_pm_restore_early, 2553 .restore = i915_pm_restore, 2554 2555 /* S0ix (via runtime suspend) event handlers */ 2556 .runtime_suspend = intel_runtime_suspend, 2557 .runtime_resume = intel_runtime_resume, 2558 }; 2559 2560 static const struct vm_operations_struct i915_gem_vm_ops = { 2561 .fault = i915_gem_fault, 2562 .open = drm_gem_vm_open, 2563 .close = drm_gem_vm_close, 2564 }; 2565 #endif 2566 2567 static struct cdev_pager_ops i915_gem_vm_ops = { 2568 .cdev_pg_fault = i915_gem_fault, 2569 .cdev_pg_ctor = i915_gem_pager_ctor, 2570 .cdev_pg_dtor = i915_gem_pager_dtor 2571 }; 2572 2573 static const struct file_operations i915_driver_fops = { 2574 .owner = THIS_MODULE, 2575 #if 0 2576 .open = drm_open, 2577 .release = drm_release, 2578 .unlocked_ioctl = drm_ioctl, 2579 .mmap = drm_gem_mmap, 2580 .poll = drm_poll, 2581 .read = drm_read, 2582 #ifdef CONFIG_COMPAT 2583 .compat_ioctl = i915_compat_ioctl, 2584 #endif 2585 .llseek = noop_llseek, 2586 #endif 2587 }; 2588 2589 static int 2590 i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data, 2591 struct drm_file *file) 2592 { 2593 return -ENODEV; 2594 } 2595 2596 static const struct drm_ioctl_desc i915_ioctls[] = { 2597 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2598 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH), 2599 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH), 2600 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), 2601 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 2602 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 2603 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), 2604 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2605 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 2606 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 2607 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2608 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH), 2609 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, 
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2610 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2611 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH), 2612 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), 2613 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2614 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2615 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 2616 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), 2617 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 2618 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 2619 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2620 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW), 2621 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW), 2622 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2623 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2624 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2625 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW), 2626 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW), 2627 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW), 2628 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW), 2629 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW), 2630 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW), 2631 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW), 2632 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW), 2633 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW), 2634 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 2635 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 2636 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 2637 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2638 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2639 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), 2640 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), 2641 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2642 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW), 2643 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW), 2644 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW), 2645 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW), 2646 #if 0 2647 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW), 2648 #endif 2649 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW), 2650 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW), 2651 }; 2652 2653 static int 
i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, 2654 struct sysctl_oid *top) 2655 { 2656 return drm_add_busid_modesetting(dev, ctx, top); 2657 } 2658 2659 static struct drm_driver driver = { 2660 /* Don't use MTRRs here; the Xserver or userspace app should 2661 * deal with them for Intel hardware. 2662 */ 2663 .driver_features = 2664 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 2665 DRIVER_RENDER | DRIVER_MODESET, 2666 .open = i915_driver_open, 2667 .lastclose = i915_driver_lastclose, 2668 .preclose = i915_driver_preclose, 2669 .postclose = i915_driver_postclose, 2670 .set_busid = drm_pci_set_busid, 2671 2672 .gem_close_object = i915_gem_close_object, 2673 .gem_free_object = i915_gem_free_object, 2674 .gem_vm_ops = &i915_gem_vm_ops, 2675 2676 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 2677 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 2678 .gem_prime_export = i915_gem_prime_export, 2679 .gem_prime_import = i915_gem_prime_import, 2680 2681 .dumb_create = i915_gem_dumb_create, 2682 .dumb_map_offset = i915_gem_mmap_gtt, 2683 .dumb_destroy = drm_gem_dumb_destroy, 2684 .ioctls = i915_ioctls, 2685 .num_ioctls = ARRAY_SIZE(i915_ioctls), 2686 .fops = &i915_driver_fops, 2687 .name = DRIVER_NAME, 2688 .desc = DRIVER_DESC, 2689 .date = DRIVER_DATE, 2690 .major = DRIVER_MAJOR, 2691 .minor = DRIVER_MINOR, 2692 .patchlevel = DRIVER_PATCHLEVEL, 2693 #ifdef __DragonFly__ 2694 .sysctl_init = i915_sysctl_init, 2695 #endif 2696 }; 2697
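/*
 * Illustrative sketch only, kept disabled like the other Linux-only paths in
 * this port: on Linux, a drm_driver such as the one above is bound to the
 * GPU's PCI device through a pci_driver that also hooks up i915_pm_ops for
 * system and runtime PM. The identifiers pciidlist, i915_pci_probe and
 * i915_pci_remove are taken from the upstream Linux driver and are
 * assumptions here; the DragonFly port attaches the device through its own
 * drm/PCI glue instead.
 */
#if 0
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
#endif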