1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #include <linux/device.h> 31 #include <linux/acpi.h> 32 #include <drm/drmP.h> 33 #include <drm/i915_drm.h> 34 #include "i915_drv.h" 35 #include "i915_trace.h" 36 #include "intel_drv.h" 37 38 #include <linux/apple-gmux.h> 39 #include <linux/console.h> 40 #include <linux/module.h> 41 #include <linux/pm_runtime.h> 42 #include <linux/vgaarb.h> 43 #include <linux/vga_switcheroo.h> 44 #include <drm/drm_crtc_helper.h> 45 46 static struct drm_driver driver; 47 48 #define GEN_DEFAULT_PIPEOFFSETS \ 49 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ 50 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ 51 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ 52 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \ 53 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET } 54 55 #define GEN_CHV_PIPEOFFSETS \ 56 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ 57 CHV_PIPE_C_OFFSET }, \ 58 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \ 59 CHV_TRANSCODER_C_OFFSET, }, \ 60 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \ 61 CHV_PALETTE_C_OFFSET } 62 63 #define CURSOR_OFFSETS \ 64 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET } 65 66 #define IVB_CURSOR_OFFSETS \ 67 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } 68 69 #define BDW_COLORS \ 70 .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } 71 #define CHV_COLORS \ 72 .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } 73 74 static const struct intel_device_info intel_i830_info = { 75 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 76 .has_overlay = 1, .overlay_needs_physical = 1, 77 .ring_mask = RENDER_RING, 78 GEN_DEFAULT_PIPEOFFSETS, 79 CURSOR_OFFSETS, 80 }; 81 82 static const struct intel_device_info intel_845g_info = { 83 .gen = 2, .num_pipes = 1, 84 .has_overlay = 1, .overlay_needs_physical = 1, 85 .ring_mask = RENDER_RING, 86 GEN_DEFAULT_PIPEOFFSETS, 87 CURSOR_OFFSETS, 88 }; 89 90 static const struct intel_device_info intel_i85x_info = { 91 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2, 92 .cursor_needs_physical = 1, 93 .has_overlay = 1, .overlay_needs_physical = 1, 94 .has_fbc = 1, 95 .ring_mask = 
RENDER_RING, 96 GEN_DEFAULT_PIPEOFFSETS, 97 CURSOR_OFFSETS, 98 }; 99 100 static const struct intel_device_info intel_i865g_info = { 101 .gen = 2, .num_pipes = 1, 102 .has_overlay = 1, .overlay_needs_physical = 1, 103 .ring_mask = RENDER_RING, 104 GEN_DEFAULT_PIPEOFFSETS, 105 CURSOR_OFFSETS, 106 }; 107 108 static const struct intel_device_info intel_i915g_info = { 109 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2, 110 .has_overlay = 1, .overlay_needs_physical = 1, 111 .ring_mask = RENDER_RING, 112 GEN_DEFAULT_PIPEOFFSETS, 113 CURSOR_OFFSETS, 114 }; 115 static const struct intel_device_info intel_i915gm_info = { 116 .gen = 3, .is_mobile = 1, .num_pipes = 2, 117 .cursor_needs_physical = 1, 118 .has_overlay = 1, .overlay_needs_physical = 1, 119 .supports_tv = 1, 120 .has_fbc = 1, 121 .ring_mask = RENDER_RING, 122 GEN_DEFAULT_PIPEOFFSETS, 123 CURSOR_OFFSETS, 124 }; 125 static const struct intel_device_info intel_i945g_info = { 126 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2, 127 .has_overlay = 1, .overlay_needs_physical = 1, 128 .ring_mask = RENDER_RING, 129 GEN_DEFAULT_PIPEOFFSETS, 130 CURSOR_OFFSETS, 131 }; 132 static const struct intel_device_info intel_i945gm_info = { 133 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2, 134 .has_hotplug = 1, .cursor_needs_physical = 1, 135 .has_overlay = 1, .overlay_needs_physical = 1, 136 .supports_tv = 1, 137 .has_fbc = 1, 138 .ring_mask = RENDER_RING, 139 GEN_DEFAULT_PIPEOFFSETS, 140 CURSOR_OFFSETS, 141 }; 142 143 static const struct intel_device_info intel_i965g_info = { 144 .gen = 4, .is_broadwater = 1, .num_pipes = 2, 145 .has_hotplug = 1, 146 .has_overlay = 1, 147 .ring_mask = RENDER_RING, 148 GEN_DEFAULT_PIPEOFFSETS, 149 CURSOR_OFFSETS, 150 }; 151 152 static const struct intel_device_info intel_i965gm_info = { 153 .gen = 4, .is_crestline = 1, .num_pipes = 2, 154 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, 155 .has_overlay = 1, 156 .supports_tv = 1, 157 .ring_mask = RENDER_RING, 158 GEN_DEFAULT_PIPEOFFSETS, 159 CURSOR_OFFSETS, 160 }; 161 162 static const struct intel_device_info intel_g33_info = { 163 .gen = 3, .is_g33 = 1, .num_pipes = 2, 164 .need_gfx_hws = 1, .has_hotplug = 1, 165 .has_overlay = 1, 166 .ring_mask = RENDER_RING, 167 GEN_DEFAULT_PIPEOFFSETS, 168 CURSOR_OFFSETS, 169 }; 170 171 static const struct intel_device_info intel_g45_info = { 172 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2, 173 .has_pipe_cxsr = 1, .has_hotplug = 1, 174 .ring_mask = RENDER_RING | BSD_RING, 175 GEN_DEFAULT_PIPEOFFSETS, 176 CURSOR_OFFSETS, 177 }; 178 179 static const struct intel_device_info intel_gm45_info = { 180 .gen = 4, .is_g4x = 1, .num_pipes = 2, 181 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, 182 .has_pipe_cxsr = 1, .has_hotplug = 1, 183 .supports_tv = 1, 184 .ring_mask = RENDER_RING | BSD_RING, 185 GEN_DEFAULT_PIPEOFFSETS, 186 CURSOR_OFFSETS, 187 }; 188 189 static const struct intel_device_info intel_pineview_info = { 190 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2, 191 .need_gfx_hws = 1, .has_hotplug = 1, 192 .has_overlay = 1, 193 GEN_DEFAULT_PIPEOFFSETS, 194 CURSOR_OFFSETS, 195 }; 196 197 static const struct intel_device_info intel_ironlake_d_info = { 198 .gen = 5, .num_pipes = 2, 199 .need_gfx_hws = 1, .has_hotplug = 1, 200 .ring_mask = RENDER_RING | BSD_RING, 201 GEN_DEFAULT_PIPEOFFSETS, 202 CURSOR_OFFSETS, 203 }; 204 205 static const struct intel_device_info intel_ironlake_m_info = { 206 .gen = 5, .is_mobile = 1, .num_pipes = 2, 207 
.need_gfx_hws = 1, .has_hotplug = 1, 208 .has_fbc = 1, 209 .ring_mask = RENDER_RING | BSD_RING, 210 GEN_DEFAULT_PIPEOFFSETS, 211 CURSOR_OFFSETS, 212 }; 213 214 static const struct intel_device_info intel_sandybridge_d_info = { 215 .gen = 6, .num_pipes = 2, 216 .need_gfx_hws = 1, .has_hotplug = 1, 217 .has_fbc = 1, 218 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, 219 .has_llc = 1, 220 GEN_DEFAULT_PIPEOFFSETS, 221 CURSOR_OFFSETS, 222 }; 223 224 static const struct intel_device_info intel_sandybridge_m_info = { 225 .gen = 6, .is_mobile = 1, .num_pipes = 2, 226 .need_gfx_hws = 1, .has_hotplug = 1, 227 .has_fbc = 1, 228 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, 229 .has_llc = 1, 230 GEN_DEFAULT_PIPEOFFSETS, 231 CURSOR_OFFSETS, 232 }; 233 234 #define GEN7_FEATURES \ 235 .gen = 7, .num_pipes = 3, \ 236 .need_gfx_hws = 1, .has_hotplug = 1, \ 237 .has_fbc = 1, \ 238 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ 239 .has_llc = 1, \ 240 GEN_DEFAULT_PIPEOFFSETS, \ 241 IVB_CURSOR_OFFSETS 242 243 static const struct intel_device_info intel_ivybridge_d_info = { 244 GEN7_FEATURES, 245 .is_ivybridge = 1, 246 }; 247 248 static const struct intel_device_info intel_ivybridge_m_info = { 249 GEN7_FEATURES, 250 .is_ivybridge = 1, 251 .is_mobile = 1, 252 }; 253 254 static const struct intel_device_info intel_ivybridge_q_info = { 255 GEN7_FEATURES, 256 .is_ivybridge = 1, 257 .num_pipes = 0, /* legal, last one wins */ 258 }; 259 260 #define VLV_FEATURES \ 261 .gen = 7, .num_pipes = 2, \ 262 .need_gfx_hws = 1, .has_hotplug = 1, \ 263 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ 264 .display_mmio_offset = VLV_DISPLAY_BASE, \ 265 GEN_DEFAULT_PIPEOFFSETS, \ 266 CURSOR_OFFSETS 267 268 static const struct intel_device_info intel_valleyview_m_info = { 269 VLV_FEATURES, 270 .is_valleyview = 1, 271 .is_mobile = 1, 272 }; 273 274 static const struct intel_device_info intel_valleyview_d_info = { 275 VLV_FEATURES, 276 .is_valleyview = 1, 277 }; 278 279 #define HSW_FEATURES \ 280 GEN7_FEATURES, \ 281 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ 282 .has_ddi = 1, \ 283 .has_fpga_dbg = 1 284 285 static const struct intel_device_info intel_haswell_d_info = { 286 HSW_FEATURES, 287 .is_haswell = 1, 288 }; 289 290 static const struct intel_device_info intel_haswell_m_info = { 291 HSW_FEATURES, 292 .is_haswell = 1, 293 .is_mobile = 1, 294 }; 295 296 #define BDW_FEATURES \ 297 HSW_FEATURES, \ 298 BDW_COLORS 299 300 static const struct intel_device_info intel_broadwell_d_info = { 301 BDW_FEATURES, 302 .gen = 8, 303 }; 304 305 static const struct intel_device_info intel_broadwell_m_info = { 306 BDW_FEATURES, 307 .gen = 8, .is_mobile = 1, 308 }; 309 310 static const struct intel_device_info intel_broadwell_gt3d_info = { 311 BDW_FEATURES, 312 .gen = 8, 313 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 314 }; 315 316 static const struct intel_device_info intel_broadwell_gt3m_info = { 317 BDW_FEATURES, 318 .gen = 8, .is_mobile = 1, 319 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 320 }; 321 322 static const struct intel_device_info intel_cherryview_info = { 323 .gen = 8, .num_pipes = 3, 324 .need_gfx_hws = 1, .has_hotplug = 1, 325 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 326 .is_cherryview = 1, 327 .display_mmio_offset = VLV_DISPLAY_BASE, 328 GEN_CHV_PIPEOFFSETS, 329 CURSOR_OFFSETS, 330 CHV_COLORS, 331 }; 332 333 static const struct intel_device_info intel_skylake_info = { 334 BDW_FEATURES, 335 .is_skylake = 1, 336 .gen = 9, 337 }; 
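
/*
 * Note: the info structs rely on C designated initializers, so a field set
 * after a feature macro overrides the value the macro supplied ("last one
 * wins", as noted for intel_ivybridge_q_info above). For example, in
 * intel_skylake_gt3_info below the ring_mask inherited from HSW_FEATURES
 * (RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING) is overridden by the
 * GT3-specific ring_mask that also includes BSD2_RING.
 */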

static const struct intel_device_info intel_skylake_gt3_info = {
	BDW_FEATURES,
	.is_skylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_broxton_info = {
	.is_preliminary = 1,
	.is_broxton = 1,
	.gen = 9,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.num_pipes = 3,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
	BDW_COLORS,
};

static const struct intel_device_info intel_kabylake_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_kabylake_gt2_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_kabylake_gt3_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

static const struct intel_device_info intel_coffeelake_gt1_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_coffeelake_gt2_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
};

static const struct intel_device_info intel_coffeelake_gt3_info = {
	BDW_FEATURES,
	.is_kabylake = 1,
	.gen = 9,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
};

/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
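 *
 * For example, the Quanta match (INTEL_IVB_Q_IDS) keys on the subsystem and
 * subvendor IDs, so it is listed before the generic INTEL_IVB_M_IDS and
 * INTEL_IVB_D_IDS entries in pciidlist[] below ("must be first IVB").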
404 */ 405 406 static const struct pci_device_id pciidlist[] = { 407 INTEL_I830_IDS(&intel_i830_info), 408 INTEL_I845G_IDS(&intel_845g_info), 409 INTEL_I85X_IDS(&intel_i85x_info), 410 INTEL_I865G_IDS(&intel_i865g_info), 411 INTEL_I915G_IDS(&intel_i915g_info), 412 INTEL_I915GM_IDS(&intel_i915gm_info), 413 INTEL_I945G_IDS(&intel_i945g_info), 414 INTEL_I945GM_IDS(&intel_i945gm_info), 415 INTEL_I965G_IDS(&intel_i965g_info), 416 INTEL_G33_IDS(&intel_g33_info), 417 INTEL_I965GM_IDS(&intel_i965gm_info), 418 INTEL_GM45_IDS(&intel_gm45_info), 419 INTEL_G45_IDS(&intel_g45_info), 420 INTEL_PINEVIEW_IDS(&intel_pineview_info), 421 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), 422 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), 423 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), 424 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), 425 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ 426 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), 427 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), 428 INTEL_HSW_D_IDS(&intel_haswell_d_info), 429 INTEL_HSW_M_IDS(&intel_haswell_m_info), 430 INTEL_VLV_M_IDS(&intel_valleyview_m_info), 431 INTEL_VLV_D_IDS(&intel_valleyview_d_info), 432 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), 433 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), 434 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), 435 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), 436 INTEL_CHV_IDS(&intel_cherryview_info), 437 INTEL_SKL_GT1_IDS(&intel_skylake_info), 438 INTEL_SKL_GT2_IDS(&intel_skylake_info), 439 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info), 440 INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info), 441 INTEL_BXT_IDS(&intel_broxton_info), 442 INTEL_KBL_GT1_IDS(&intel_kabylake_info), 443 INTEL_KBL_GT2_IDS(&intel_kabylake_info), 444 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info), 445 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info), 446 INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info), 447 INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info), 448 INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info), 449 INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info), 450 INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info), 451 INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info), 452 INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info), 453 INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info), 454 INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info), 455 {0, 0, 0} 456 }; 457 458 #define PCI_VENDOR_INTEL 0x8086 459 460 static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) 461 { 462 enum intel_pch ret = PCH_NOP; 463 464 /* 465 * In a virtualized passthrough environment we can be in a 466 * setup where the ISA bridge is not able to be passed through. 467 * In this case, a south bridge can be emulated and we have to 468 * make an educated guess as to which PCH is really there. 469 */ 470 471 if (IS_GEN5(dev)) { 472 ret = PCH_IBX; 473 DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); 474 } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { 475 ret = PCH_CPT; 476 DRM_DEBUG_KMS("Assuming CouarPoint PCH\n"); 477 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 478 ret = PCH_LPT; 479 DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 480 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 481 ret = PCH_SPT; 482 DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 483 } 484 485 return ret; 486 } 487 488 void intel_detect_pch(struct drm_device *dev) 489 { 490 struct drm_i915_private *dev_priv = dev->dev_private; 491 device_t pch = NULL; 492 struct pci_devinfo *di; 493 494 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting 495 * (which really amounts to a PCH but no South Display). 
496 */ 497 if (INTEL_INFO(dev)->num_pipes == 0) { 498 dev_priv->pch_type = PCH_NOP; 499 return; 500 } 501 502 /* XXX The ISA bridge probe causes some old Core2 machines to hang */ 503 if (INTEL_INFO(dev)->gen < 5) 504 return; 505 506 /* 507 * The reason to probe ISA bridge instead of Dev31:Fun0 is to 508 * make graphics device passthrough work easy for VMM, that only 509 * need to expose ISA bridge to let driver know the real hardware 510 * underneath. This is a requirement from virtualization team. 511 * 512 * In some virtualized environments (e.g. XEN), there is irrelevant 513 * ISA bridge in the system. To work reliably, we should scan trhough 514 * all the ISA bridge devices and check for the first match, instead 515 * of only checking the first one. 516 */ 517 di = NULL; 518 519 while ((pch = pci_iterate_class(&di, PCIC_BRIDGE, PCIS_BRIDGE_ISA))) { 520 if (pci_get_vendor(pch) == PCI_VENDOR_INTEL) { 521 unsigned short id = pci_get_device(pch) & INTEL_PCH_DEVICE_ID_MASK; 522 dev_priv->pch_id = id; 523 524 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 525 dev_priv->pch_type = PCH_IBX; 526 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 527 WARN_ON(!IS_GEN5(dev)); 528 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 529 dev_priv->pch_type = PCH_CPT; 530 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 531 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 532 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 533 /* PantherPoint is CPT compatible */ 534 dev_priv->pch_type = PCH_CPT; 535 DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 536 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); 537 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 538 dev_priv->pch_type = PCH_LPT; 539 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 540 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 541 WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)); 542 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 543 dev_priv->pch_type = PCH_LPT; 544 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 545 WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev)); 546 WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev)); 547 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 548 dev_priv->pch_type = PCH_SPT; 549 DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 550 WARN_ON(!IS_SKYLAKE(dev) && 551 !IS_KABYLAKE(dev)); 552 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 553 dev_priv->pch_type = PCH_SPT; 554 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 555 WARN_ON(!IS_SKYLAKE(dev) && 556 !IS_KABYLAKE(dev)); 557 } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 558 dev_priv->pch_type = PCH_KBP; 559 DRM_DEBUG_KMS("Found KabyPoint PCH\n"); 560 WARN_ON(!IS_KABYLAKE(dev)); 561 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 562 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 563 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 564 1)) { 565 dev_priv->pch_type = intel_virt_detect_pch(dev); 566 } else 567 continue; 568 569 break; 570 } 571 } 572 if (!pch) 573 DRM_DEBUG_KMS("No PCH found.\n"); 574 575 #if 0 576 pci_dev_put(pch); 577 #endif 578 } 579 580 bool i915_semaphore_is_enabled(struct drm_device *dev) 581 { 582 if (INTEL_INFO(dev)->gen < 6) 583 return false; 584 585 if (i915.semaphores >= 0) 586 return i915.semaphores; 587 588 /* TODO: make semaphores and Execlists play nicely together */ 589 if (i915.enable_execlists) 590 return false; 591 592 /* Until we get further testing... 
*/ 593 if (IS_GEN8(dev)) 594 return false; 595 596 #ifdef CONFIG_INTEL_IOMMU 597 /* Enable semaphores on SNB when IO remapping is off */ 598 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 599 return false; 600 #endif 601 602 return true; 603 } 604 605 #ifdef __DragonFly__ 606 #define IS_BUILTIN(blah) 0 607 #endif 608 609 static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 610 { 611 struct drm_device *dev = dev_priv->dev; 612 struct intel_encoder *encoder; 613 614 drm_modeset_lock_all(dev); 615 for_each_intel_encoder(dev, encoder) 616 if (encoder->suspend) 617 encoder->suspend(encoder); 618 drm_modeset_unlock_all(dev); 619 } 620 621 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 622 bool rpm_resume); 623 static int vlv_suspend_complete(struct drm_i915_private *dev_priv); 624 625 static bool suspend_to_idle(struct drm_i915_private *dev_priv) 626 { 627 #if IS_ENABLED(CONFIG_ACPI_SLEEP) 628 if (acpi_target_system_state() < ACPI_STATE_S3) 629 return true; 630 #endif 631 return false; 632 } 633 634 static int i915_drm_suspend(struct drm_device *dev) 635 { 636 struct drm_i915_private *dev_priv = dev->dev_private; 637 pci_power_t opregion_target_state; 638 int error; 639 640 /* ignore lid events during suspend */ 641 mutex_lock(&dev_priv->modeset_restore_lock); 642 dev_priv->modeset_restore = MODESET_SUSPENDED; 643 mutex_unlock(&dev_priv->modeset_restore_lock); 644 645 disable_rpm_wakeref_asserts(dev_priv); 646 647 /* We do a lot of poking in a lot of registers, make sure they work 648 * properly. */ 649 intel_display_set_init_power(dev_priv, true); 650 651 drm_kms_helper_poll_disable(dev); 652 653 #if 0 654 pci_save_state(dev->pdev); 655 #endif 656 657 error = i915_gem_suspend(dev); 658 if (error) { 659 dev_err(dev->dev, 660 "GEM idle failed, resume might fail\n"); 661 goto out; 662 } 663 664 intel_guc_suspend(dev); 665 666 intel_suspend_gt_powersave(dev); 667 668 intel_display_suspend(dev); 669 670 #if 0 671 intel_dp_mst_suspend(dev); 672 #endif 673 674 intel_runtime_pm_disable_interrupts(dev_priv); 675 intel_hpd_cancel_work(dev_priv); 676 677 intel_suspend_encoders(dev_priv); 678 679 intel_suspend_hw(dev); 680 681 i915_gem_suspend_gtt_mappings(dev); 682 683 i915_save_state(dev); 684 685 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 686 intel_opregion_notify_adapter(dev, opregion_target_state); 687 688 intel_uncore_forcewake_reset(dev, false); 689 intel_opregion_fini(dev); 690 691 #if 0 692 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 693 #endif 694 695 dev_priv->suspend_count++; 696 697 intel_display_set_init_power(dev_priv, false); 698 699 intel_csr_ucode_suspend(dev_priv); 700 701 out: 702 enable_rpm_wakeref_asserts(dev_priv); 703 704 return error; 705 } 706 707 static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) 708 { 709 struct drm_i915_private *dev_priv = drm_dev->dev_private; 710 bool fw_csr; 711 int ret; 712 713 disable_rpm_wakeref_asserts(dev_priv); 714 715 fw_csr = !IS_BROXTON(dev_priv) && 716 suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload; 717 /* 718 * In case of firmware assisted context save/restore don't manually 719 * deinit the power domains. This also means the CSR/DMC firmware will 720 * stay active, it will power down any HW resources as required and 721 * also enable deeper system power states that would be blocked if the 722 * firmware was inactive. 
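 *
 * Concretely, fw_csr can only be true when suspending to idle (S0ix) on a
 * non-Broxton platform with the DMC firmware payload loaded; see the
 * assignment of fw_csr just above this comment.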
723 */ 724 if (!fw_csr) 725 intel_power_domains_suspend(dev_priv); 726 727 ret = 0; 728 if (IS_BROXTON(dev_priv)) 729 bxt_enable_dc9(dev_priv); 730 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 731 hsw_enable_pc8(dev_priv); 732 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 733 ret = vlv_suspend_complete(dev_priv); 734 735 if (ret) { 736 DRM_ERROR("Suspend complete failed: %d\n", ret); 737 if (!fw_csr) 738 intel_power_domains_init_hw(dev_priv, true); 739 740 goto out; 741 } 742 743 #if 0 744 pci_disable_device(drm_dev->pdev); 745 /* 746 * During hibernation on some platforms the BIOS may try to access 747 * the device even though it's already in D3 and hang the machine. So 748 * leave the device in D0 on those platforms and hope the BIOS will 749 * power down the device properly. The issue was seen on multiple old 750 * GENs with different BIOS vendors, so having an explicit blacklist 751 * is inpractical; apply the workaround on everything pre GEN6. The 752 * platforms where the issue was seen: 753 * Lenovo Thinkpad X301, X61s, X60, T60, X41 754 * Fujitsu FSC S7110 755 * Acer Aspire 1830T 756 */ 757 if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6)) 758 pci_set_power_state(drm_dev->pdev, PCI_D3hot); 759 #endif 760 761 dev_priv->suspended_to_idle = suspend_to_idle(dev_priv); 762 763 out: 764 enable_rpm_wakeref_asserts(dev_priv); 765 766 return ret; 767 } 768 769 int i915_suspend_switcheroo(device_t kdev) 770 { 771 struct drm_device *dev = device_get_softc(kdev); 772 int error; 773 774 if (!dev || !dev->dev_private) { 775 DRM_ERROR("dev: %p\n", dev); 776 DRM_ERROR("DRM not initialized, aborting suspend.\n"); 777 return -ENODEV; 778 } 779 780 #if 0 781 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND && 782 state.event != PM_EVENT_FREEZE)) 783 return -EINVAL; 784 #endif 785 786 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 787 return 0; 788 789 error = i915_drm_suspend(dev); 790 if (error) 791 return error; 792 793 return i915_drm_suspend_late(dev, false); 794 } 795 796 static int i915_drm_resume(struct drm_device *dev) 797 { 798 struct drm_i915_private *dev_priv = dev->dev_private; 799 int ret; 800 801 disable_rpm_wakeref_asserts(dev_priv); 802 803 ret = i915_ggtt_enable_hw(dev); 804 if (ret) 805 DRM_ERROR("failed to re-enable GGTT\n"); 806 807 intel_csr_ucode_resume(dev_priv); 808 809 mutex_lock(&dev->struct_mutex); 810 i915_gem_restore_gtt_mappings(dev); 811 mutex_unlock(&dev->struct_mutex); 812 813 i915_restore_state(dev); 814 intel_opregion_setup(dev); 815 816 intel_init_pch_refclk(dev); 817 drm_mode_config_reset(dev); 818 819 /* 820 * Interrupts have to be enabled before any batches are run. If not the 821 * GPU will hang. i915_gem_init_hw() will initiate batches to 822 * update/restore the context. 823 * 824 * Modeset enabling in intel_modeset_init_hw() also needs working 825 * interrupts. 826 */ 827 intel_runtime_pm_enable_interrupts(dev_priv); 828 829 mutex_lock(&dev->struct_mutex); 830 if (i915_gem_init_hw(dev)) { 831 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); 832 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 833 } 834 mutex_unlock(&dev->struct_mutex); 835 836 intel_guc_resume(dev); 837 838 intel_modeset_init_hw(dev); 839 840 spin_lock_irq(&dev_priv->irq_lock); 841 if (dev_priv->display.hpd_irq_setup) 842 dev_priv->display.hpd_irq_setup(dev); 843 spin_unlock_irq(&dev_priv->irq_lock); 844 845 intel_dp_mst_resume(dev); 846 847 intel_display_resume(dev); 848 849 /* 850 * ... 
but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);
	/* Config may have changed between suspend and resume */
	drm_helper_hpd_irq_event(dev);

	intel_opregion_init(dev);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	drm_kms_helper_poll_enable(dev);

	enable_rpm_wakeref_asserts(dev_priv);

	return 0;
}

static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
#if 0
	ret = pci_set_power_state(dev->pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		goto out;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
920 */ 921 if (pci_enable_device(dev->pdev)) { 922 ret = -EIO; 923 goto out; 924 } 925 926 pci_set_master(dev->pdev); 927 #endif 928 929 disable_rpm_wakeref_asserts(dev_priv); 930 931 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 932 ret = vlv_resume_prepare(dev_priv, false); 933 if (ret) 934 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", 935 ret); 936 937 intel_uncore_early_sanitize(dev, true); 938 939 if (IS_BROXTON(dev)) { 940 if (!dev_priv->suspended_to_idle) 941 gen9_sanitize_dc_state(dev_priv); 942 bxt_disable_dc9(dev_priv); 943 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 944 hsw_disable_pc8(dev_priv); 945 } 946 947 intel_uncore_sanitize(dev); 948 949 if (IS_BROXTON(dev_priv) || 950 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 951 intel_power_domains_init_hw(dev_priv, true); 952 953 enable_rpm_wakeref_asserts(dev_priv); 954 955 #if 0 956 out: 957 #endif 958 dev_priv->suspended_to_idle = false; 959 960 return ret; 961 } 962 963 int i915_resume_switcheroo(struct drm_device *dev) 964 { 965 int ret; 966 967 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 968 return 0; 969 970 ret = i915_drm_resume_early(dev); 971 if (ret) 972 return ret; 973 974 return i915_drm_resume(dev); 975 } 976 977 struct intel_device_info * 978 i915_get_device_id(int device) 979 { 980 const struct pci_device_id *did; 981 982 for (did = &pciidlist[0]; did->device != 0; did++) { 983 if (did->device != device) 984 continue; 985 return (struct intel_device_info *)did->driver_data; 986 } 987 return (NULL); 988 } 989 990 static int i915_sysctl_init(struct drm_device *dev, struct sysctl_ctx_list *ctx, 991 struct sysctl_oid *top) 992 { 993 return drm_add_busid_modesetting(dev, ctx, top); 994 } 995 996 extern devclass_t drm_devclass; 997 998 /** 999 * i915_reset - reset chip after a hang 1000 * @dev: drm device to reset 1001 * 1002 * Reset the chip. Useful if a hang is detected. Returns zero on successful 1003 * reset or otherwise an error code. 1004 * 1005 * Procedure is fairly simple: 1006 * - reset the chip using the reset reg 1007 * - re-init context state 1008 * - re-init hardware status page 1009 * - re-init ring buffer 1010 * - re-init interrupt state 1011 * - re-init display 1012 */ 1013 int i915_reset(struct drm_device *dev) 1014 { 1015 struct drm_i915_private *dev_priv = dev->dev_private; 1016 struct i915_gpu_error *error = &dev_priv->gpu_error; 1017 unsigned reset_counter; 1018 int ret; 1019 1020 intel_reset_gt_powersave(dev); 1021 1022 mutex_lock(&dev->struct_mutex); 1023 1024 /* Clear any previous failed attempts at recovery. Time to try again. */ 1025 atomic_andnot(I915_WEDGED, &error->reset_counter); 1026 1027 /* Clear the reset-in-progress flag and increment the reset epoch. */ 1028 reset_counter = atomic_inc_return(&error->reset_counter); 1029 if (WARN_ON(__i915_reset_in_progress(reset_counter))) { 1030 ret = -EIO; 1031 goto error; 1032 } 1033 1034 i915_gem_reset(dev); 1035 1036 ret = intel_gpu_reset(dev, ALL_ENGINES); 1037 1038 /* Also reset the gpu hangman. 
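 * If the hang was simulated (error->stop_rings != 0), the code below only
 * clears that simulation state and ignores -ENODEV from intel_gpu_reset(),
 * since no real reset implementation is needed for a simulated hang.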
*/ 1039 if (error->stop_rings != 0) { 1040 DRM_INFO("Simulated gpu hang, resetting stop_rings\n"); 1041 error->stop_rings = 0; 1042 if (ret == -ENODEV) { 1043 DRM_INFO("Reset not implemented, but ignoring " 1044 "error for simulated gpu hangs\n"); 1045 ret = 0; 1046 } 1047 } 1048 1049 if (i915_stop_ring_allow_warn(dev_priv)) 1050 pr_notice("drm/i915: Resetting chip after gpu hang\n"); 1051 1052 if (ret) { 1053 if (ret != -ENODEV) 1054 DRM_ERROR("Failed to reset chip: %i\n", ret); 1055 else 1056 DRM_DEBUG_DRIVER("GPU reset disabled\n"); 1057 goto error; 1058 } 1059 1060 intel_overlay_reset(dev_priv); 1061 1062 /* Ok, now get things going again... */ 1063 1064 /* 1065 * Everything depends on having the GTT running, so we need to start 1066 * there. Fortunately we don't need to do this unless we reset the 1067 * chip at a PCI level. 1068 * 1069 * Next we need to restore the context, but we don't use those 1070 * yet either... 1071 * 1072 * Ring buffer needs to be re-initialized in the KMS case, or if X 1073 * was running at the time of the reset (i.e. we weren't VT 1074 * switched away). 1075 */ 1076 ret = i915_gem_init_hw(dev); 1077 if (ret) { 1078 DRM_ERROR("Failed hw init on reset %d\n", ret); 1079 goto error; 1080 } 1081 1082 mutex_unlock(&dev->struct_mutex); 1083 1084 /* 1085 * rps/rc6 re-init is necessary to restore state lost after the 1086 * reset and the re-install of gt irqs. Skip for ironlake per 1087 * previous concerns that it doesn't respond well to some forms 1088 * of re-init after reset. 1089 */ 1090 if (INTEL_INFO(dev)->gen > 5) 1091 intel_enable_gt_powersave(dev); 1092 1093 return 0; 1094 1095 error: 1096 atomic_or(I915_WEDGED, &error->reset_counter); 1097 mutex_unlock(&dev->struct_mutex); 1098 return ret; 1099 } 1100 1101 #if 0 1102 static void 1103 i915_pci_remove(struct pci_dev *pdev) 1104 { 1105 struct drm_device *dev = pci_get_drvdata(pdev); 1106 1107 drm_put_dev(dev); 1108 } 1109 1110 static int i915_pm_suspend(struct device *dev) 1111 { 1112 struct pci_dev *pdev = to_pci_dev(dev); 1113 struct drm_device *drm_dev = pci_get_drvdata(pdev); 1114 1115 if (!drm_dev || !drm_dev->dev_private) { 1116 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 1117 return -ENODEV; 1118 } 1119 1120 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1121 return 0; 1122 1123 return i915_drm_suspend(drm_dev); 1124 } 1125 1126 static int i915_pm_suspend_late(struct device *dev) 1127 { 1128 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1129 1130 /* 1131 * We have a suspend ordering issue with the snd-hda driver also 1132 * requiring our device to be power up. Due to the lack of a 1133 * parent/child relationship we currently solve this with an late 1134 * suspend hook. 1135 * 1136 * FIXME: This should be solved with a special hdmi sink device or 1137 * similar so that power domains can be employed. 
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, false);
}

static int i915_pm_poweroff_late(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(drm_dev, true);
}

static int i915_pm_resume_early(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_to_i915(dev)->dev;

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(drm_dev);
}
#endif

/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *   used internally by the HW in a way that doesn't depend on
 *   keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that, based on the above 3 criteria, can be
 * safely ignored, we save/restore all others, practically treating the HW
 * context as a black-box for the driver. Further investigation is needed to
 * reduce the saved/restored registers even further, by following the same 3
 * criteria.
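 *
 * The two helpers below are kept symmetric: every register read into
 * struct vlv_s0ix_state by vlv_save_gunit_s0ix_state() has a matching write
 * in vlv_restore_gunit_s0ix_state(), e.g.
 *
 *	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);		(save)
 *	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);		(restore)
 *
 * with the GT allow-wake and GFX force-clock control bits handled specially,
 * as noted near the end of the restore function.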
1201 */ 1202 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) 1203 { 1204 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 1205 int i; 1206 1207 /* GAM 0x4000-0x4770 */ 1208 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK); 1209 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL); 1210 s->arb_mode = I915_READ(ARB_MODE); 1211 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0); 1212 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); 1213 1214 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 1215 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i)); 1216 1217 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); 1218 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); 1219 1220 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); 1221 s->ecochk = I915_READ(GAM_ECOCHK); 1222 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7); 1223 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7); 1224 1225 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR); 1226 1227 /* MBC 0x9024-0x91D0, 0x8500 */ 1228 s->g3dctl = I915_READ(VLV_G3DCTL); 1229 s->gsckgctl = I915_READ(VLV_GSCKGCTL); 1230 s->mbctl = I915_READ(GEN6_MBCTL); 1231 1232 /* GCP 0x9400-0x9424, 0x8100-0x810C */ 1233 s->ucgctl1 = I915_READ(GEN6_UCGCTL1); 1234 s->ucgctl3 = I915_READ(GEN6_UCGCTL3); 1235 s->rcgctl1 = I915_READ(GEN6_RCGCTL1); 1236 s->rcgctl2 = I915_READ(GEN6_RCGCTL2); 1237 s->rstctl = I915_READ(GEN6_RSTCTL); 1238 s->misccpctl = I915_READ(GEN7_MISCCPCTL); 1239 1240 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */ 1241 s->gfxpause = I915_READ(GEN6_GFXPAUSE); 1242 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC); 1243 s->rpdeuc = I915_READ(GEN6_RPDEUC); 1244 s->ecobus = I915_READ(ECOBUS); 1245 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL); 1246 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT); 1247 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW); 1248 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR); 1249 s->rcedata = I915_READ(VLV_RCEDATA); 1250 s->spare2gh = I915_READ(VLV_SPAREG2H); 1251 1252 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */ 1253 s->gt_imr = I915_READ(GTIMR); 1254 s->gt_ier = I915_READ(GTIER); 1255 s->pm_imr = I915_READ(GEN6_PMIMR); 1256 s->pm_ier = I915_READ(GEN6_PMIER); 1257 1258 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) 1259 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i)); 1260 1261 /* GT SA CZ domain, 0x100000-0x138124 */ 1262 s->tilectl = I915_READ(TILECTL); 1263 s->gt_fifoctl = I915_READ(GTFIFOCTL); 1264 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL); 1265 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 1266 s->pmwgicz = I915_READ(VLV_PMWGICZ); 1267 1268 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 1269 s->gu_ctl0 = I915_READ(VLV_GU_CTL0); 1270 s->gu_ctl1 = I915_READ(VLV_GU_CTL1); 1271 s->pcbr = I915_READ(VLV_PCBR); 1272 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); 1273 1274 /* 1275 * Not saving any of: 1276 * DFT, 0x9800-0x9EC0 1277 * SARB, 0xB000-0xB1FC 1278 * GAC, 0x5208-0x524C, 0x14000-0x14C000 1279 * PCI CFG 1280 */ 1281 } 1282 1283 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) 1284 { 1285 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state; 1286 u32 val; 1287 int i; 1288 1289 /* GAM 0x4000-0x4770 */ 1290 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark); 1291 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl); 1292 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16)); 1293 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0); 1294 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); 1295 1296 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) 1297 I915_WRITE(GEN7_LRA_LIMITS(i), 
s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
	/*
	 * Preserve the GT allow wake and GFX force clock bit, they are not to
	 * be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
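	 *
	 * This is done with the read-modify-write sequences just below: the
	 * live VLV_GTLC_ALLOWWAKEREQ and VLV_GFX_CLK_FORCE_ON_BIT values are
	 * kept and only the remaining bits are taken from the saved copy.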
1350 */ 1351 val = I915_READ(VLV_GTLC_WAKE_CTRL); 1352 val &= VLV_GTLC_ALLOWWAKEREQ; 1353 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ; 1354 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 1355 1356 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 1357 val &= VLV_GFX_CLK_FORCE_ON_BIT; 1358 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT; 1359 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 1360 1361 I915_WRITE(VLV_PMWGICZ, s->pmwgicz); 1362 1363 /* Gunit-Display CZ domain, 0x182028-0x1821CF */ 1364 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); 1365 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); 1366 I915_WRITE(VLV_PCBR, s->pcbr); 1367 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); 1368 } 1369 1370 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) 1371 { 1372 u32 val; 1373 int err; 1374 1375 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) 1376 1377 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 1378 val &= ~VLV_GFX_CLK_FORCE_ON_BIT; 1379 if (force_on) 1380 val |= VLV_GFX_CLK_FORCE_ON_BIT; 1381 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val); 1382 1383 if (!force_on) 1384 return 0; 1385 1386 err = wait_for(COND, 20); 1387 if (err) 1388 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", 1389 I915_READ(VLV_GTLC_SURVIVABILITY_REG)); 1390 1391 return err; 1392 #undef COND 1393 } 1394 1395 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) 1396 { 1397 u32 val; 1398 int err = 0; 1399 1400 val = I915_READ(VLV_GTLC_WAKE_CTRL); 1401 val &= ~VLV_GTLC_ALLOWWAKEREQ; 1402 if (allow) 1403 val |= VLV_GTLC_ALLOWWAKEREQ; 1404 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 1405 POSTING_READ(VLV_GTLC_WAKE_CTRL); 1406 1407 #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \ 1408 allow) 1409 err = wait_for(COND, 1); 1410 if (err) 1411 DRM_ERROR("timeout disabling GT waking\n"); 1412 return err; 1413 #undef COND 1414 } 1415 1416 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, 1417 bool wait_for_on) 1418 { 1419 u32 mask; 1420 u32 val; 1421 int err; 1422 1423 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; 1424 val = wait_for_on ? mask : 0; 1425 #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) 1426 if (COND) 1427 return 0; 1428 1429 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", 1430 onoff(wait_for_on), 1431 I915_READ(VLV_GTLC_PW_STATUS)); 1432 1433 /* 1434 * RC6 transitioning can be delayed up to 2 msec (see 1435 * valleyview_enable_rps), use 3 msec for safety. 1436 */ 1437 err = wait_for(COND, 3); 1438 if (err) 1439 DRM_ERROR("timeout waiting for GT wells to go %s\n", 1440 onoff(wait_for_on)); 1441 1442 return err; 1443 #undef COND 1444 } 1445 1446 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) 1447 { 1448 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR)) 1449 return; 1450 1451 DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n"); 1452 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR); 1453 } 1454 1455 static int vlv_suspend_complete(struct drm_i915_private *dev_priv) 1456 { 1457 u32 mask; 1458 int err; 1459 1460 /* 1461 * Bspec defines the following GT well on flags as debug only, so 1462 * don't treat them as hard failures. 
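 * (This is why the return value of the vlv_wait_for_gt_wells() call right
 * below is deliberately discarded with a (void) cast.)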
1463 */ 1464 (void)vlv_wait_for_gt_wells(dev_priv, false); 1465 1466 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS; 1467 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask); 1468 1469 vlv_check_no_gt_access(dev_priv); 1470 1471 err = vlv_force_gfx_clock(dev_priv, true); 1472 if (err) 1473 goto err1; 1474 1475 err = vlv_allow_gt_wake(dev_priv, false); 1476 if (err) 1477 goto err2; 1478 1479 if (!IS_CHERRYVIEW(dev_priv)) 1480 vlv_save_gunit_s0ix_state(dev_priv); 1481 1482 err = vlv_force_gfx_clock(dev_priv, false); 1483 if (err) 1484 goto err2; 1485 1486 return 0; 1487 1488 err2: 1489 /* For safety always re-enable waking and disable gfx clock forcing */ 1490 vlv_allow_gt_wake(dev_priv, true); 1491 err1: 1492 vlv_force_gfx_clock(dev_priv, false); 1493 1494 return err; 1495 } 1496 1497 static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 1498 bool rpm_resume) 1499 { 1500 struct drm_device *dev = dev_priv->dev; 1501 int err; 1502 int ret; 1503 1504 /* 1505 * If any of the steps fail just try to continue, that's the best we 1506 * can do at this point. Return the first error code (which will also 1507 * leave RPM permanently disabled). 1508 */ 1509 ret = vlv_force_gfx_clock(dev_priv, true); 1510 1511 if (!IS_CHERRYVIEW(dev_priv)) 1512 vlv_restore_gunit_s0ix_state(dev_priv); 1513 1514 err = vlv_allow_gt_wake(dev_priv, true); 1515 if (!ret) 1516 ret = err; 1517 1518 err = vlv_force_gfx_clock(dev_priv, false); 1519 if (!ret) 1520 ret = err; 1521 1522 vlv_check_no_gt_access(dev_priv); 1523 1524 if (rpm_resume) { 1525 intel_init_clock_gating(dev); 1526 i915_gem_restore_fences(dev); 1527 } 1528 1529 return ret; 1530 } 1531 1532 #if 0 1533 static int intel_runtime_suspend(struct device *device) 1534 { 1535 struct pci_dev *pdev = to_pci_dev(device); 1536 struct drm_device *dev = pci_get_drvdata(pdev); 1537 struct drm_i915_private *dev_priv = dev->dev_private; 1538 int ret; 1539 1540 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) 1541 return -ENODEV; 1542 1543 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 1544 return -ENODEV; 1545 1546 DRM_DEBUG_KMS("Suspending device\n"); 1547 1548 /* 1549 * We could deadlock here in case another thread holding struct_mutex 1550 * calls RPM suspend concurrently, since the RPM suspend will wait 1551 * first for this RPM suspend to finish. In this case the concurrent 1552 * RPM resume will be followed by its RPM suspend counterpart. Still 1553 * for consistency return -EAGAIN, which will reschedule this suspend. 1554 */ 1555 if (!mutex_trylock(&dev->struct_mutex)) { 1556 DRM_DEBUG_KMS("device lock contention, deffering suspend\n"); 1557 /* 1558 * Bump the expiration timestamp, otherwise the suspend won't 1559 * be rescheduled. 1560 */ 1561 pm_runtime_mark_last_busy(device); 1562 1563 return -EAGAIN; 1564 } 1565 1566 disable_rpm_wakeref_asserts(dev_priv); 1567 1568 /* 1569 * We are safe here against re-faults, since the fault handler takes 1570 * an RPM reference. 
1571 */ 1572 i915_gem_release_all_mmaps(dev_priv); 1573 mutex_unlock(&dev->struct_mutex); 1574 1575 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 1576 1577 intel_guc_suspend(dev); 1578 1579 intel_suspend_gt_powersave(dev); 1580 intel_runtime_pm_disable_interrupts(dev_priv); 1581 1582 ret = 0; 1583 if (IS_BROXTON(dev_priv)) { 1584 bxt_display_core_uninit(dev_priv); 1585 bxt_enable_dc9(dev_priv); 1586 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 1587 hsw_enable_pc8(dev_priv); 1588 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1589 ret = vlv_suspend_complete(dev_priv); 1590 } 1591 1592 if (ret) { 1593 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 1594 intel_runtime_pm_enable_interrupts(dev_priv); 1595 1596 enable_rpm_wakeref_asserts(dev_priv); 1597 1598 return ret; 1599 } 1600 1601 intel_uncore_forcewake_reset(dev, false); 1602 1603 enable_rpm_wakeref_asserts(dev_priv); 1604 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1605 1606 if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv)) 1607 DRM_ERROR("Unclaimed access detected prior to suspending\n"); 1608 1609 dev_priv->pm.suspended = true; 1610 1611 /* 1612 * FIXME: We really should find a document that references the arguments 1613 * used below! 1614 */ 1615 if (IS_BROADWELL(dev)) { 1616 /* 1617 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 1618 * being detected, and the call we do at intel_runtime_resume() 1619 * won't be able to restore them. Since PCI_D3hot matches the 1620 * actual specification and appears to be working, use it. 1621 */ 1622 intel_opregion_notify_adapter(dev, PCI_D3hot); 1623 } else { 1624 /* 1625 * current versions of firmware which depend on this opregion 1626 * notification have repurposed the D1 definition to mean 1627 * "runtime suspended" vs. what you would normally expect (D3) 1628 * to distinguish it from notifications that might be sent via 1629 * the suspend path. 
1630 */ 1631 intel_opregion_notify_adapter(dev, PCI_D1); 1632 } 1633 1634 assert_forcewakes_inactive(dev_priv); 1635 1636 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) 1637 intel_hpd_poll_init(dev_priv); 1638 1639 DRM_DEBUG_KMS("Device suspended\n"); 1640 return 0; 1641 } 1642 1643 static int intel_runtime_resume(struct device *device) 1644 { 1645 struct pci_dev *pdev = to_pci_dev(device); 1646 struct drm_device *dev = pci_get_drvdata(pdev); 1647 struct drm_i915_private *dev_priv = dev->dev_private; 1648 int ret = 0; 1649 1650 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 1651 return -ENODEV; 1652 1653 DRM_DEBUG_KMS("Resuming device\n"); 1654 1655 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 1656 disable_rpm_wakeref_asserts(dev_priv); 1657 1658 intel_opregion_notify_adapter(dev, PCI_D0); 1659 dev_priv->pm.suspended = false; 1660 if (intel_uncore_unclaimed_mmio(dev_priv)) 1661 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 1662 1663 intel_guc_resume(dev); 1664 1665 if (IS_GEN6(dev_priv)) 1666 intel_init_pch_refclk(dev); 1667 1668 if (IS_BROXTON(dev)) { 1669 bxt_disable_dc9(dev_priv); 1670 bxt_display_core_init(dev_priv, true); 1671 if (dev_priv->csr.dmc_payload && 1672 (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5)) 1673 gen9_enable_dc5(dev_priv); 1674 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 1675 hsw_disable_pc8(dev_priv); 1676 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1677 ret = vlv_resume_prepare(dev_priv, true); 1678 } 1679 1680 /* 1681 * No point of rolling back things in case of an error, as the best 1682 * we can do is to hope that things will still work (and disable RPM). 1683 */ 1684 i915_gem_init_swizzling(dev); 1685 gen6_update_ring_freq(dev); 1686 1687 intel_runtime_pm_enable_interrupts(dev_priv); 1688 1689 /* 1690 * On VLV/CHV display interrupts are part of the display 1691 * power well, so hpd is reinitialized from there. For 1692 * everyone else do it here. 
1693 */ 1694 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 1695 intel_hpd_init(dev_priv); 1696 1697 intel_enable_gt_powersave(dev); 1698 1699 enable_rpm_wakeref_asserts(dev_priv); 1700 1701 if (ret) 1702 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); 1703 else 1704 DRM_DEBUG_KMS("Device resumed\n"); 1705 1706 return ret; 1707 } 1708 1709 static const struct dev_pm_ops i915_pm_ops = { 1710 /* 1711 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 1712 * PMSG_RESUME] 1713 */ 1714 .suspend = i915_pm_suspend, 1715 .suspend_late = i915_pm_suspend_late, 1716 .resume_early = i915_pm_resume_early, 1717 .resume = i915_pm_resume, 1718 1719 /* 1720 * S4 event handlers 1721 * @freeze, @freeze_late : called (1) before creating the 1722 * hibernation image [PMSG_FREEZE] and 1723 * (2) after rebooting, before restoring 1724 * the image [PMSG_QUIESCE] 1725 * @thaw, @thaw_early : called (1) after creating the hibernation 1726 * image, before writing it [PMSG_THAW] 1727 * and (2) after failing to create or 1728 * restore the image [PMSG_RECOVER] 1729 * @poweroff, @poweroff_late: called after writing the hibernation 1730 * image, before rebooting [PMSG_HIBERNATE] 1731 * @restore, @restore_early : called after rebooting and restoring the 1732 * hibernation image [PMSG_RESTORE] 1733 */ 1734 .freeze = i915_pm_suspend, 1735 .freeze_late = i915_pm_suspend_late, 1736 .thaw_early = i915_pm_resume_early, 1737 .thaw = i915_pm_resume, 1738 .poweroff = i915_pm_suspend, 1739 .poweroff_late = i915_pm_poweroff_late, 1740 .restore_early = i915_pm_resume_early, 1741 .restore = i915_pm_resume, 1742 1743 /* S0ix (via runtime suspend) event handlers */ 1744 .runtime_suspend = intel_runtime_suspend, 1745 .runtime_resume = intel_runtime_resume, 1746 }; 1747 1748 static const struct vm_operations_struct i915_gem_vm_ops = { 1749 .fault = i915_gem_fault, 1750 .open = drm_gem_vm_open, 1751 .close = drm_gem_vm_close, 1752 }; 1753 #endif 1754 1755 static const struct file_operations i915_driver_fops = { 1756 .owner = THIS_MODULE, 1757 #if 0 1758 .open = drm_open, 1759 .release = drm_release, 1760 .unlocked_ioctl = drm_ioctl, 1761 .mmap = drm_gem_mmap, 1762 .poll = drm_poll, 1763 .read = drm_read, 1764 #ifdef CONFIG_COMPAT 1765 .compat_ioctl = i915_compat_ioctl, 1766 #endif 1767 .llseek = noop_llseek, 1768 #endif 1769 }; 1770 1771 static struct cdev_pager_ops i915_gem_vm_ops = { 1772 .cdev_pg_fault = i915_gem_fault, 1773 .cdev_pg_ctor = i915_gem_pager_ctor, 1774 .cdev_pg_dtor = i915_gem_pager_dtor 1775 }; 1776 1777 static struct drm_driver driver = { 1778 /* Don't use MTRRs here; the Xserver or userspace app should 1779 * deal with them for Intel hardware. 
1780 */ 1781 .driver_features = 1782 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 1783 DRIVER_RENDER | DRIVER_MODESET, 1784 .load = i915_driver_load, 1785 .unload = i915_driver_unload, 1786 .open = i915_driver_open, 1787 .lastclose = i915_driver_lastclose, 1788 .preclose = i915_driver_preclose, 1789 .postclose = i915_driver_postclose, 1790 .set_busid = drm_pci_set_busid, 1791 1792 #if defined(CONFIG_DEBUG_FS) 1793 .debugfs_init = i915_debugfs_init, 1794 .debugfs_cleanup = i915_debugfs_cleanup, 1795 #endif 1796 .gem_free_object = i915_gem_free_object, 1797 .gem_vm_ops = &i915_gem_vm_ops, 1798 1799 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 1800 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 1801 .gem_prime_export = i915_gem_prime_export, 1802 .gem_prime_import = i915_gem_prime_import, 1803 1804 .dumb_create = i915_gem_dumb_create, 1805 .dumb_map_offset = i915_gem_mmap_gtt, 1806 .dumb_destroy = drm_gem_dumb_destroy, 1807 .ioctls = i915_ioctls, 1808 .sysctl_init = i915_sysctl_init, 1809 .fops = &i915_driver_fops, 1810 .name = DRIVER_NAME, 1811 .desc = DRIVER_DESC, 1812 .date = DRIVER_DATE, 1813 .major = DRIVER_MAJOR, 1814 .minor = DRIVER_MINOR, 1815 .patchlevel = DRIVER_PATCHLEVEL, 1816 }; 1817 1818 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1819 { 1820 struct intel_device_info *intel_info = 1821 (struct intel_device_info *) ent->driver_data; 1822 1823 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) { 1824 DRM_INFO("This hardware requires preliminary hardware support.\n" 1825 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n"); 1826 return -ENODEV; 1827 } 1828 1829 /* Only bind to function 0 of the device. Early generations 1830 * used function 1 as a placeholder for multi-head. This causes 1831 * us confusion instead, especially on the systems where both 1832 * functions have the same PCI-ID! 1833 */ 1834 if (PCI_FUNC(pdev->devfn)) 1835 return -ENODEV; 1836 1837 /* 1838 * apple-gmux is needed on dual GPU MacBook Pro 1839 * to probe the panel if we're the inactive GPU. 1840 */ 1841 #if 0 1842 if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) && 1843 apple_gmux_present() && pdev != vga_default_device() && 1844 !vga_switcheroo_handler_flags()) 1845 return -EPROBE_DEFER; 1846 #endif 1847 1848 return drm_get_pci_dev(pdev, ent, &driver); 1849 } 1850 1851 static int 1852 i915_pci_probe_dfly(device_t kdev) 1853 { 1854 int device, i = 0; 1855 const struct pci_device_id *ent; 1856 static struct pci_dev *pdev = NULL; 1857 static device_t bsddev; 1858 1859 if (pci_get_class(kdev) != PCIC_DISPLAY) 1860 return ENXIO; 1861 1862 if (pci_get_vendor(kdev) != PCI_VENDOR_ID_INTEL) 1863 return ENXIO; 1864 1865 device = pci_get_device(kdev); 1866 1867 for (i = 0; pciidlist[i].device != 0; i++) { 1868 if (pciidlist[i].device == device) { 1869 ent = &pciidlist[i]; 1870 goto found; 1871 } 1872 } 1873 1874 return ENXIO; 1875 found: 1876 if (!strcmp(device_get_name(kdev), "drmsub")) 1877 bsddev = device_get_parent(kdev); 1878 else 1879 bsddev = kdev; 1880 1881 drm_init_pdev(bsddev, &pdev); 1882 1883 /* Print the contents of pdev struct. 
 */
	drm_print_pdev(pdev);

	return i915_pci_probe(pdev, ent);
}

static int i915_driver_attach(device_t kdev)
{
	return 0;
}

static device_method_t i915_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		i915_pci_probe_dfly),
	DEVMETHOD(device_attach,	i915_driver_attach),
	DEVMETHOD(device_suspend,	i915_suspend_switcheroo),
	DEVMETHOD(device_resume,	i915_resume_switcheroo),
	DEVMETHOD(device_detach,	drm_release),
	DEVMETHOD_END
};

static driver_t i915_driver = {
	"drm",
	i915_methods,
	sizeof(struct drm_device)
};

static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * Enable KMS by default, unless explicitly overridden by
	 * either the i915.modeset parameter or by the
	 * vga_text_mode_force boot option.
	 */

	if (i915.modeset == 0)
		driver.driver_features &= ~DRIVER_MODESET;

	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
	}

	if (i915.nuclear_pageflip)
		driver.driver_features |= DRIVER_ATOMIC;

#if 0
	return drm_pci_init(&driver, &i915_pci_driver);
#else
	return 1;
#endif
}

#if 0
static void __exit i915_exit(void)
{
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */

	drm_pci_exit(&driver, &i915_pci_driver);
}
#endif

module_init(i915_init);

DRIVER_MODULE_ORDERED(i915, vgapci, i915_driver, drm_devclass, NULL, NULL, SI_ORDER_ANY);
MODULE_DEPEND(i915, drm, 1, 1, 1);
#ifdef CONFIG_ACPI
MODULE_DEPEND(i915, acpi, 1, 1, 1);
#endif