1 /* $OpenBSD: i915_irq.c,v 1.30 2015/12/01 20:41:32 kettenis Exp $ */ 2 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 3 */ 4 /* 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 31 32 #include <dev/pci/drm/drmP.h> 33 #include <dev/pci/drm/i915_drm.h> 34 #include "i915_drv.h" 35 #include "i915_trace.h" 36 #include "intel_drv.h" 37 38 static const u32 hpd_ibx[] = { 39 [HPD_CRT] = SDE_CRT_HOTPLUG, 40 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 41 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 42 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 43 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 44 }; 45 46 static const u32 hpd_cpt[] = { 47 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 48 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 49 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 50 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 51 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 52 }; 53 54 static const u32 hpd_mask_i915[] = { 55 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 56 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 57 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 58 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 59 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 60 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 61 }; 62 63 static const u32 hpd_status_g4x[] = { 64 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 65 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 66 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 67 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 68 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 69 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 70 }; 71 72 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 73 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 74 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 75 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 76 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 77 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 78 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 79 }; 80 81 /* For display hotplug interrupt */ 82 static void 83 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 84 { 85 assert_spin_locked(&dev_priv->irq_lock); 86 87 if (dev_priv->pc8.irqs_disabled) { 88 WARN(1, "IRQs disabled\n"); 89 dev_priv->pc8.regsave.deimr &= ~mask; 90 return; 91 } 92 93 if ((dev_priv->irq_mask & mask) != 0) { 94 dev_priv->irq_mask &= ~mask; 95 I915_WRITE(DEIMR, dev_priv->irq_mask); 96 POSTING_READ(DEIMR); 97 } 98 } 99 100 static void 101 ironlake_disable_display_irq(drm_i915_private_t 
*dev_priv, u32 mask) 102 { 103 assert_spin_locked(&dev_priv->irq_lock); 104 105 if (dev_priv->pc8.irqs_disabled) { 106 WARN(1, "IRQs disabled\n"); 107 dev_priv->pc8.regsave.deimr |= mask; 108 return; 109 } 110 111 if ((dev_priv->irq_mask & mask) != mask) { 112 dev_priv->irq_mask |= mask; 113 I915_WRITE(DEIMR, dev_priv->irq_mask); 114 POSTING_READ(DEIMR); 115 } 116 } 117 118 /** 119 * ilk_update_gt_irq - update GTIMR 120 * @dev_priv: driver private 121 * @interrupt_mask: mask of interrupt bits to update 122 * @enabled_irq_mask: mask of interrupt bits to enable 123 */ 124 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 125 uint32_t interrupt_mask, 126 uint32_t enabled_irq_mask) 127 { 128 assert_spin_locked(&dev_priv->irq_lock); 129 130 if (dev_priv->pc8.irqs_disabled) { 131 WARN(1, "IRQs disabled\n"); 132 dev_priv->pc8.regsave.gtimr &= ~interrupt_mask; 133 dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask & 134 interrupt_mask); 135 return; 136 } 137 138 dev_priv->gt_irq_mask &= ~interrupt_mask; 139 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 140 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 141 POSTING_READ(GTIMR); 142 } 143 144 void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 145 { 146 ilk_update_gt_irq(dev_priv, mask, mask); 147 } 148 149 void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 150 { 151 ilk_update_gt_irq(dev_priv, mask, 0); 152 } 153 154 /** 155 * snb_update_pm_irq - update GEN6_PMIMR 156 * @dev_priv: driver private 157 * @interrupt_mask: mask of interrupt bits to update 158 * @enabled_irq_mask: mask of interrupt bits to enable 159 */ 160 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 161 uint32_t interrupt_mask, 162 uint32_t enabled_irq_mask) 163 { 164 uint32_t new_val; 165 166 assert_spin_locked(&dev_priv->irq_lock); 167 168 if (dev_priv->pc8.irqs_disabled) { 169 WARN(1, "IRQs disabled\n"); 170 dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask; 171 dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask & 172 interrupt_mask); 173 return; 174 } 175 176 new_val = dev_priv->pm_irq_mask; 177 new_val &= ~interrupt_mask; 178 new_val |= (~enabled_irq_mask & interrupt_mask); 179 180 if (new_val != dev_priv->pm_irq_mask) { 181 dev_priv->pm_irq_mask = new_val; 182 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 183 POSTING_READ(GEN6_PMIMR); 184 } 185 } 186 187 void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 188 { 189 snb_update_pm_irq(dev_priv, mask, mask); 190 } 191 192 void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 193 { 194 snb_update_pm_irq(dev_priv, mask, 0); 195 } 196 197 static bool ivb_can_enable_err_int(struct drm_device *dev) 198 { 199 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct intel_crtc *crtc; 201 enum pipe pipe; 202 203 assert_spin_locked(&dev_priv->irq_lock); 204 205 for_each_pipe(pipe) { 206 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 207 208 if (crtc->cpu_fifo_underrun_disabled) 209 return false; 210 } 211 212 return true; 213 } 214 215 static bool cpt_can_enable_serr_int(struct drm_device *dev) 216 { 217 struct drm_i915_private *dev_priv = dev->dev_private; 218 enum pipe pipe; 219 struct intel_crtc *crtc; 220 221 assert_spin_locked(&dev_priv->irq_lock); 222 223 for_each_pipe(pipe) { 224 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 225 226 if (crtc->pch_fifo_underrun_disabled) 227 return false; 228 } 229 230 return true; 231 } 232 233 static void 
ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 234 enum pipe pipe, bool enable) 235 { 236 struct drm_i915_private *dev_priv = dev->dev_private; 237 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 238 DE_PIPEB_FIFO_UNDERRUN; 239 240 if (enable) 241 ironlake_enable_display_irq(dev_priv, bit); 242 else 243 ironlake_disable_display_irq(dev_priv, bit); 244 } 245 246 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 247 enum pipe pipe, bool enable) 248 { 249 struct drm_i915_private *dev_priv = dev->dev_private; 250 if (enable) { 251 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 252 253 if (!ivb_can_enable_err_int(dev)) 254 return; 255 256 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 257 } else { 258 bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB); 259 260 /* Change the state _after_ we've read out the current one. */ 261 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 262 263 if (!was_enabled && 264 (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) { 265 DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n", 266 pipe_name(pipe)); 267 } 268 } 269 } 270 271 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 272 enum pipe pipe, bool enable) 273 { 274 struct drm_i915_private *dev_priv = dev->dev_private; 275 276 assert_spin_locked(&dev_priv->irq_lock); 277 278 if (enable) 279 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; 280 else 281 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; 282 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 283 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 284 } 285 286 /** 287 * ibx_display_interrupt_update - update SDEIMR 288 * @dev_priv: driver private 289 * @interrupt_mask: mask of interrupt bits to update 290 * @enabled_irq_mask: mask of interrupt bits to enable 291 */ 292 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 293 uint32_t interrupt_mask, 294 uint32_t enabled_irq_mask) 295 { 296 uint32_t sdeimr = I915_READ(SDEIMR); 297 sdeimr &= ~interrupt_mask; 298 sdeimr |= (~enabled_irq_mask & interrupt_mask); 299 300 assert_spin_locked(&dev_priv->irq_lock); 301 302 if (dev_priv->pc8.irqs_disabled && 303 (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) { 304 WARN(1, "IRQs disabled\n"); 305 dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask; 306 dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask & 307 interrupt_mask); 308 return; 309 } 310 311 I915_WRITE(SDEIMR, sdeimr); 312 POSTING_READ(SDEIMR); 313 } 314 #define ibx_enable_display_interrupt(dev_priv, bits) \ 315 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 316 #define ibx_disable_display_interrupt(dev_priv, bits) \ 317 ibx_display_interrupt_update((dev_priv), (bits), 0) 318 319 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 320 enum transcoder pch_transcoder, 321 bool enable) 322 { 323 struct drm_i915_private *dev_priv = dev->dev_private; 324 uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
		SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
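 *
 * A rough usage sketch (hypothetical caller, not taken from this file):
 * code that expects to provoke a harmless underrun can save the returned
 * state, disable reporting around the critical section, and restore it:
 *
 *	bool old = intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
 *	// ... work that may cause a transient underrun ...
 *	intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, old);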
419 */ 420 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 421 enum transcoder pch_transcoder, 422 bool enable) 423 { 424 struct drm_i915_private *dev_priv = dev->dev_private; 425 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; 426 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 427 unsigned long flags; 428 bool ret; 429 430 /* 431 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT 432 * has only one pch transcoder A that all pipes can use. To avoid racy 433 * pch transcoder -> pipe lookups from interrupt code simply store the 434 * underrun statistics in crtc A. Since we never expose this anywhere 435 * nor use it outside of the fifo underrun code here using the "wrong" 436 * crtc on LPT won't cause issues. 437 */ 438 439 spin_lock_irqsave(&dev_priv->irq_lock, flags); 440 441 ret = !intel_crtc->pch_fifo_underrun_disabled; 442 443 if (enable == ret) 444 goto done; 445 446 intel_crtc->pch_fifo_underrun_disabled = !enable; 447 448 if (HAS_PCH_IBX(dev)) 449 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 450 else 451 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 452 453 done: 454 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 455 return ret; 456 } 457 458 459 void 460 i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) 461 { 462 u32 reg = PIPESTAT(pipe); 463 u32 pipestat = I915_READ(reg) & 0x7fff0000; 464 465 assert_spin_locked(&dev_priv->irq_lock); 466 467 if ((pipestat & mask) == mask) 468 return; 469 470 /* Enable the interrupt, clear any pending status */ 471 pipestat |= mask | (mask >> 16); 472 I915_WRITE(reg, pipestat); 473 POSTING_READ(reg); 474 } 475 476 void 477 i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask) 478 { 479 u32 reg = PIPESTAT(pipe); 480 u32 pipestat = I915_READ(reg) & 0x7fff0000; 481 482 assert_spin_locked(&dev_priv->irq_lock); 483 484 if ((pipestat & mask) == 0) 485 return; 486 487 pipestat &= ~mask; 488 I915_WRITE(reg, pipestat); 489 POSTING_READ(reg); 490 } 491 492 /** 493 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 494 */ 495 static void i915_enable_asle_pipestat(struct drm_device *dev) 496 { 497 drm_i915_private_t *dev_priv = dev->dev_private; 498 unsigned long irqflags; 499 500 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 501 return; 502 503 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 504 505 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE); 506 if (INTEL_INFO(dev)->gen >= 4) 507 i915_enable_pipestat(dev_priv, PIPE_A, 508 PIPE_LEGACY_BLC_EVENT_ENABLE); 509 510 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 511 } 512 513 /** 514 * i915_pipe_enabled - check if a pipe is enabled 515 * @dev: DRM device 516 * @pipe: pipe to check 517 * 518 * Reading certain registers when the pipe is disabled can hang the chip. 519 * Use this routine to make sure the PLL is running and the pipe is active 520 * before reading such registers if unsure. 521 */ 522 static int 523 i915_pipe_enabled(struct drm_device *dev, int pipe) 524 { 525 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 526 527 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 528 /* Locking is horribly broken here, but whatever. 
*/ 529 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 530 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 531 532 return intel_crtc->active; 533 } else { 534 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 535 } 536 } 537 538 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) 539 { 540 /* Gen2 doesn't have a hardware frame counter */ 541 return 0; 542 } 543 544 /* Called from drm generic code, passed a 'crtc', which 545 * we use as a pipe index 546 */ 547 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 548 { 549 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 550 unsigned long high_frame; 551 unsigned long low_frame; 552 u32 high1, high2, low, pixel, vbl_start; 553 554 if (!i915_pipe_enabled(dev, pipe)) { 555 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 556 "pipe %c\n", pipe_name(pipe)); 557 return 0; 558 } 559 560 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 561 struct intel_crtc *intel_crtc = 562 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 563 const struct drm_display_mode *mode = 564 &intel_crtc->config.adjusted_mode; 565 566 vbl_start = mode->crtc_vblank_start * mode->crtc_htotal; 567 } else { 568 enum transcoder cpu_transcoder = (enum transcoder) pipe; 569 u32 htotal; 570 571 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; 572 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; 573 574 vbl_start *= htotal; 575 } 576 577 high_frame = PIPEFRAME(pipe); 578 low_frame = PIPEFRAMEPIXEL(pipe); 579 580 /* 581 * High & low register fields aren't synchronized, so make sure 582 * we get a low value that's stable across two reads of the high 583 * register. 584 */ 585 do { 586 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 587 low = I915_READ(low_frame); 588 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 589 } while (high1 != high2); 590 591 high1 >>= PIPE_FRAME_HIGH_SHIFT; 592 pixel = low & PIPE_PIXEL_MASK; 593 low >>= PIPE_FRAME_LOW_SHIFT; 594 595 /* 596 * The frame counter increments at beginning of active. 597 * Cook up a vblank counter by also checking the pixel 598 * counter against vblank start. 599 */ 600 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 601 } 602 603 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 604 { 605 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 606 int reg = PIPE_FRMCOUNT_GM45(pipe); 607 608 if (!i915_pipe_enabled(dev, pipe)) { 609 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 610 "pipe %c\n", pipe_name(pipe)); 611 return 0; 612 } 613 614 return I915_READ(reg); 615 } 616 617 /* raw reads, only for fast reads of display block, no need for forcewake etc. 
*/ 618 #define __raw_i915_read32(dev_priv__, reg__) bus_space_read_4((dev_priv__)->regs->bst, (dev_priv__)->regs->bsh, (reg__)) 619 620 static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) 621 { 622 struct drm_i915_private *dev_priv = dev->dev_private; 623 uint32_t status; 624 int reg; 625 626 if (INTEL_INFO(dev)->gen >= 8) { 627 status = GEN8_PIPE_VBLANK; 628 reg = GEN8_DE_PIPE_ISR(pipe); 629 } else if (INTEL_INFO(dev)->gen >= 7) { 630 status = DE_PIPE_VBLANK_IVB(pipe); 631 reg = DEISR; 632 } else { 633 status = DE_PIPE_VBLANK(pipe); 634 reg = DEISR; 635 } 636 637 return __raw_i915_read32(dev_priv, reg) & status; 638 } 639 640 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 641 unsigned int flags, int *vpos, int *hpos, 642 ktime_t *stime, ktime_t *etime) 643 { 644 struct drm_i915_private *dev_priv = dev->dev_private; 645 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 646 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 647 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; 648 int position; 649 int vbl_start, vbl_end, htotal, vtotal; 650 bool in_vbl = true; 651 int ret = 0; 652 unsigned long irqflags; 653 654 if (!intel_crtc->active) { 655 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 656 "pipe %c\n", pipe_name(pipe)); 657 return 0; 658 } 659 660 htotal = mode->crtc_htotal; 661 vtotal = mode->crtc_vtotal; 662 vbl_start = mode->crtc_vblank_start; 663 vbl_end = mode->crtc_vblank_end; 664 665 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 666 vbl_start = DIV_ROUND_UP(vbl_start, 2); 667 vbl_end /= 2; 668 vtotal /= 2; 669 } 670 671 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 672 673 /* 674 * Lock uncore.lock, as we will do multiple timing critical raw 675 * register reads, potentially with preemption disabled, so the 676 * following code must not block on uncore.lock. 677 */ 678 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 679 680 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 681 682 /* Get optional system timestamp before query. */ 683 if (stime) 684 *stime = ktime_get(); 685 686 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 687 /* No obvious pixelcount register. Only query vertical 688 * scanout position from Display scan line register. 689 */ 690 if (IS_GEN2(dev)) 691 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 692 else 693 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 694 695 if (HAS_DDI(dev)) { 696 /* 697 * On HSW HDMI outputs there seems to be a 2 line 698 * difference, whereas eDP has the normal 1 line 699 * difference that earlier platforms have. External 700 * DP is unknown. For now just check for the 2 line 701 * difference case on all output types on HSW+. 702 * 703 * This might misinterpret the scanline counter being 704 * one line too far along on eDP, but that's less 705 * dangerous than the alternative since that would lead 706 * the vblank timestamp code astray when it sees a 707 * scanline count before vblank_start during a vblank 708 * interrupt. 709 */ 710 in_vbl = ilk_pipe_in_vblank_locked(dev, pipe); 711 if ((in_vbl && (position == vbl_start - 2 || 712 position == vbl_start - 1)) || 713 (!in_vbl && (position == vbl_end - 2 || 714 position == vbl_end - 1))) 715 position = (position + 2) % vtotal; 716 } else if (HAS_PCH_SPLIT(dev)) { 717 /* 718 * The scanline counter increments at the leading edge 719 * of hsync, ie. 
it completely misses the active portion 720 * of the line. Fix up the counter at both edges of vblank 721 * to get a more accurate picture whether we're in vblank 722 * or not. 723 */ 724 in_vbl = ilk_pipe_in_vblank_locked(dev, pipe); 725 if ((in_vbl && position == vbl_start - 1) || 726 (!in_vbl && position == vbl_end - 1)) 727 position = (position + 1) % vtotal; 728 } else { 729 /* 730 * ISR vblank status bits don't work the way we'd want 731 * them to work on non-PCH platforms (for 732 * ilk_pipe_in_vblank_locked()), and there doesn't 733 * appear any other way to determine if we're currently 734 * in vblank. 735 * 736 * Instead let's assume that we're already in vblank if 737 * we got called from the vblank interrupt and the 738 * scanline counter value indicates that we're on the 739 * line just prior to vblank start. This should result 740 * in the correct answer, unless the vblank interrupt 741 * delivery really got delayed for almost exactly one 742 * full frame/field. 743 */ 744 if (flags & DRM_CALLED_FROM_VBLIRQ && 745 position == vbl_start - 1) { 746 position = (position + 1) % vtotal; 747 748 /* Signal this correction as "applied". */ 749 ret |= 0x8; 750 } 751 } 752 } else { 753 /* Have access to pixelcount since start of frame. 754 * We can split this into vertical and horizontal 755 * scanout position. 756 */ 757 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 758 759 /* convert to pixel counts */ 760 vbl_start *= htotal; 761 vbl_end *= htotal; 762 vtotal *= htotal; 763 } 764 765 /* Get optional system timestamp after query. */ 766 if (etime) 767 *etime = ktime_get(); 768 769 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 770 771 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 772 773 in_vbl = position >= vbl_start && position < vbl_end; 774 775 /* 776 * While in vblank, position will be negative 777 * counting up towards 0 at vbl_end. And outside 778 * vblank, position will be positive counting 779 * up since vbl_end. 780 */ 781 if (position >= vbl_start) 782 position -= vbl_end; 783 else 784 position += vtotal - vbl_end; 785 786 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 787 *vpos = position; 788 *hpos = 0; 789 } else { 790 *vpos = position / htotal; 791 *hpos = position - (*vpos * htotal); 792 } 793 794 /* In vblank? 
*/ 795 if (in_vbl) 796 ret |= DRM_SCANOUTPOS_INVBL; 797 798 return ret; 799 } 800 801 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 802 int *max_error, 803 struct timeval *vblank_time, 804 unsigned flags) 805 { 806 struct drm_crtc *crtc; 807 808 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 809 DRM_ERROR("Invalid crtc %d\n", pipe); 810 return -EINVAL; 811 } 812 813 /* Get drm_crtc to timestamp: */ 814 crtc = intel_get_crtc_for_pipe(dev, pipe); 815 if (crtc == NULL) { 816 DRM_ERROR("Invalid crtc %d\n", pipe); 817 return -EINVAL; 818 } 819 820 if (!crtc->enabled) { 821 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 822 return -EBUSY; 823 } 824 825 /* Helper routine in DRM core does all the work: */ 826 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 827 vblank_time, flags, 828 crtc, 829 &to_intel_crtc(crtc)->config.adjusted_mode); 830 } 831 832 static bool intel_hpd_irq_event(struct drm_device *dev, 833 struct drm_connector *connector) 834 { 835 enum drm_connector_status old_status; 836 837 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 838 old_status = connector->status; 839 840 connector->status = connector->funcs->detect(connector, false); 841 if (old_status == connector->status) 842 return false; 843 844 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 845 connector->base.id, 846 drm_get_connector_name(connector), 847 drm_get_connector_status_name(old_status), 848 drm_get_connector_status_name(connector->status)); 849 850 return true; 851 } 852 853 /* 854 * Handle hotplug events outside the interrupt handler proper. 855 */ 856 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 857 858 static void i915_hotplug_work_func(struct work_struct *work) 859 { 860 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 861 hotplug_work); 862 struct drm_device *dev = dev_priv->dev; 863 struct drm_mode_config *mode_config = &dev->mode_config; 864 struct intel_connector *intel_connector; 865 struct intel_encoder *intel_encoder; 866 struct drm_connector *connector; 867 unsigned long irqflags; 868 bool hpd_disabled = false; 869 bool changed = false; 870 u32 hpd_event_bits; 871 872 /* HPD irq before everything is fully set up. 
*/ 873 if (!dev_priv->enable_hotplug_processing) 874 return; 875 876 mutex_lock(&mode_config->mutex); 877 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 878 879 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 880 881 hpd_event_bits = dev_priv->hpd_event_bits; 882 dev_priv->hpd_event_bits = 0; 883 list_for_each_entry(connector, &mode_config->connector_list, head) { 884 intel_connector = to_intel_connector(connector); 885 intel_encoder = intel_connector->encoder; 886 if (intel_encoder->hpd_pin > HPD_NONE && 887 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 888 connector->polled == DRM_CONNECTOR_POLL_HPD) { 889 DRM_INFO("HPD interrupt storm detected on connector %s: " 890 "switching from hotplug detection to polling\n", 891 drm_get_connector_name(connector)); 892 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 893 connector->polled = DRM_CONNECTOR_POLL_CONNECT 894 | DRM_CONNECTOR_POLL_DISCONNECT; 895 hpd_disabled = true; 896 } 897 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 898 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 899 drm_get_connector_name(connector), intel_encoder->hpd_pin); 900 } 901 } 902 /* if there were no outputs to poll, poll was disabled, 903 * therefore make sure it's enabled when disabling HPD on 904 * some connectors */ 905 if (hpd_disabled) { 906 drm_kms_helper_poll_enable(dev); 907 mod_timer(&dev_priv->hotplug_reenable_timer, 908 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 909 } 910 911 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 912 913 list_for_each_entry(connector, &mode_config->connector_list, head) { 914 intel_connector = to_intel_connector(connector); 915 intel_encoder = intel_connector->encoder; 916 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 917 if (intel_encoder->hot_plug) 918 intel_encoder->hot_plug(intel_encoder); 919 if (intel_hpd_irq_event(dev, connector)) 920 changed = true; 921 } 922 } 923 mutex_unlock(&mode_config->mutex); 924 925 if (changed) 926 drm_kms_helper_hotplug_event(dev); 927 } 928 929 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 930 { 931 drm_i915_private_t *dev_priv = dev->dev_private; 932 u32 busy_up, busy_down, max_avg, min_avg; 933 u8 new_delay; 934 935 spin_lock(&mchdev_lock); 936 937 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 938 939 new_delay = dev_priv->ips.cur_delay; 940 941 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 942 busy_up = I915_READ(RCPREVBSYTUPAVG); 943 busy_down = I915_READ(RCPREVBSYTDNAVG); 944 max_avg = I915_READ(RCBMAXAVG); 945 min_avg = I915_READ(RCBMINAVG); 946 947 /* Handle RCS change request from hw */ 948 if (busy_up > max_avg) { 949 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 950 new_delay = dev_priv->ips.cur_delay - 1; 951 if (new_delay < dev_priv->ips.max_delay) 952 new_delay = dev_priv->ips.max_delay; 953 } else if (busy_down < min_avg) { 954 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 955 new_delay = dev_priv->ips.cur_delay + 1; 956 if (new_delay > dev_priv->ips.min_delay) 957 new_delay = dev_priv->ips.min_delay; 958 } 959 960 if (ironlake_set_drps(dev, new_delay)) 961 dev_priv->ips.cur_delay = new_delay; 962 963 spin_unlock(&mchdev_lock); 964 965 return; 966 } 967 968 static void notify_ring(struct drm_device *dev, 969 struct intel_ring_buffer *ring) 970 { 971 if (ring->obj == NULL) 972 return; 973 974 trace_i915_gem_request_complete(ring); 975 976 wake_up_all(&ring->irq_queue); 977 i915_queue_hangcheck(dev); 978 } 979 980 static void 
gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_delay, dev_priv->rps.max_delay);
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
#ifdef notyet
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
1071 */ 1072 mutex_lock(&dev_priv->dev->struct_mutex); 1073 1074 /* If we've screwed up tracking, just let the interrupt fire again */ 1075 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1076 goto out; 1077 1078 misccpctl = I915_READ(GEN7_MISCCPCTL); 1079 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1080 POSTING_READ(GEN7_MISCCPCTL); 1081 1082 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1083 u32 reg; 1084 1085 slice--; 1086 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1087 break; 1088 1089 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1090 1091 reg = GEN7_L3CDERRST1 + (slice * 0x200); 1092 1093 error_status = I915_READ(reg); 1094 row = GEN7_PARITY_ERROR_ROW(error_status); 1095 bank = GEN7_PARITY_ERROR_BANK(error_status); 1096 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1097 1098 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1099 POSTING_READ(reg); 1100 1101 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1102 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1103 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1104 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1105 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1106 parity_event[5] = NULL; 1107 1108 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1109 KOBJ_CHANGE, parity_event); 1110 1111 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1112 slice, row, bank, subbank); 1113 1114 kfree(parity_event[4]); 1115 kfree(parity_event[3]); 1116 kfree(parity_event[2]); 1117 kfree(parity_event[1]); 1118 } 1119 1120 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1121 1122 out: 1123 WARN_ON(dev_priv->l3_parity.which_slice); 1124 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1125 ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1126 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1127 1128 mutex_unlock(&dev_priv->dev->struct_mutex); 1129 #endif 1130 } 1131 1132 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1133 { 1134 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1135 1136 if (!HAS_L3_DPF(dev)) 1137 return; 1138 1139 spin_lock(&dev_priv->irq_lock); 1140 ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1141 spin_unlock(&dev_priv->irq_lock); 1142 1143 iir &= GT_PARITY_ERROR(dev); 1144 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1145 dev_priv->l3_parity.which_slice |= 1 << 1; 1146 1147 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1148 dev_priv->l3_parity.which_slice |= 1 << 0; 1149 1150 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1151 } 1152 1153 static void ilk_gt_irq_handler(struct drm_device *dev, 1154 struct drm_i915_private *dev_priv, 1155 u32 gt_iir) 1156 { 1157 if (gt_iir & 1158 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1159 notify_ring(dev, &dev_priv->ring[RCS]); 1160 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1161 notify_ring(dev, &dev_priv->ring[VCS]); 1162 } 1163 1164 static void snb_gt_irq_handler(struct drm_device *dev, 1165 struct drm_i915_private *dev_priv, 1166 u32 gt_iir) 1167 { 1168 1169 if (gt_iir & 1170 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1171 notify_ring(dev, &dev_priv->ring[RCS]); 1172 if (gt_iir & GT_BSD_USER_INTERRUPT) 1173 notify_ring(dev, &dev_priv->ring[VCS]); 1174 if (gt_iir & GT_BLT_USER_INTERRUPT) 1175 notify_ring(dev, &dev_priv->ring[BCS]); 1176 1177 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1178 GT_BSD_CS_ERROR_INTERRUPT | 
1179 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 1180 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 1181 i915_handle_error(dev, false); 1182 } 1183 1184 if (gt_iir & GT_PARITY_ERROR(dev)) 1185 ivybridge_parity_error_irq_handler(dev, gt_iir); 1186 } 1187 1188 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, 1189 struct drm_i915_private *dev_priv, 1190 u32 master_ctl) 1191 { 1192 u32 rcs, bcs, vcs; 1193 uint32_t tmp = 0; 1194 irqreturn_t ret = IRQ_NONE; 1195 1196 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1197 tmp = I915_READ(GEN8_GT_IIR(0)); 1198 if (tmp) { 1199 ret = IRQ_HANDLED; 1200 rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1201 bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1202 if (rcs & GT_RENDER_USER_INTERRUPT) 1203 notify_ring(dev, &dev_priv->ring[RCS]); 1204 if (bcs & GT_RENDER_USER_INTERRUPT) 1205 notify_ring(dev, &dev_priv->ring[BCS]); 1206 I915_WRITE(GEN8_GT_IIR(0), tmp); 1207 } else 1208 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1209 } 1210 1211 if (master_ctl & GEN8_GT_VCS1_IRQ) { 1212 tmp = I915_READ(GEN8_GT_IIR(1)); 1213 if (tmp) { 1214 ret = IRQ_HANDLED; 1215 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1216 if (vcs & GT_RENDER_USER_INTERRUPT) 1217 notify_ring(dev, &dev_priv->ring[VCS]); 1218 I915_WRITE(GEN8_GT_IIR(1), tmp); 1219 } else 1220 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1221 } 1222 1223 if (master_ctl & GEN8_GT_VECS_IRQ) { 1224 tmp = I915_READ(GEN8_GT_IIR(3)); 1225 if (tmp) { 1226 ret = IRQ_HANDLED; 1227 vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1228 if (vcs & GT_RENDER_USER_INTERRUPT) 1229 notify_ring(dev, &dev_priv->ring[VECS]); 1230 I915_WRITE(GEN8_GT_IIR(3), tmp); 1231 } else 1232 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1233 } 1234 1235 return ret; 1236 } 1237 1238 #define HPD_STORM_DETECT_PERIOD 1000 1239 #define HPD_STORM_THRESHOLD 5 1240 1241 static inline void intel_hpd_irq_handler(struct drm_device *dev, 1242 u32 hotplug_trigger, 1243 const u32 *hpd) 1244 { 1245 drm_i915_private_t *dev_priv = dev->dev_private; 1246 int i; 1247 bool storm_detected = false; 1248 1249 if (!hotplug_trigger) 1250 return; 1251 1252 spin_lock(&dev_priv->irq_lock); 1253 for (i = 1; i < HPD_NUM_PINS; i++) { 1254 1255 if (hpd[i] & hotplug_trigger && 1256 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { 1257 /* 1258 * On GMCH platforms the interrupt mask bits only 1259 * prevent irq generation, not the setting of the 1260 * hotplug bits itself. So only WARN about unexpected 1261 * interrupts on saner platforms. 
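		 *
		 * Informal summary of the storm detection below (derived
		 * from this code, not from hardware documentation): with
		 * the defaults above, a pin that raises more than
		 * HPD_STORM_THRESHOLD (5) interrupts within
		 * HPD_STORM_DETECT_PERIOD (1000 ms) is marked
		 * HPD_MARK_DISABLED here, and the hotplug work function
		 * later switches that connector over to polling.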
1262 */ 1263 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), 1264 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1265 hotplug_trigger, i, hpd[i]); 1266 1267 continue; 1268 } 1269 1270 if (!(hpd[i] & hotplug_trigger) || 1271 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1272 continue; 1273 1274 dev_priv->hpd_event_bits |= (1 << i); 1275 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1276 dev_priv->hpd_stats[i].hpd_last_jiffies 1277 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1278 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1279 dev_priv->hpd_stats[i].hpd_cnt = 0; 1280 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1281 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1282 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1283 dev_priv->hpd_event_bits &= ~(1 << i); 1284 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 1285 storm_detected = true; 1286 } else { 1287 dev_priv->hpd_stats[i].hpd_cnt++; 1288 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1289 dev_priv->hpd_stats[i].hpd_cnt); 1290 } 1291 } 1292 1293 if (storm_detected) 1294 dev_priv->display.hpd_irq_setup(dev); 1295 spin_unlock(&dev_priv->irq_lock); 1296 1297 /* 1298 * Our hotplug handler can grab modeset locks (by calling down into the 1299 * fb helpers). Hence it must not be run on our own dev-priv->wq work 1300 * queue for otherwise the flush_work in the pageflip code will 1301 * deadlock. 1302 */ 1303 schedule_work(&dev_priv->hotplug_work); 1304 } 1305 1306 static void gmbus_irq_handler(struct drm_device *dev) 1307 { 1308 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1309 1310 wake_up_all(&dev_priv->gmbus_wait_queue); 1311 } 1312 1313 static void dp_aux_irq_handler(struct drm_device *dev) 1314 { 1315 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 1316 1317 wake_up_all(&dev_priv->gmbus_wait_queue); 1318 } 1319 1320 #if defined(CONFIG_DEBUG_FS) 1321 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1322 uint32_t crc0, uint32_t crc1, 1323 uint32_t crc2, uint32_t crc3, 1324 uint32_t crc4) 1325 { 1326 struct drm_i915_private *dev_priv = dev->dev_private; 1327 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1328 struct intel_pipe_crc_entry *entry; 1329 int head, tail; 1330 1331 spin_lock(&pipe_crc->lock); 1332 1333 if (!pipe_crc->entries) { 1334 spin_unlock(&pipe_crc->lock); 1335 DRM_ERROR("spurious interrupt\n"); 1336 return; 1337 } 1338 1339 head = pipe_crc->head; 1340 tail = pipe_crc->tail; 1341 1342 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1343 spin_unlock(&pipe_crc->lock); 1344 DRM_ERROR("CRC buffer overflowing\n"); 1345 return; 1346 } 1347 1348 entry = &pipe_crc->entries[head]; 1349 1350 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1351 entry->crc[0] = crc0; 1352 entry->crc[1] = crc1; 1353 entry->crc[2] = crc2; 1354 entry->crc[3] = crc3; 1355 entry->crc[4] = crc4; 1356 1357 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1358 pipe_crc->head = head; 1359 1360 spin_unlock(&pipe_crc->lock); 1361 1362 wake_up_interruptible(&pipe_crc->wq); 1363 } 1364 #else 1365 static inline void 1366 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1367 uint32_t crc0, uint32_t crc1, 1368 uint32_t crc2, uint32_t crc3, 1369 uint32_t crc4) {} 1370 #endif 1371 1372 1373 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 
1374 { 1375 struct drm_i915_private *dev_priv = dev->dev_private; 1376 1377 display_pipe_crc_irq_handler(dev, pipe, 1378 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1379 0, 0, 0, 0); 1380 } 1381 1382 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1383 { 1384 struct drm_i915_private *dev_priv = dev->dev_private; 1385 1386 display_pipe_crc_irq_handler(dev, pipe, 1387 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1388 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1389 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1390 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1391 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1392 } 1393 1394 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1395 { 1396 struct drm_i915_private *dev_priv = dev->dev_private; 1397 uint32_t res1, res2; 1398 1399 if (INTEL_INFO(dev)->gen >= 3) 1400 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1401 else 1402 res1 = 0; 1403 1404 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1405 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1406 else 1407 res2 = 0; 1408 1409 display_pipe_crc_irq_handler(dev, pipe, 1410 I915_READ(PIPE_CRC_RES_RED(pipe)), 1411 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1412 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1413 res1, res2); 1414 } 1415 1416 /* The RPS events need forcewake, so we add them to a work queue and mask their 1417 * IMR bits until the work is done. Other interrupts can be processed without 1418 * the work queue. */ 1419 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1420 { 1421 if (pm_iir & GEN6_PM_RPS_EVENTS) { 1422 spin_lock(&dev_priv->irq_lock); 1423 dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; 1424 snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS); 1425 spin_unlock(&dev_priv->irq_lock); 1426 1427 queue_work(dev_priv->wq, &dev_priv->rps.work); 1428 } 1429 1430 if (HAS_VEBOX(dev_priv->dev)) { 1431 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1432 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1433 1434 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1435 DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); 1436 i915_handle_error(dev_priv->dev, false); 1437 } 1438 } 1439 } 1440 1441 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1442 { 1443 struct drm_device *dev = (struct drm_device *) arg; 1444 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1445 u32 iir, gt_iir, pm_iir; 1446 irqreturn_t ret = IRQ_NONE; 1447 unsigned long irqflags; 1448 int pipe; 1449 u32 pipe_stats[I915_MAX_PIPES]; 1450 1451 atomic_inc(&dev_priv->irq_received); 1452 1453 while (true) { 1454 iir = I915_READ(VLV_IIR); 1455 gt_iir = I915_READ(GTIIR); 1456 pm_iir = I915_READ(GEN6_PMIIR); 1457 1458 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1459 goto out; 1460 1461 ret = IRQ_HANDLED; 1462 1463 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1464 1465 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1466 for_each_pipe(pipe) { 1467 int reg = PIPESTAT(pipe); 1468 pipe_stats[pipe] = I915_READ(reg); 1469 1470 /* 1471 * Clear the PIPE*STAT regs before the IIR 1472 */ 1473 if (pipe_stats[pipe] & 0x8000ffff) { 1474 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1475 DRM_DEBUG_DRIVER("pipe %c underrun\n", 1476 pipe_name(pipe)); 1477 I915_WRITE(reg, pipe_stats[pipe]); 1478 } 1479 } 1480 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1481 1482 for_each_pipe(pipe) { 1483 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1484 drm_handle_vblank(dev, pipe); 1485 1486 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1487 
intel_prepare_page_flip(dev, pipe); 1488 intel_finish_page_flip(dev, pipe); 1489 } 1490 1491 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1492 i9xx_pipe_crc_irq_handler(dev, pipe); 1493 } 1494 1495 /* Consume port. Then clear IIR or we'll miss events */ 1496 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1497 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1498 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1499 1500 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1501 hotplug_status); 1502 1503 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1504 1505 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1506 dp_aux_irq_handler(dev); 1507 1508 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1509 I915_READ(PORT_HOTPLUG_STAT); 1510 } 1511 1512 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1513 gmbus_irq_handler(dev); 1514 1515 if (pm_iir) 1516 gen6_rps_irq_handler(dev_priv, pm_iir); 1517 1518 I915_WRITE(GTIIR, gt_iir); 1519 I915_WRITE(GEN6_PMIIR, pm_iir); 1520 I915_WRITE(VLV_IIR, iir); 1521 } 1522 1523 out: 1524 return ret; 1525 } 1526 1527 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1528 { 1529 #ifdef DRMDEBUG 1530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1531 #endif 1532 int pipe; 1533 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1534 1535 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1536 1537 #ifdef notyet 1538 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1539 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1540 SDE_AUDIO_POWER_SHIFT); 1541 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1542 port_name(port)); 1543 } 1544 #endif 1545 1546 if (pch_iir & SDE_AUX_MASK) 1547 dp_aux_irq_handler(dev); 1548 1549 if (pch_iir & SDE_GMBUS) 1550 gmbus_irq_handler(dev); 1551 1552 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1553 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1554 1555 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1556 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1557 1558 if (pch_iir & SDE_POISON) 1559 DRM_ERROR("PCH poison interrupt\n"); 1560 1561 if (pch_iir & SDE_FDI_MASK) 1562 for_each_pipe(pipe) 1563 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1564 pipe_name(pipe), 1565 I915_READ(FDI_RX_IIR(pipe))); 1566 1567 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1568 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1569 1570 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1571 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1572 1573 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1574 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1575 false)) 1576 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1577 1578 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1579 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1580 false)) 1581 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1582 } 1583 1584 static void ivb_err_int_handler(struct drm_device *dev) 1585 { 1586 struct drm_i915_private *dev_priv = dev->dev_private; 1587 u32 err_int = I915_READ(GEN7_ERR_INT); 1588 enum pipe pipe; 1589 1590 if (err_int & ERR_INT_POISON) 1591 DRM_ERROR("Poison interrupt\n"); 1592 1593 for_each_pipe(pipe) { 1594 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1595 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1596 false)) 1597 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1598 pipe_name(pipe)); 1599 } 1600 1601 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1602 if (IS_IVYBRIDGE(dev)) 1603 ivb_pipe_crc_irq_handler(dev, pipe); 1604 else 1605 
hsw_pipe_crc_irq_handler(dev, pipe); 1606 } 1607 } 1608 1609 I915_WRITE(GEN7_ERR_INT, err_int); 1610 } 1611 1612 static void cpt_serr_int_handler(struct drm_device *dev) 1613 { 1614 struct drm_i915_private *dev_priv = dev->dev_private; 1615 u32 serr_int = I915_READ(SERR_INT); 1616 1617 if (serr_int & SERR_INT_POISON) 1618 DRM_ERROR("PCH poison interrupt\n"); 1619 1620 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1621 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1622 false)) 1623 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1624 1625 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1626 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1627 false)) 1628 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1629 1630 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1631 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1632 false)) 1633 DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); 1634 1635 I915_WRITE(SERR_INT, serr_int); 1636 } 1637 1638 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1639 { 1640 #ifdef DRMDEBUG 1641 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1642 #endif 1643 int pipe; 1644 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1645 1646 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1647 1648 #ifdef notyet 1649 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1650 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1651 SDE_AUDIO_POWER_SHIFT_CPT); 1652 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1653 port_name(port)); 1654 } 1655 #endif 1656 1657 if (pch_iir & SDE_AUX_MASK_CPT) 1658 dp_aux_irq_handler(dev); 1659 1660 if (pch_iir & SDE_GMBUS_CPT) 1661 gmbus_irq_handler(dev); 1662 1663 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1664 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1665 1666 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1667 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1668 1669 if (pch_iir & SDE_FDI_MASK_CPT) 1670 for_each_pipe(pipe) 1671 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1672 pipe_name(pipe), 1673 I915_READ(FDI_RX_IIR(pipe))); 1674 1675 if (pch_iir & SDE_ERROR_CPT) 1676 cpt_serr_int_handler(dev); 1677 } 1678 1679 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1680 { 1681 struct drm_i915_private *dev_priv = dev->dev_private; 1682 enum pipe pipe; 1683 1684 if (de_iir & DE_AUX_CHANNEL_A) 1685 dp_aux_irq_handler(dev); 1686 1687 if (de_iir & DE_GSE) 1688 intel_opregion_asle_intr(dev); 1689 1690 if (de_iir & DE_POISON) 1691 DRM_ERROR("Poison interrupt\n"); 1692 1693 for_each_pipe(pipe) { 1694 if (de_iir & DE_PIPE_VBLANK(pipe)) 1695 drm_handle_vblank(dev, pipe); 1696 1697 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1698 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1699 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1700 pipe_name(pipe)); 1701 1702 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1703 i9xx_pipe_crc_irq_handler(dev, pipe); 1704 1705 /* plane/pipes map 1:1 on ilk+ */ 1706 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 1707 intel_prepare_page_flip(dev, pipe); 1708 intel_finish_page_flip_plane(dev, pipe); 1709 } 1710 } 1711 1712 /* check event from PCH */ 1713 if (de_iir & DE_PCH_EVENT) { 1714 u32 pch_iir = I915_READ(SDEIIR); 1715 1716 if (HAS_PCH_CPT(dev)) 1717 cpt_irq_handler(dev, pch_iir); 1718 else 1719 ibx_irq_handler(dev, pch_iir); 1720 1721 /* should clear PCH hotplug event before clear CPU irq */ 1722 I915_WRITE(SDEIIR, pch_iir); 1723 } 1724 1725 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1726 
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue).
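	 *
	 * SDEIER (together with DEIER) is restored near the end of this
	 * handler, once the pending IIR bits have been processed.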
*/ 1788 if (!HAS_PCH_NOP(dev)) { 1789 sde_ier = I915_READ(SDEIER); 1790 I915_WRITE(SDEIER, 0); 1791 POSTING_READ(SDEIER); 1792 } 1793 1794 gt_iir = I915_READ(GTIIR); 1795 if (gt_iir) { 1796 if (INTEL_INFO(dev)->gen >= 6) 1797 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1798 else 1799 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 1800 I915_WRITE(GTIIR, gt_iir); 1801 ret = IRQ_HANDLED; 1802 } 1803 1804 de_iir = I915_READ(DEIIR); 1805 if (de_iir) { 1806 if (INTEL_INFO(dev)->gen >= 7) 1807 ivb_display_irq_handler(dev, de_iir); 1808 else 1809 ilk_display_irq_handler(dev, de_iir); 1810 I915_WRITE(DEIIR, de_iir); 1811 ret = IRQ_HANDLED; 1812 } 1813 1814 if (INTEL_INFO(dev)->gen >= 6) { 1815 u32 pm_iir = I915_READ(GEN6_PMIIR); 1816 if (pm_iir) { 1817 gen6_rps_irq_handler(dev_priv, pm_iir); 1818 I915_WRITE(GEN6_PMIIR, pm_iir); 1819 ret = IRQ_HANDLED; 1820 } 1821 } 1822 1823 I915_WRITE(DEIER, de_ier); 1824 POSTING_READ(DEIER); 1825 if (!HAS_PCH_NOP(dev)) { 1826 I915_WRITE(SDEIER, sde_ier); 1827 POSTING_READ(SDEIER); 1828 } 1829 1830 return ret; 1831 } 1832 1833 static irqreturn_t gen8_irq_handler(int irq, void *arg) 1834 { 1835 struct drm_device *dev = arg; 1836 struct drm_i915_private *dev_priv = dev->dev_private; 1837 u32 master_ctl; 1838 irqreturn_t ret = IRQ_NONE; 1839 uint32_t tmp = 0; 1840 enum pipe pipe; 1841 1842 atomic_inc(&dev_priv->irq_received); 1843 1844 master_ctl = I915_READ(GEN8_MASTER_IRQ); 1845 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 1846 if (!master_ctl) 1847 return IRQ_NONE; 1848 1849 I915_WRITE(GEN8_MASTER_IRQ, 0); 1850 POSTING_READ(GEN8_MASTER_IRQ); 1851 1852 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1853 1854 if (master_ctl & GEN8_DE_MISC_IRQ) { 1855 tmp = I915_READ(GEN8_DE_MISC_IIR); 1856 if (tmp & GEN8_DE_MISC_GSE) 1857 intel_opregion_asle_intr(dev); 1858 else if (tmp) 1859 DRM_ERROR("Unexpected DE Misc interrupt\n"); 1860 else 1861 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 1862 1863 if (tmp) { 1864 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 1865 ret = IRQ_HANDLED; 1866 } 1867 } 1868 1869 if (master_ctl & GEN8_DE_PORT_IRQ) { 1870 tmp = I915_READ(GEN8_DE_PORT_IIR); 1871 if (tmp & GEN8_AUX_CHANNEL_A) 1872 dp_aux_irq_handler(dev); 1873 else if (tmp) 1874 DRM_ERROR("Unexpected DE Port interrupt\n"); 1875 else 1876 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 1877 1878 if (tmp) { 1879 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 1880 ret = IRQ_HANDLED; 1881 } 1882 } 1883 1884 for_each_pipe(pipe) { 1885 uint32_t pipe_iir; 1886 1887 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 1888 continue; 1889 1890 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 1891 if (pipe_iir & GEN8_PIPE_VBLANK) 1892 drm_handle_vblank(dev, pipe); 1893 1894 if (pipe_iir & GEN8_PIPE_FLIP_DONE) { 1895 intel_prepare_page_flip(dev, pipe); 1896 intel_finish_page_flip_plane(dev, pipe); 1897 } 1898 1899 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 1900 hsw_pipe_crc_irq_handler(dev, pipe); 1901 1902 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 1903 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1904 false)) 1905 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1906 pipe_name(pipe)); 1907 } 1908 1909 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 1910 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 1911 pipe_name(pipe), 1912 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 1913 } 1914 1915 if (pipe_iir) { 1916 ret = IRQ_HANDLED; 1917 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 1918 } else 1919 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 1920 } 1921 1922 if (!HAS_PCH_NOP(dev) && master_ctl 
& GEN8_DE_PCH_IRQ) { 1923 /* 1924 * FIXME(BDW): Assume for now that the new interrupt handling 1925 * scheme also closed the SDE interrupt handling race we've seen 1926 * on older pch-split platforms. But this needs testing. 1927 */ 1928 u32 pch_iir = I915_READ(SDEIIR); 1929 1930 cpt_irq_handler(dev, pch_iir); 1931 1932 if (pch_iir) { 1933 I915_WRITE(SDEIIR, pch_iir); 1934 ret = IRQ_HANDLED; 1935 } 1936 } 1937 1938 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1939 POSTING_READ(GEN8_MASTER_IRQ); 1940 1941 return ret; 1942 } 1943 1944 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 1945 bool reset_completed) 1946 { 1947 struct intel_ring_buffer *ring; 1948 int i; 1949 1950 /* 1951 * Notify all waiters for GPU completion events that reset state has 1952 * been changed, and that they need to restart their wait after 1953 * checking for potential errors (and bail out to drop locks if there is 1954 * a gpu reset pending so that i915_error_work_func can acquire them). 1955 */ 1956 1957 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 1958 for_each_ring(ring, dev_priv, i) 1959 wake_up_all(&ring->irq_queue); 1960 1961 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 1962 wake_up_all(&dev_priv->pending_flip_queue); 1963 1964 /* 1965 * Signal tasks blocked in i915_gem_wait_for_error that the pending 1966 * reset state is cleared. 1967 */ 1968 if (reset_completed) 1969 wake_up_all(&dev_priv->gpu_error.reset_queue); 1970 } 1971 1972 /** 1973 * i915_error_work_func - do process context error handling work 1974 * @work: work struct 1975 * 1976 * Fire an error uevent so userspace can see that a hang or error 1977 * was detected. 1978 */ 1979 static void i915_error_work_func(struct work_struct *work) 1980 { 1981 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 1982 work); 1983 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 1984 gpu_error); 1985 struct drm_device *dev = dev_priv->dev; 1986 #ifdef notyet 1987 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 1988 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 1989 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 1990 #endif 1991 int ret; 1992 1993 #ifdef __linux__ 1994 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 1995 #endif 1996 1997 /* 1998 * Note that there's only one work item which does gpu resets, so we 1999 * need not worry about concurrent gpu resets potentially incrementing 2000 * error->reset_counter twice. We only need to take care of another 2001 * racing irq/hangcheck declaring the gpu dead for a second time. A 2002 * quick check for that is good enough: schedule_work ensures the 2003 * correct ordering between hang detection and this work item, and since 2004 * the reset in-progress bit is only ever set by code outside of this 2005 * work we don't need to worry about any other races. 2006 */ 2007 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2008 DRM_DEBUG_DRIVER("resetting chip\n"); 2009 #ifdef __linux__ 2010 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2011 reset_event); 2012 #endif 2013 2014 /* 2015 * All state reset _must_ be completed before we update the 2016 * reset counter, for otherwise waiters might miss the reset 2017 * pending state and not properly drop locks, resulting in 2018 * deadlocks with the reset work. 
2019 */ 2020 ret = i915_reset(dev); 2021 2022 intel_display_handle_reset(dev); 2023 2024 if (ret == 0) { 2025 /* 2026 * After all the gem state is reset, increment the reset 2027 * counter and wake up everyone waiting for the reset to 2028 * complete. 2029 * 2030 * Since unlock operations are a one-sided barrier only, 2031 * we need to insert a barrier here to order any seqno 2032 * updates before 2033 * the counter increment. 2034 */ 2035 smp_mb__before_atomic_inc(); 2036 atomic_inc(&dev_priv->gpu_error.reset_counter); 2037 2038 #ifdef __linux__ 2039 kobject_uevent_env(&dev->primary->kdev->kobj, 2040 KOBJ_CHANGE, reset_done_event); 2041 #endif 2042 } else { 2043 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2044 } 2045 2046 /* 2047 * Note: The wake_up also serves as a memory barrier so that 2048 * waiters see the update value of the reset counter atomic_t. 2049 */ 2050 i915_error_wake_up(dev_priv, true); 2051 } 2052 } 2053 2054 static void i915_report_and_clear_eir(struct drm_device *dev) 2055 { 2056 struct drm_i915_private *dev_priv = dev->dev_private; 2057 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2058 u32 eir = I915_READ(EIR); 2059 int pipe, i; 2060 2061 if (!eir) 2062 return; 2063 2064 pr_err("render error detected, EIR: 0x%08x\n", eir); 2065 2066 i915_get_extra_instdone(dev, instdone); 2067 2068 if (IS_G4X(dev)) { 2069 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2070 u32 ipeir = I915_READ(IPEIR_I965); 2071 2072 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2073 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2074 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2075 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2076 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2077 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2078 I915_WRITE(IPEIR_I965, ipeir); 2079 POSTING_READ(IPEIR_I965); 2080 } 2081 if (eir & GM45_ERROR_PAGE_TABLE) { 2082 u32 pgtbl_err = I915_READ(PGTBL_ER); 2083 pr_err("page table error\n"); 2084 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2085 I915_WRITE(PGTBL_ER, pgtbl_err); 2086 POSTING_READ(PGTBL_ER); 2087 } 2088 } 2089 2090 if (!IS_GEN2(dev)) { 2091 if (eir & I915_ERROR_PAGE_TABLE) { 2092 u32 pgtbl_err = I915_READ(PGTBL_ER); 2093 pr_err("page table error\n"); 2094 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2095 I915_WRITE(PGTBL_ER, pgtbl_err); 2096 POSTING_READ(PGTBL_ER); 2097 } 2098 } 2099 2100 if (eir & I915_ERROR_MEMORY_REFRESH) { 2101 pr_err("memory refresh error:\n"); 2102 for_each_pipe(pipe) 2103 pr_err("pipe %c stat: 0x%08x\n", 2104 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2105 /* pipestat has already been acked */ 2106 } 2107 if (eir & I915_ERROR_INSTRUCTION) { 2108 pr_err("instruction error\n"); 2109 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2110 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2111 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2112 if (INTEL_INFO(dev)->gen < 4) { 2113 u32 ipeir = I915_READ(IPEIR); 2114 2115 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2116 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2117 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2118 I915_WRITE(IPEIR, ipeir); 2119 POSTING_READ(IPEIR); 2120 } else { 2121 u32 ipeir = I915_READ(IPEIR_I965); 2122 2123 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2124 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2125 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2126 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2127 I915_WRITE(IPEIR_I965, ipeir); 2128 POSTING_READ(IPEIR_I965); 2129 } 2130 } 2131 2132 I915_WRITE(EIR, eir); 2133 
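/* The write above puts the logged error bits back into EIR to clear them; flush it with the posting read below and sample EIR again so that any bits which remain stuck can be masked off via EMR. */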
POSTING_READ(EIR); 2134 eir = I915_READ(EIR); 2135 if (eir) { 2136 /* 2137 * some errors might have become stuck, 2138 * mask them. 2139 */ 2140 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2141 I915_WRITE(EMR, I915_READ(EMR) | eir); 2142 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2143 } 2144 } 2145 2146 /** 2147 * i915_handle_error - handle an error interrupt 2148 * @dev: drm device 2149 * 2150 * Do some basic checking of register state at error interrupt time and 2151 * dump it to the syslog. Also call i915_capture_error_state() to make 2152 * sure we get a record and make it available in debugfs. Fire a uevent 2153 * so userspace knows something bad happened (should trigger collection 2154 * of a ring dump etc.). 2155 */ 2156 void i915_handle_error(struct drm_device *dev, bool wedged) 2157 { 2158 struct drm_i915_private *dev_priv = dev->dev_private; 2159 2160 i915_capture_error_state(dev); 2161 i915_report_and_clear_eir(dev); 2162 2163 if (wedged) { 2164 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2165 &dev_priv->gpu_error.reset_counter); 2166 2167 /* 2168 * Wake up waiting processes so that the reset work function 2169 * i915_error_work_func doesn't deadlock trying to grab various 2170 * locks. By bumping the reset counter first, the woken 2171 * processes will see a reset in progress and back off, 2172 * releasing their locks and then waiting for the reset to complete. 2173 * We must do this for _all_ gpu waiters that might hold locks 2174 * that the reset work needs to acquire. 2175 * 2176 * Note: The wake_up serves as the required memory barrier to 2177 * ensure that the waiters see the updated value of the reset 2178 * counter atomic_t. 2179 */ 2180 i915_error_wake_up(dev_priv, false); 2181 } 2182 2183 /* 2184 * Our reset work can grab modeset locks (since it needs to reset the 2185 * state of outstanding pageflips). Hence it must not be run on our own 2186 * dev_priv->wq work queue for otherwise the flush_work in the pageflip 2187 * code will deadlock. 2188 */ 2189 schedule_work(&dev_priv->gpu_error.work); 2190 } 2191 2192 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2193 { 2194 drm_i915_private_t *dev_priv = dev->dev_private; 2195 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2197 struct drm_i915_gem_object *obj; 2198 struct intel_unpin_work *work; 2199 unsigned long flags; 2200 bool stall_detected; 2201 2202 /* Ignore early vblank irqs */ 2203 if (intel_crtc == NULL) 2204 return; 2205 2206 spin_lock_irqsave(&dev->event_lock, flags); 2207 work = intel_crtc->unpin_work; 2208 2209 if (work == NULL || 2210 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2211 !work->enable_stall_check) { 2212 /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 2213 spin_unlock_irqrestore(&dev->event_lock, flags); 2214 return; 2215 } 2216 2217 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2218 obj = work->pending_flip_obj; 2219 if (INTEL_INFO(dev)->gen >= 4) { 2220 int dspsurf = DSPSURF(intel_crtc->plane); 2221 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2222 i915_gem_obj_ggtt_offset(obj); 2223 } else { 2224 int dspaddr = DSPADDR(intel_crtc->plane); 2225 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2226 crtc->y * crtc->fb->pitches[0] + 2227 crtc->x * crtc->fb->bits_per_pixel/8); 2228 } 2229 2230 spin_unlock_irqrestore(&dev->event_lock, flags); 2231 2232 if (stall_detected) { 2233 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2234 intel_prepare_page_flip(dev, intel_crtc->plane); 2235 } 2236 } 2237 2238 /* Called from drm generic code, passed 'crtc' which 2239 * we use as a pipe index 2240 */ 2241 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2242 { 2243 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2244 unsigned long irqflags; 2245 2246 if (!i915_pipe_enabled(dev, pipe)) 2247 return -EINVAL; 2248 2249 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2250 if (INTEL_INFO(dev)->gen >= 4) 2251 i915_enable_pipestat(dev_priv, pipe, 2252 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2253 else 2254 i915_enable_pipestat(dev_priv, pipe, 2255 PIPE_VBLANK_INTERRUPT_ENABLE); 2256 2257 /* maintain vblank delivery even in deep C-states */ 2258 if (dev_priv->info->gen == 3) 2259 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2260 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2261 2262 return 0; 2263 } 2264 2265 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2266 { 2267 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2268 unsigned long irqflags; 2269 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2270 DE_PIPE_VBLANK(pipe); 2271 2272 if (!i915_pipe_enabled(dev, pipe)) 2273 return -EINVAL; 2274 2275 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2276 ironlake_enable_display_irq(dev_priv, bit); 2277 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2278 2279 return 0; 2280 } 2281 2282 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2283 { 2284 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2285 unsigned long irqflags; 2286 u32 imr; 2287 2288 if (!i915_pipe_enabled(dev, pipe)) 2289 return -EINVAL; 2290 2291 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2292 imr = I915_READ(VLV_IMR); 2293 if (pipe == PIPE_A) 2294 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2295 else 2296 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2297 I915_WRITE(VLV_IMR, imr); 2298 i915_enable_pipestat(dev_priv, pipe, 2299 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2300 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2301 2302 return 0; 2303 } 2304 2305 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2306 { 2307 struct drm_i915_private *dev_priv = dev->dev_private; 2308 unsigned long irqflags; 2309 2310 if (!i915_pipe_enabled(dev, pipe)) 2311 return -EINVAL; 2312 2313 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2314 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2315 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2316 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2317 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2318 return 0; 2319 } 2320 2321 /* Called from drm generic code, passed 'crtc' which 2322 * we use as a pipe index 2323 */ 2324 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2325 { 2326 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2327 unsigned long irqflags; 2328 2329 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2330 if (dev_priv->info->gen == 3) 2331 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2332 2333 i915_disable_pipestat(dev_priv, pipe, 2334 PIPE_VBLANK_INTERRUPT_ENABLE | 2335 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2336 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2337 } 2338 2339 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2340 { 2341 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2342 unsigned long irqflags; 2343 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2344 DE_PIPE_VBLANK(pipe); 2345 2346 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2347 ironlake_disable_display_irq(dev_priv, bit); 2348 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2349 } 2350 2351 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2352 { 2353 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2354 unsigned long irqflags; 2355 u32 imr; 2356 2357 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2358 i915_disable_pipestat(dev_priv, pipe, 2359 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2360 imr = I915_READ(VLV_IMR); 2361 if (pipe == PIPE_A) 2362 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2363 else 2364 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2365 I915_WRITE(VLV_IMR, imr); 2366 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2367 } 2368 2369 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2370 { 2371 struct drm_i915_private *dev_priv = dev->dev_private; 2372 unsigned long irqflags; 2373 2374 if (!i915_pipe_enabled(dev, pipe)) 2375 return; 2376 2377 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2378 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2379 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2380 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2381 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2382 } 2383 2384 static u32 2385 ring_last_seqno(struct intel_ring_buffer *ring) 2386 { 2387 return list_entry(ring->request_list.prev, 2388 struct drm_i915_gem_request, list)->seqno; 2389 } 2390 2391 static bool 2392 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2393 { 2394 return (list_empty(&ring->request_list) || 2395 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2396 } 2397 2398 static struct intel_ring_buffer * 2399 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2400 { 2401 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2402 u32 cmd, ipehr, acthd, acthd_min; 2403 2404 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2405 if ((ipehr & ~(0x3 << 16)) != 2406 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2407 return NULL; 2408 2409 /* ACTHD is likely pointing to the dword after the actual command, 2410 * so scan backwards until we find the MBOX. 
2411 */ 2412 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2413 acthd_min = max((int)acthd - 3 * 4, 0); 2414 do { 2415 cmd = ioread32(ring->virtual_start + acthd); 2416 if (cmd == ipehr) 2417 break; 2418 2419 acthd -= 4; 2420 if (acthd < acthd_min) 2421 return NULL; 2422 } while (1); 2423 2424 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2425 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2426 } 2427 2428 static int semaphore_passed(struct intel_ring_buffer *ring) 2429 { 2430 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2431 struct intel_ring_buffer *signaller; 2432 u32 seqno, ctl; 2433 2434 ring->hangcheck.deadlock = true; 2435 2436 signaller = semaphore_waits_for(ring, &seqno); 2437 if (signaller == NULL || signaller->hangcheck.deadlock) 2438 return -1; 2439 2440 /* cursory check for an unkickable deadlock */ 2441 ctl = I915_READ_CTL(signaller); 2442 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2443 return -1; 2444 2445 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2446 } 2447 2448 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2449 { 2450 struct intel_ring_buffer *ring; 2451 int i; 2452 2453 for_each_ring(ring, dev_priv, i) 2454 ring->hangcheck.deadlock = false; 2455 } 2456 2457 static enum intel_ring_hangcheck_action 2458 ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 2459 { 2460 struct drm_device *dev = ring->dev; 2461 struct drm_i915_private *dev_priv = dev->dev_private; 2462 u32 tmp; 2463 2464 if (ring->hangcheck.acthd != acthd) 2465 return HANGCHECK_ACTIVE; 2466 2467 if (IS_GEN2(dev)) 2468 return HANGCHECK_HUNG; 2469 2470 /* Is the chip hanging on a WAIT_FOR_EVENT? 2471 * If so we can simply poke the RB_WAIT bit 2472 * and break the hang. This should work on 2473 * all but the second generation chipsets. 2474 */ 2475 tmp = I915_READ_CTL(ring); 2476 if (tmp & RING_WAIT) { 2477 DRM_ERROR("Kicking stuck wait on %s\n", 2478 ring->name); 2479 i915_handle_error(dev, false); 2480 I915_WRITE_CTL(ring, tmp); 2481 return HANGCHECK_KICK; 2482 } 2483 2484 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2485 switch (semaphore_passed(ring)) { 2486 default: 2487 return HANGCHECK_HUNG; 2488 case 1: 2489 DRM_ERROR("Kicking stuck semaphore on %s\n", 2490 ring->name); 2491 i915_handle_error(dev, false); 2492 I915_WRITE_CTL(ring, tmp); 2493 return HANGCHECK_KICK; 2494 case 0: 2495 return HANGCHECK_WAIT; 2496 } 2497 } 2498 2499 return HANGCHECK_HUNG; 2500 } 2501 2502 /** 2503 * This is called when the chip hasn't reported back with completed 2504 * batchbuffers in a long time. We keep track of seqno progress per ring, 2505 * and if there is no progress the hangcheck score for that ring is increased. 2506 * Further, acthd is inspected to see if the ring is stuck. If it is, 2507 * we kick the ring. If we see no progress on three subsequent calls 2508 * we assume the chip is wedged and try to fix it by resetting the chip.
2509 */ 2510 static void i915_hangcheck_elapsed(unsigned long data) 2511 { 2512 struct drm_device *dev = (struct drm_device *)data; 2513 drm_i915_private_t *dev_priv = dev->dev_private; 2514 struct intel_ring_buffer *ring; 2515 int i; 2516 int busy_count = 0, rings_hung = 0; 2517 bool stuck[I915_NUM_RINGS] = { 0 }; 2518 #define BUSY 1 2519 #define KICK 5 2520 #define HUNG 20 2521 #define FIRE 30 2522 2523 if (!i915_enable_hangcheck) 2524 return; 2525 2526 for_each_ring(ring, dev_priv, i) { 2527 u32 seqno, acthd; 2528 bool busy = true; 2529 2530 semaphore_clear_deadlocks(dev_priv); 2531 2532 seqno = ring->get_seqno(ring, false); 2533 acthd = intel_ring_get_active_head(ring); 2534 2535 if (ring->hangcheck.seqno == seqno) { 2536 if (ring_idle(ring, seqno)) { 2537 ring->hangcheck.action = HANGCHECK_IDLE; 2538 2539 if (waitqueue_active(&ring->irq_queue)) { 2540 /* Issue a wake-up to catch stuck h/w. */ 2541 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2542 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2543 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2544 ring->name); 2545 else 2546 DRM_INFO("Fake missed irq on %s\n", 2547 ring->name); 2548 wake_up_all(&ring->irq_queue); 2549 } 2550 /* Safeguard against driver failure */ 2551 ring->hangcheck.score += BUSY; 2552 } else 2553 busy = false; 2554 } else { 2555 /* We always increment the hangcheck score 2556 * if the ring is busy and still processing 2557 * the same request, so that no single request 2558 * can run indefinitely (such as a chain of 2559 * batches). The only time we do not increment 2560 * the hangcheck score on this ring, if this 2561 * ring is in a legitimate wait for another 2562 * ring. In that case the waiting ring is a 2563 * victim and we want to be sure we catch the 2564 * right culprit. Then every time we do kick 2565 * the ring, add a small increment to the 2566 * score so that we can catch a batch that is 2567 * being repeatedly kicked and so responsible 2568 * for stalling the machine. 2569 */ 2570 ring->hangcheck.action = ring_stuck(ring, 2571 acthd); 2572 2573 switch (ring->hangcheck.action) { 2574 case HANGCHECK_IDLE: 2575 case HANGCHECK_WAIT: 2576 break; 2577 case HANGCHECK_ACTIVE: 2578 ring->hangcheck.score += BUSY; 2579 break; 2580 case HANGCHECK_KICK: 2581 ring->hangcheck.score += KICK; 2582 break; 2583 case HANGCHECK_HUNG: 2584 ring->hangcheck.score += HUNG; 2585 stuck[i] = true; 2586 break; 2587 } 2588 } 2589 } else { 2590 ring->hangcheck.action = HANGCHECK_ACTIVE; 2591 2592 /* Gradually reduce the count so that we catch DoS 2593 * attempts across multiple batches. 2594 */ 2595 if (ring->hangcheck.score > 0) 2596 ring->hangcheck.score--; 2597 } 2598 2599 ring->hangcheck.seqno = seqno; 2600 ring->hangcheck.acthd = acthd; 2601 busy_count += busy; 2602 } 2603 2604 for_each_ring(ring, dev_priv, i) { 2605 if (ring->hangcheck.score > FIRE) { 2606 DRM_INFO("%s on %s\n", 2607 stuck[i] ? 
"stuck" : "no progress", 2608 ring->name); 2609 rings_hung++; 2610 } 2611 } 2612 2613 if (rings_hung) 2614 return i915_handle_error(dev, true); 2615 2616 if (busy_count) 2617 /* Reset timer case chip hangs without another request 2618 * being added */ 2619 i915_queue_hangcheck(dev); 2620 } 2621 2622 void i915_queue_hangcheck(struct drm_device *dev) 2623 { 2624 struct drm_i915_private *dev_priv = dev->dev_private; 2625 if (!i915_enable_hangcheck) 2626 return; 2627 2628 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2629 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2630 } 2631 2632 static void ibx_irq_preinstall(struct drm_device *dev) 2633 { 2634 struct drm_i915_private *dev_priv = dev->dev_private; 2635 2636 if (HAS_PCH_NOP(dev)) 2637 return; 2638 2639 /* south display irq */ 2640 I915_WRITE(SDEIMR, 0xffffffff); 2641 /* 2642 * SDEIER is also touched by the interrupt handler to work around missed 2643 * PCH interrupts. Hence we can't update it after the interrupt handler 2644 * is enabled - instead we unconditionally enable all PCH interrupt 2645 * sources here, but then only unmask them as needed with SDEIMR. 2646 */ 2647 I915_WRITE(SDEIER, 0xffffffff); 2648 POSTING_READ(SDEIER); 2649 } 2650 2651 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2652 { 2653 struct drm_i915_private *dev_priv = dev->dev_private; 2654 2655 /* and GT */ 2656 I915_WRITE(GTIMR, 0xffffffff); 2657 I915_WRITE(GTIER, 0x0); 2658 POSTING_READ(GTIER); 2659 2660 if (INTEL_INFO(dev)->gen >= 6) { 2661 /* and PM */ 2662 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2663 I915_WRITE(GEN6_PMIER, 0x0); 2664 POSTING_READ(GEN6_PMIER); 2665 } 2666 } 2667 2668 /* drm_dma.h hooks 2669 */ 2670 static void ironlake_irq_preinstall(struct drm_device *dev) 2671 { 2672 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2673 2674 atomic_set(&dev_priv->irq_received, 0); 2675 2676 I915_WRITE(HWSTAM, 0xeffe); 2677 2678 I915_WRITE(DEIMR, 0xffffffff); 2679 I915_WRITE(DEIER, 0x0); 2680 POSTING_READ(DEIER); 2681 2682 gen5_gt_irq_preinstall(dev); 2683 2684 ibx_irq_preinstall(dev); 2685 } 2686 2687 static void valleyview_irq_preinstall(struct drm_device *dev) 2688 { 2689 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2690 int pipe; 2691 2692 atomic_set(&dev_priv->irq_received, 0); 2693 2694 /* VLV magic */ 2695 I915_WRITE(VLV_IMR, 0); 2696 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2697 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2698 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2699 2700 /* and GT */ 2701 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2702 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2703 2704 gen5_gt_irq_preinstall(dev); 2705 2706 I915_WRITE(DPINVGTT, 0xff); 2707 2708 I915_WRITE(PORT_HOTPLUG_EN, 0); 2709 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2710 for_each_pipe(pipe) 2711 I915_WRITE(PIPESTAT(pipe), 0xffff); 2712 I915_WRITE(VLV_IIR, 0xffffffff); 2713 I915_WRITE(VLV_IMR, 0xffffffff); 2714 I915_WRITE(VLV_IER, 0x0); 2715 POSTING_READ(VLV_IER); 2716 } 2717 2718 static void gen8_irq_preinstall(struct drm_device *dev) 2719 { 2720 struct drm_i915_private *dev_priv = dev->dev_private; 2721 int pipe; 2722 2723 atomic_set(&dev_priv->irq_received, 0); 2724 2725 I915_WRITE(GEN8_MASTER_IRQ, 0); 2726 POSTING_READ(GEN8_MASTER_IRQ); 2727 2728 /* IIR can theoretically queue up two events. 
Be paranoid */ 2729 #define GEN8_IRQ_INIT_NDX(type, which) do { \ 2730 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2731 POSTING_READ(GEN8_##type##_IMR(which)); \ 2732 I915_WRITE(GEN8_##type##_IER(which), 0); \ 2733 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2734 POSTING_READ(GEN8_##type##_IIR(which)); \ 2735 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2736 } while (0) 2737 2738 #define GEN8_IRQ_INIT(type) do { \ 2739 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2740 POSTING_READ(GEN8_##type##_IMR); \ 2741 I915_WRITE(GEN8_##type##_IER, 0); \ 2742 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2743 POSTING_READ(GEN8_##type##_IIR); \ 2744 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2745 } while (0) 2746 2747 GEN8_IRQ_INIT_NDX(GT, 0); 2748 GEN8_IRQ_INIT_NDX(GT, 1); 2749 GEN8_IRQ_INIT_NDX(GT, 2); 2750 GEN8_IRQ_INIT_NDX(GT, 3); 2751 2752 for_each_pipe(pipe) { 2753 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 2754 } 2755 2756 GEN8_IRQ_INIT(DE_PORT); 2757 GEN8_IRQ_INIT(DE_MISC); 2758 GEN8_IRQ_INIT(PCU); 2759 #undef GEN8_IRQ_INIT 2760 #undef GEN8_IRQ_INIT_NDX 2761 2762 POSTING_READ(GEN8_PCU_IIR); 2763 2764 ibx_irq_preinstall(dev); 2765 } 2766 2767 static void ibx_hpd_irq_setup(struct drm_device *dev) 2768 { 2769 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2770 struct drm_mode_config *mode_config = &dev->mode_config; 2771 struct intel_encoder *intel_encoder; 2772 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2773 2774 if (HAS_PCH_IBX(dev)) { 2775 hotplug_irqs = SDE_HOTPLUG_MASK; 2776 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2777 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2778 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2779 } else { 2780 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 2781 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2782 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2783 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2784 } 2785 2786 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2787 2788 /* 2789 * Enable digital hotplug on the PCH, and configure the DP short pulse 2790 * duration to 2ms (which is the minimum in the Display Port spec) 2791 * 2792 * This register is the same on all known PCH chips. 2793 */ 2794 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2795 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2796 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2797 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2798 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2799 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2800 } 2801 2802 static void ibx_irq_postinstall(struct drm_device *dev) 2803 { 2804 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2805 u32 mask; 2806 2807 if (HAS_PCH_NOP(dev)) 2808 return; 2809 2810 if (HAS_PCH_IBX(dev)) { 2811 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 2812 } else { 2813 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 2814 2815 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2816 } 2817 2818 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2819 I915_WRITE(SDEIMR, ~mask); 2820 } 2821 2822 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2823 { 2824 struct drm_i915_private *dev_priv = dev->dev_private; 2825 u32 pm_irqs, gt_irqs; 2826 2827 pm_irqs = gt_irqs = 0; 2828 2829 dev_priv->gt_irq_mask = ~0; 2830 if (HAS_L3_DPF(dev)) { 2831 /* L3 parity interrupt is always unmasked. 
*/ 2832 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 2833 gt_irqs |= GT_PARITY_ERROR(dev); 2834 } 2835 2836 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2837 if (IS_GEN5(dev)) { 2838 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2839 ILK_BSD_USER_INTERRUPT; 2840 } else { 2841 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2842 } 2843 2844 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2845 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2846 I915_WRITE(GTIER, gt_irqs); 2847 POSTING_READ(GTIER); 2848 2849 if (INTEL_INFO(dev)->gen >= 6) { 2850 pm_irqs |= GEN6_PM_RPS_EVENTS; 2851 2852 if (HAS_VEBOX(dev)) 2853 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2854 2855 dev_priv->pm_irq_mask = 0xffffffff; 2856 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2857 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 2858 I915_WRITE(GEN6_PMIER, pm_irqs); 2859 POSTING_READ(GEN6_PMIER); 2860 } 2861 } 2862 2863 static int ironlake_irq_postinstall(struct drm_device *dev) 2864 { 2865 unsigned long irqflags; 2866 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2867 u32 display_mask, extra_mask; 2868 2869 if (INTEL_INFO(dev)->gen >= 7) { 2870 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2871 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2872 DE_PLANEB_FLIP_DONE_IVB | 2873 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 2874 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2875 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 2876 2877 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2878 } else { 2879 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2880 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2881 DE_AUX_CHANNEL_A | 2882 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 2883 DE_POISON); 2884 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 2885 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 2886 } 2887 2888 dev_priv->irq_mask = ~display_mask; 2889 2890 /* should always be able to generate interrupts */ 2891 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2892 I915_WRITE(DEIMR, dev_priv->irq_mask); 2893 I915_WRITE(DEIER, display_mask | extra_mask); 2894 POSTING_READ(DEIER); 2895 2896 gen5_gt_irq_postinstall(dev); 2897 2898 ibx_irq_postinstall(dev); 2899 2900 if (IS_IRONLAKE_M(dev)) { 2901 /* Enable PCU event interrupts 2902 * 2903 * spinlocking not required here for correctness since interrupt 2904 * setup is guaranteed to run in single-threaded context. But we 2905 * need it to make the assert_spin_locked happy. */ 2906 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2907 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2908 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2909 } 2910 2911 return 0; 2912 } 2913 2914 static int valleyview_irq_postinstall(struct drm_device *dev) 2915 { 2916 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2917 u32 enable_mask; 2918 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | 2919 PIPE_CRC_DONE_ENABLE; 2920 unsigned long irqflags; 2921 2922 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2923 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2924 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2925 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2926 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2927 2928 /* 2929 * Leave vblank interrupts masked initially; enable/disable will 2930 * toggle them based on usage.
2931 */ 2932 dev_priv->irq_mask = (~enable_mask) | 2933 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2934 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2935 2936 I915_WRITE(PORT_HOTPLUG_EN, 0); 2937 POSTING_READ(PORT_HOTPLUG_EN); 2938 2939 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2940 I915_WRITE(VLV_IER, enable_mask); 2941 I915_WRITE(VLV_IIR, 0xffffffff); 2942 I915_WRITE(PIPESTAT(0), 0xffff); 2943 I915_WRITE(PIPESTAT(1), 0xffff); 2944 POSTING_READ(VLV_IER); 2945 2946 /* Interrupt setup is already guaranteed to be single-threaded, this is 2947 * just to make the assert_spin_locked check happy. */ 2948 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2949 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 2950 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 2951 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); 2952 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2953 2954 I915_WRITE(VLV_IIR, 0xffffffff); 2955 I915_WRITE(VLV_IIR, 0xffffffff); 2956 2957 gen5_gt_irq_postinstall(dev); 2958 2959 /* ack & enable invalid PTE error interrupts */ 2960 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2961 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2962 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2963 #endif 2964 2965 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2966 2967 return 0; 2968 } 2969 2970 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 2971 { 2972 int i; 2973 2974 /* These are interrupts we'll toggle with the ring mask register */ 2975 uint32_t gt_interrupts[] = { 2976 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 2977 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 2978 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 2979 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 2980 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 2981 0, 2982 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 2983 }; 2984 2985 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 2986 u32 tmp = I915_READ(GEN8_GT_IIR(i)); 2987 if (tmp) 2988 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2989 i, tmp); 2990 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); 2991 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); 2992 } 2993 POSTING_READ(GEN8_GT_IER(0)); 2994 } 2995 2996 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 2997 { 2998 struct drm_device *dev = dev_priv->dev; 2999 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 3000 GEN8_PIPE_CDCLK_CRC_DONE | 3001 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3002 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3003 GEN8_PIPE_FIFO_UNDERRUN; 3004 int pipe; 3005 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3006 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3007 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3008 3009 for_each_pipe(pipe) { 3010 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 3011 if (tmp) 3012 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 3013 pipe, tmp); 3014 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 3015 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); 3016 } 3017 POSTING_READ(GEN8_DE_PIPE_ISR(0)); 3018 3019 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 3020 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); 3021 POSTING_READ(GEN8_DE_PORT_IER); 3022 3023 I915_WRITE(GEN8_DE_MISC_IMR, ~GEN8_DE_MISC_GSE); 3024 I915_WRITE(GEN8_DE_MISC_IER, GEN8_DE_MISC_GSE); 3025 POSTING_READ(GEN8_DE_MISC_IER); 3026 } 3027 3028 static int gen8_irq_postinstall(struct drm_device *dev) 3029 { 3030 struct 
drm_i915_private *dev_priv = dev->dev_private; 3031 3032 gen8_gt_irq_postinstall(dev_priv); 3033 gen8_de_irq_postinstall(dev_priv); 3034 3035 ibx_irq_postinstall(dev); 3036 3037 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3038 POSTING_READ(GEN8_MASTER_IRQ); 3039 3040 return 0; 3041 } 3042 3043 static void gen8_irq_uninstall(struct drm_device *dev) 3044 { 3045 struct drm_i915_private *dev_priv = dev->dev_private; 3046 int pipe; 3047 3048 if (!dev_priv) 3049 return; 3050 3051 atomic_set(&dev_priv->irq_received, 0); 3052 3053 I915_WRITE(GEN8_MASTER_IRQ, 0); 3054 3055 #define GEN8_IRQ_FINI_NDX(type, which) do { \ 3056 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3057 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3058 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3059 } while (0) 3060 3061 #define GEN8_IRQ_FINI(type) do { \ 3062 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3063 I915_WRITE(GEN8_##type##_IER, 0); \ 3064 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3065 } while (0) 3066 3067 GEN8_IRQ_FINI_NDX(GT, 0); 3068 GEN8_IRQ_FINI_NDX(GT, 1); 3069 GEN8_IRQ_FINI_NDX(GT, 2); 3070 GEN8_IRQ_FINI_NDX(GT, 3); 3071 3072 for_each_pipe(pipe) { 3073 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 3074 } 3075 3076 GEN8_IRQ_FINI(DE_PORT); 3077 GEN8_IRQ_FINI(DE_MISC); 3078 GEN8_IRQ_FINI(PCU); 3079 #undef GEN8_IRQ_FINI 3080 #undef GEN8_IRQ_FINI_NDX 3081 3082 POSTING_READ(GEN8_PCU_IIR); 3083 } 3084 3085 static void valleyview_irq_uninstall(struct drm_device *dev) 3086 { 3087 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3088 int pipe; 3089 3090 if (!dev_priv) 3091 return; 3092 3093 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3094 3095 for_each_pipe(pipe) 3096 I915_WRITE(PIPESTAT(pipe), 0xffff); 3097 3098 I915_WRITE(HWSTAM, 0xffffffff); 3099 I915_WRITE(PORT_HOTPLUG_EN, 0); 3100 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3101 for_each_pipe(pipe) 3102 I915_WRITE(PIPESTAT(pipe), 0xffff); 3103 I915_WRITE(VLV_IIR, 0xffffffff); 3104 I915_WRITE(VLV_IMR, 0xffffffff); 3105 I915_WRITE(VLV_IER, 0x0); 3106 POSTING_READ(VLV_IER); 3107 } 3108 3109 static void ironlake_irq_uninstall(struct drm_device *dev) 3110 { 3111 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3112 3113 if (!dev_priv) 3114 return; 3115 3116 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3117 3118 I915_WRITE(HWSTAM, 0xffffffff); 3119 3120 I915_WRITE(DEIMR, 0xffffffff); 3121 I915_WRITE(DEIER, 0x0); 3122 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3123 if (IS_GEN7(dev)) 3124 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 3125 3126 I915_WRITE(GTIMR, 0xffffffff); 3127 I915_WRITE(GTIER, 0x0); 3128 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3129 3130 if (HAS_PCH_NOP(dev)) 3131 return; 3132 3133 I915_WRITE(SDEIMR, 0xffffffff); 3134 I915_WRITE(SDEIER, 0x0); 3135 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 3136 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3137 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3138 } 3139 3140 static void i8xx_irq_preinstall(struct drm_device * dev) 3141 { 3142 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3143 int pipe; 3144 3145 atomic_set(&dev_priv->irq_received, 0); 3146 3147 for_each_pipe(pipe) 3148 I915_WRITE(PIPESTAT(pipe), 0); 3149 I915_WRITE16(IMR, 0xffff); 3150 I915_WRITE16(IER, 0x0); 3151 POSTING_READ16(IER); 3152 } 3153 3154 static int i8xx_irq_postinstall(struct drm_device *dev) 3155 { 3156 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3157 unsigned long irqflags; 3158 3159 I915_WRITE16(EMR, 3160 
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3161 3162 /* Unmask the interrupts that we always want on. */ 3163 dev_priv->irq_mask = 3164 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3165 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3166 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3167 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3168 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3169 I915_WRITE16(IMR, dev_priv->irq_mask); 3170 3171 I915_WRITE16(IER, 3172 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3173 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3174 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3175 I915_USER_INTERRUPT); 3176 POSTING_READ16(IER); 3177 3178 /* Interrupt setup is already guaranteed to be single-threaded, this is 3179 * just to make the assert_spin_locked check happy. */ 3180 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3181 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3182 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3183 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3184 3185 return 0; 3186 } 3187 3188 /* 3189 * Returns true when a page flip has completed. 3190 */ 3191 static bool i8xx_handle_vblank(struct drm_device *dev, 3192 int plane, int pipe, u32 iir) 3193 { 3194 drm_i915_private_t *dev_priv = dev->dev_private; 3195 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3196 3197 if (!drm_handle_vblank(dev, pipe)) 3198 return false; 3199 3200 if ((iir & flip_pending) == 0) 3201 return false; 3202 3203 intel_prepare_page_flip(dev, plane); 3204 3205 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3206 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3207 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3208 * the flip is completed (no longer pending). Since this doesn't raise 3209 * an interrupt per se, we watch for the change at vblank. 3210 */ 3211 if (I915_READ16(ISR) & flip_pending) 3212 return false; 3213 3214 intel_finish_page_flip(dev, pipe); 3215 3216 return true; 3217 } 3218 3219 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3220 { 3221 struct drm_device *dev = (struct drm_device *) arg; 3222 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3223 u16 iir, new_iir; 3224 u32 pipe_stats[2]; 3225 unsigned long irqflags; 3226 int pipe; 3227 u16 flip_mask = 3228 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3229 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3230 3231 atomic_inc(&dev_priv->irq_received); 3232 3233 iir = I915_READ16(IIR); 3234 if (iir == 0) 3235 return IRQ_NONE; 3236 3237 while (iir & ~flip_mask) { 3238 /* Can't rely on pipestat interrupt bit in iir as it might 3239 * have been cleared after the pipestat interrupt was received. 3240 * It doesn't set the bit in iir again, but it still produces 3241 * interrupts (for non-MSI). 
3242 */ 3243 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3244 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3245 i915_handle_error(dev, false); 3246 3247 for_each_pipe(pipe) { 3248 int reg = PIPESTAT(pipe); 3249 pipe_stats[pipe] = I915_READ(reg); 3250 3251 /* 3252 * Clear the PIPE*STAT regs before the IIR 3253 */ 3254 if (pipe_stats[pipe] & 0x8000ffff) { 3255 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3256 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3257 pipe_name(pipe)); 3258 I915_WRITE(reg, pipe_stats[pipe]); 3259 } 3260 } 3261 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3262 3263 I915_WRITE16(IIR, iir & ~flip_mask); 3264 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3265 3266 i915_update_dri1_breadcrumb(dev); 3267 3268 if (iir & I915_USER_INTERRUPT) 3269 notify_ring(dev, &dev_priv->ring[RCS]); 3270 3271 for_each_pipe(pipe) { 3272 int plane = pipe; 3273 if (HAS_FBC(dev)) 3274 plane = !plane; 3275 3276 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3277 i8xx_handle_vblank(dev, plane, pipe, iir)) 3278 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3279 3280 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3281 i9xx_pipe_crc_irq_handler(dev, pipe); 3282 } 3283 3284 iir = new_iir; 3285 } 3286 3287 return IRQ_HANDLED; 3288 } 3289 3290 static void i8xx_irq_uninstall(struct drm_device * dev) 3291 { 3292 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3293 int pipe; 3294 3295 for_each_pipe(pipe) { 3296 /* Clear enable bits; then clear status bits */ 3297 I915_WRITE(PIPESTAT(pipe), 0); 3298 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3299 } 3300 I915_WRITE16(IMR, 0xffff); 3301 I915_WRITE16(IER, 0x0); 3302 I915_WRITE16(IIR, I915_READ16(IIR)); 3303 } 3304 3305 static void i915_irq_preinstall(struct drm_device * dev) 3306 { 3307 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3308 int pipe; 3309 3310 atomic_set(&dev_priv->irq_received, 0); 3311 3312 if (I915_HAS_HOTPLUG(dev)) { 3313 I915_WRITE(PORT_HOTPLUG_EN, 0); 3314 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3315 } 3316 3317 I915_WRITE16(HWSTAM, 0xeffe); 3318 for_each_pipe(pipe) 3319 I915_WRITE(PIPESTAT(pipe), 0); 3320 I915_WRITE(IMR, 0xffffffff); 3321 I915_WRITE(IER, 0x0); 3322 POSTING_READ(IER); 3323 } 3324 3325 static int i915_irq_postinstall(struct drm_device *dev) 3326 { 3327 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3328 u32 enable_mask; 3329 unsigned long irqflags; 3330 3331 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3332 3333 /* Unmask the interrupts that we always want on. */ 3334 dev_priv->irq_mask = 3335 ~(I915_ASLE_INTERRUPT | 3336 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3337 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3338 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3339 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3340 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3341 3342 enable_mask = 3343 I915_ASLE_INTERRUPT | 3344 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3345 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3346 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3347 I915_USER_INTERRUPT; 3348 3349 if (I915_HAS_HOTPLUG(dev)) { 3350 I915_WRITE(PORT_HOTPLUG_EN, 0); 3351 POSTING_READ(PORT_HOTPLUG_EN); 3352 3353 /* Enable in IER... 
*/ 3354 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3355 /* and unmask in IMR */ 3356 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3357 } 3358 3359 I915_WRITE(IMR, dev_priv->irq_mask); 3360 I915_WRITE(IER, enable_mask); 3361 POSTING_READ(IER); 3362 3363 i915_enable_asle_pipestat(dev); 3364 3365 /* Interrupt setup is already guaranteed to be single-threaded, this is 3366 * just to make the assert_spin_locked check happy. */ 3367 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3368 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3369 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3370 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3371 3372 return 0; 3373 } 3374 3375 /* 3376 * Returns true when a page flip has completed. 3377 */ 3378 static bool i915_handle_vblank(struct drm_device *dev, 3379 int plane, int pipe, u32 iir) 3380 { 3381 drm_i915_private_t *dev_priv = dev->dev_private; 3382 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3383 3384 if (!drm_handle_vblank(dev, pipe)) 3385 return false; 3386 3387 if ((iir & flip_pending) == 0) 3388 return false; 3389 3390 intel_prepare_page_flip(dev, plane); 3391 3392 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3393 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3394 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3395 * the flip is completed (no longer pending). Since this doesn't raise 3396 * an interrupt per se, we watch for the change at vblank. 3397 */ 3398 if (I915_READ(ISR) & flip_pending) 3399 return false; 3400 3401 intel_finish_page_flip(dev, pipe); 3402 3403 return true; 3404 } 3405 3406 static irqreturn_t i915_irq_handler(int irq, void *arg) 3407 { 3408 struct drm_device *dev = (struct drm_device *) arg; 3409 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3410 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3411 unsigned long irqflags; 3412 u32 flip_mask = 3413 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3414 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3415 int pipe, ret = IRQ_NONE; 3416 3417 atomic_inc(&dev_priv->irq_received); 3418 3419 iir = I915_READ(IIR); 3420 do { 3421 bool irq_received = (iir & ~flip_mask) != 0; 3422 bool blc_event = false; 3423 3424 /* Can't rely on pipestat interrupt bit in iir as it might 3425 * have been cleared after the pipestat interrupt was received. 3426 * It doesn't set the bit in iir again, but it still produces 3427 * interrupts (for non-MSI). 3428 */ 3429 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3430 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3431 i915_handle_error(dev, false); 3432 3433 for_each_pipe(pipe) { 3434 int reg = PIPESTAT(pipe); 3435 pipe_stats[pipe] = I915_READ(reg); 3436 3437 /* Clear the PIPE*STAT regs before the IIR */ 3438 if (pipe_stats[pipe] & 0x8000ffff) { 3439 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3440 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3441 pipe_name(pipe)); 3442 I915_WRITE(reg, pipe_stats[pipe]); 3443 irq_received = true; 3444 } 3445 } 3446 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3447 3448 if (!irq_received) 3449 break; 3450 3451 /* Consume port. 
Then clear IIR or we'll miss events */ 3452 if ((I915_HAS_HOTPLUG(dev)) && 3453 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3454 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3455 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3456 3457 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3458 hotplug_status); 3459 3460 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3461 3462 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3463 POSTING_READ(PORT_HOTPLUG_STAT); 3464 } 3465 3466 I915_WRITE(IIR, iir & ~flip_mask); 3467 new_iir = I915_READ(IIR); /* Flush posted writes */ 3468 3469 if (iir & I915_USER_INTERRUPT) 3470 notify_ring(dev, &dev_priv->ring[RCS]); 3471 3472 for_each_pipe(pipe) { 3473 int plane = pipe; 3474 if (HAS_FBC(dev)) 3475 plane = !plane; 3476 3477 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3478 i915_handle_vblank(dev, plane, pipe, iir)) 3479 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3480 3481 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3482 blc_event = true; 3483 3484 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3485 i9xx_pipe_crc_irq_handler(dev, pipe); 3486 } 3487 3488 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3489 intel_opregion_asle_intr(dev); 3490 3491 /* With MSI, interrupts are only generated when iir 3492 * transitions from zero to nonzero. If another bit got 3493 * set while we were handling the existing iir bits, then 3494 * we would never get another interrupt. 3495 * 3496 * This is fine on non-MSI as well, as if we hit this path 3497 * we avoid exiting the interrupt handler only to generate 3498 * another one. 3499 * 3500 * Note that for MSI this could cause a stray interrupt report 3501 * if an interrupt landed in the time between writing IIR and 3502 * the posting read. This should be rare enough to never 3503 * trigger the 99% of 100,000 interrupts test for disabling 3504 * stray interrupts. 
3505 */ 3506 ret = IRQ_HANDLED; 3507 iir = new_iir; 3508 } while (iir & ~flip_mask); 3509 3510 i915_update_dri1_breadcrumb(dev); 3511 3512 return ret; 3513 } 3514 3515 static void i915_irq_uninstall(struct drm_device * dev) 3516 { 3517 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3518 int pipe; 3519 3520 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3521 3522 if (I915_HAS_HOTPLUG(dev)) { 3523 I915_WRITE(PORT_HOTPLUG_EN, 0); 3524 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3525 } 3526 3527 I915_WRITE16(HWSTAM, 0xffff); 3528 for_each_pipe(pipe) { 3529 /* Clear enable bits; then clear status bits */ 3530 I915_WRITE(PIPESTAT(pipe), 0); 3531 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3532 } 3533 I915_WRITE(IMR, 0xffffffff); 3534 I915_WRITE(IER, 0x0); 3535 3536 I915_WRITE(IIR, I915_READ(IIR)); 3537 } 3538 3539 static void i965_irq_preinstall(struct drm_device * dev) 3540 { 3541 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3542 int pipe; 3543 3544 atomic_set(&dev_priv->irq_received, 0); 3545 3546 I915_WRITE(PORT_HOTPLUG_EN, 0); 3547 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3548 3549 I915_WRITE(HWSTAM, 0xeffe); 3550 for_each_pipe(pipe) 3551 I915_WRITE(PIPESTAT(pipe), 0); 3552 I915_WRITE(IMR, 0xffffffff); 3553 I915_WRITE(IER, 0x0); 3554 POSTING_READ(IER); 3555 } 3556 3557 static int i965_irq_postinstall(struct drm_device *dev) 3558 { 3559 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3560 u32 enable_mask; 3561 u32 error_mask; 3562 unsigned long irqflags; 3563 3564 /* Unmask the interrupts that we always want on. */ 3565 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3566 I915_DISPLAY_PORT_INTERRUPT | 3567 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3568 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3569 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3570 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3571 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3572 3573 enable_mask = ~dev_priv->irq_mask; 3574 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3575 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3576 enable_mask |= I915_USER_INTERRUPT; 3577 3578 if (IS_G4X(dev)) 3579 enable_mask |= I915_BSD_USER_INTERRUPT; 3580 3581 /* Interrupt setup is already guaranteed to be single-threaded, this is 3582 * just to make the assert_spin_locked check happy. */ 3583 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3584 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3585 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3586 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3587 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3588 3589 /* 3590 * Enable some error detection, note the instruction error mask 3591 * bit is reserved, so we leave it masked. 
3592 */ 3593 if (IS_G4X(dev)) { 3594 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3595 GM45_ERROR_MEM_PRIV | 3596 GM45_ERROR_CP_PRIV | 3597 I915_ERROR_MEMORY_REFRESH); 3598 } else { 3599 error_mask = ~(I915_ERROR_PAGE_TABLE | 3600 I915_ERROR_MEMORY_REFRESH); 3601 } 3602 I915_WRITE(EMR, error_mask); 3603 3604 I915_WRITE(IMR, dev_priv->irq_mask); 3605 I915_WRITE(IER, enable_mask); 3606 POSTING_READ(IER); 3607 3608 I915_WRITE(PORT_HOTPLUG_EN, 0); 3609 POSTING_READ(PORT_HOTPLUG_EN); 3610 3611 i915_enable_asle_pipestat(dev); 3612 3613 return 0; 3614 } 3615 3616 static void i915_hpd_irq_setup(struct drm_device *dev) 3617 { 3618 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3619 struct drm_mode_config *mode_config = &dev->mode_config; 3620 struct intel_encoder *intel_encoder; 3621 u32 hotplug_en; 3622 3623 assert_spin_locked(&dev_priv->irq_lock); 3624 3625 if (I915_HAS_HOTPLUG(dev)) { 3626 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3627 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3628 /* Note HDMI and DP share hotplug bits */ 3629 /* enable bits are the same for all generations */ 3630 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3631 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3632 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3633 /* Programming the CRT detection parameters tends 3634 to generate a spurious hotplug event about three 3635 seconds later. So just do it once. 3636 */ 3637 if (IS_G4X(dev)) 3638 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3639 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3640 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3641 3642 /* Ignore TV since it's buggy */ 3643 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3644 } 3645 } 3646 3647 static irqreturn_t i965_irq_handler(int irq, void *arg) 3648 { 3649 struct drm_device *dev = (struct drm_device *) arg; 3650 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3651 u32 iir, new_iir; 3652 u32 pipe_stats[I915_MAX_PIPES]; 3653 unsigned long irqflags; 3654 int irq_received; 3655 int ret = IRQ_NONE, pipe; 3656 u32 flip_mask = 3657 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3658 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3659 3660 atomic_inc(&dev_priv->irq_received); 3661 3662 iir = I915_READ(IIR); 3663 3664 for (;;) { 3665 bool blc_event = false; 3666 3667 irq_received = (iir & ~flip_mask) != 0; 3668 3669 /* Can't rely on pipestat interrupt bit in iir as it might 3670 * have been cleared after the pipestat interrupt was received. 3671 * It doesn't set the bit in iir again, but it still produces 3672 * interrupts (for non-MSI). 3673 */ 3674 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3675 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3676 i915_handle_error(dev, false); 3677 3678 for_each_pipe(pipe) { 3679 int reg = PIPESTAT(pipe); 3680 pipe_stats[pipe] = I915_READ(reg); 3681 3682 /* 3683 * Clear the PIPE*STAT regs before the IIR 3684 */ 3685 if (pipe_stats[pipe] & 0x8000ffff) { 3686 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3687 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3688 pipe_name(pipe)); 3689 I915_WRITE(reg, pipe_stats[pipe]); 3690 irq_received = 1; 3691 } 3692 } 3693 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3694 3695 if (!irq_received) 3696 break; 3697 3698 ret = IRQ_HANDLED; 3699 3700 /* Consume port. 
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE, pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = (iir & ~flip_mask) != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
								HOTPLUG_INT_STATUS_G4X :
								HOTPLUG_INT_STATUS_I915);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger,
					      IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915);

			if (IS_G4X(dev) &&
			    (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X))
				dp_aux_irq_handler(dev);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
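/*
 * Tear down gen4/g4x interrupts: cancel the hotplug re-enable timer,
 * mask everything in HWSTAM/IMR, disable IER, and ack any status bits
 * still latched in PIPESTAT and IIR so nothing fires after unload.
 */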
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	del_timer_sync(&dev_priv->hotplug_reenable_timer);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void i915_reenable_hotplug_timer_func(unsigned long data)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 drm_get_connector_name(connector));
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
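/*
 * One-time IRQ setup at driver load: initialize the deferred work items
 * and timers, then pick the irq handler, pre/post-install, uninstall,
 * vblank and hotplug-setup hooks that match the hardware generation.
 */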
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
		    (unsigned long) dev_priv);

#ifdef notyet
	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
#endif

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_preinstall;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	unsigned long irqflags;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
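/*
 * hsw_pc8_disable_interrupts() and hsw_pc8_restore_interrupts() bracket
 * Package C8+ entry and exit: on entry the current DEIMR/SDEIMR/GTIMR/
 * GTIER/GEN6_PMIMR values are stashed in dev_priv->pc8.regsave and every
 * source is masked; on exit the saved masks are written back, after
 * warning if anything unmasked the registers in the meantime.
 */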
/* Disable interrupts so we can allow Package C8+. */
void hsw_pc8_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
	dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
	dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
	dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
	dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);

	ironlake_disable_display_irq(dev_priv, 0xffffffff);
	ibx_disable_display_interrupt(dev_priv, 0xffffffff);
	ilk_disable_gt_irq(dev_priv, 0xffffffff);
	snb_disable_pm_irq(dev_priv, 0xffffffff);

	dev_priv->pc8.irqs_disabled = true;

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/* Restore interrupts so we can recover from Package C8+. */
void hsw_pc8_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t val;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	val = I915_READ(DEIMR);
	WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val);

	val = I915_READ(SDEIMR);
	WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val);

	val = I915_READ(GTIMR);
	WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val);

	val = I915_READ(GEN6_PMIMR);
	WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val);

	dev_priv->pc8.irqs_disabled = false;

	ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
	ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr);
	ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
	snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
	I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}