1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #include <drm/drmP.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "i915_trace.h" 33 #include "intel_drv.h" 34 35 static const u32 hpd_ibx[] = { 36 [HPD_CRT] = SDE_CRT_HOTPLUG, 37 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 38 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 39 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 40 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 41 }; 42 43 static const u32 hpd_cpt[] = { 44 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 45 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 46 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 47 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 48 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 49 }; 50 51 static const u32 hpd_mask_i915[] = { 52 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 53 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 54 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 55 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 56 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 57 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 58 }; 59 60 static const u32 hpd_status_g4x[] = { 61 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 62 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 63 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 64 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 65 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 66 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 67 }; 68 69 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */ 70 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 71 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 72 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 73 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 74 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 75 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 76 }; 77 78 /* IIR can theoretically queue up two events. Be paranoid. 
*/ 79 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 80 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 81 POSTING_READ(GEN8_##type##_IMR(which)); \ 82 I915_WRITE(GEN8_##type##_IER(which), 0); \ 83 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 84 POSTING_READ(GEN8_##type##_IIR(which)); \ 85 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 86 POSTING_READ(GEN8_##type##_IIR(which)); \ 87 } while (0) 88 89 #define GEN5_IRQ_RESET(type) do { \ 90 I915_WRITE(type##IMR, 0xffffffff); \ 91 POSTING_READ(type##IMR); \ 92 I915_WRITE(type##IER, 0); \ 93 I915_WRITE(type##IIR, 0xffffffff); \ 94 POSTING_READ(type##IIR); \ 95 I915_WRITE(type##IIR, 0xffffffff); \ 96 POSTING_READ(type##IIR); \ 97 } while (0) 98 99 /* 100 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 101 */ 102 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \ 103 u32 val = I915_READ(reg); \ 104 if (val) { \ 105 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \ 106 (reg), val); \ 107 I915_WRITE((reg), 0xffffffff); \ 108 POSTING_READ(reg); \ 109 I915_WRITE((reg), 0xffffffff); \ 110 POSTING_READ(reg); \ 111 } \ 112 } while (0) 113 114 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 115 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \ 116 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 117 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 118 POSTING_READ(GEN8_##type##_IER(which)); \ 119 } while (0) 120 121 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 122 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \ 123 I915_WRITE(type##IMR, (imr_val)); \ 124 I915_WRITE(type##IER, (ier_val)); \ 125 POSTING_READ(type##IER); \ 126 } while (0) 127 128 /* For display hotplug interrupt */ 129 static void 130 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 131 { 132 assert_spin_locked(&dev_priv->irq_lock); 133 134 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 135 return; 136 137 if ((dev_priv->irq_mask & mask) != 0) { 138 dev_priv->irq_mask &= ~mask; 139 I915_WRITE(DEIMR, dev_priv->irq_mask); 140 POSTING_READ(DEIMR); 141 } 142 } 143 144 static void 145 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 146 { 147 assert_spin_locked(&dev_priv->irq_lock); 148 149 if (!intel_irqs_enabled(dev_priv)) 150 return; 151 152 if ((dev_priv->irq_mask & mask) != mask) { 153 dev_priv->irq_mask |= mask; 154 I915_WRITE(DEIMR, dev_priv->irq_mask); 155 POSTING_READ(DEIMR); 156 } 157 } 158 159 /** 160 * ilk_update_gt_irq - update GTIMR 161 * @dev_priv: driver private 162 * @interrupt_mask: mask of interrupt bits to update 163 * @enabled_irq_mask: mask of interrupt bits to enable 164 */ 165 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 166 uint32_t interrupt_mask, 167 uint32_t enabled_irq_mask) 168 { 169 assert_spin_locked(&dev_priv->irq_lock); 170 171 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 172 return; 173 174 dev_priv->gt_irq_mask &= ~interrupt_mask; 175 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 176 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 177 POSTING_READ(GTIMR); 178 } 179 180 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 181 { 182 ilk_update_gt_irq(dev_priv, mask, mask); 183 } 184 185 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 186 { 187 ilk_update_gt_irq(dev_priv, mask, 0); 188 } 189 190 /** 191 * snb_update_pm_irq - update GEN6_PMIMR 192 * @dev_priv: driver private 193 * @interrupt_mask: mask of interrupt bits to update 194 * @enabled_irq_mask: mask of interrupt 
bits to enable 195 */ 196 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 197 uint32_t interrupt_mask, 198 uint32_t enabled_irq_mask) 199 { 200 uint32_t new_val; 201 202 assert_spin_locked(&dev_priv->irq_lock); 203 204 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 205 return; 206 207 new_val = dev_priv->pm_irq_mask; 208 new_val &= ~interrupt_mask; 209 new_val |= (~enabled_irq_mask & interrupt_mask); 210 211 if (new_val != dev_priv->pm_irq_mask) { 212 dev_priv->pm_irq_mask = new_val; 213 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 214 POSTING_READ(GEN6_PMIMR); 215 } 216 } 217 218 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 219 { 220 snb_update_pm_irq(dev_priv, mask, mask); 221 } 222 223 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 224 { 225 snb_update_pm_irq(dev_priv, mask, 0); 226 } 227 228 static bool ivb_can_enable_err_int(struct drm_device *dev) 229 { 230 struct drm_i915_private *dev_priv = dev->dev_private; 231 struct intel_crtc *crtc; 232 enum i915_pipe pipe; 233 234 assert_spin_locked(&dev_priv->irq_lock); 235 236 for_each_pipe(pipe) { 237 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 238 239 if (crtc->cpu_fifo_underrun_disabled) 240 return false; 241 } 242 243 return true; 244 } 245 246 /** 247 * bdw_update_pm_irq - update GT interrupt 2 248 * @dev_priv: driver private 249 * @interrupt_mask: mask of interrupt bits to update 250 * @enabled_irq_mask: mask of interrupt bits to enable 251 * 252 * Copied from the snb function, updated with relevant register offsets 253 */ 254 static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, 255 uint32_t interrupt_mask, 256 uint32_t enabled_irq_mask) 257 { 258 uint32_t new_val; 259 260 assert_spin_locked(&dev_priv->irq_lock); 261 262 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 263 return; 264 265 new_val = dev_priv->pm_irq_mask; 266 new_val &= ~interrupt_mask; 267 new_val |= (~enabled_irq_mask & interrupt_mask); 268 269 if (new_val != dev_priv->pm_irq_mask) { 270 dev_priv->pm_irq_mask = new_val; 271 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask); 272 POSTING_READ(GEN8_GT_IMR(2)); 273 } 274 } 275 276 void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 277 { 278 bdw_update_pm_irq(dev_priv, mask, mask); 279 } 280 281 void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 282 { 283 bdw_update_pm_irq(dev_priv, mask, 0); 284 } 285 286 static bool cpt_can_enable_serr_int(struct drm_device *dev) 287 { 288 struct drm_i915_private *dev_priv = dev->dev_private; 289 enum i915_pipe pipe; 290 struct intel_crtc *crtc; 291 292 assert_spin_locked(&dev_priv->irq_lock); 293 294 for_each_pipe(pipe) { 295 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 296 297 if (crtc->pch_fifo_underrun_disabled) 298 return false; 299 } 300 301 return true; 302 } 303 304 void i9xx_check_fifo_underruns(struct drm_device *dev) 305 { 306 struct drm_i915_private *dev_priv = dev->dev_private; 307 struct intel_crtc *crtc; 308 309 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 310 311 for_each_intel_crtc(dev, crtc) { 312 u32 reg = PIPESTAT(crtc->pipe); 313 u32 pipestat; 314 315 if (crtc->cpu_fifo_underrun_disabled) 316 continue; 317 318 pipestat = I915_READ(reg) & 0xffff0000; 319 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0) 320 continue; 321 322 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 323 POSTING_READ(reg); 324 325 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe)); 326 } 327 328 lockmgr(&dev_priv->irq_lock, 
LK_RELEASE); 329 } 330 331 static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, 332 enum i915_pipe pipe, 333 bool enable, bool old) 334 { 335 struct drm_i915_private *dev_priv = dev->dev_private; 336 u32 reg = PIPESTAT(pipe); 337 u32 pipestat = I915_READ(reg) & 0xffff0000; 338 339 assert_spin_locked(&dev_priv->irq_lock); 340 341 if (enable) { 342 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 343 POSTING_READ(reg); 344 } else { 345 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) 346 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 347 } 348 } 349 350 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 351 enum i915_pipe pipe, bool enable) 352 { 353 struct drm_i915_private *dev_priv = dev->dev_private; 354 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 355 DE_PIPEB_FIFO_UNDERRUN; 356 357 if (enable) 358 ironlake_enable_display_irq(dev_priv, bit); 359 else 360 ironlake_disable_display_irq(dev_priv, bit); 361 } 362 363 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 364 enum i915_pipe pipe, 365 bool enable, bool old) 366 { 367 struct drm_i915_private *dev_priv = dev->dev_private; 368 if (enable) { 369 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 370 371 if (!ivb_can_enable_err_int(dev)) 372 return; 373 374 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 375 } else { 376 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); 377 378 if (old && 379 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { 380 DRM_ERROR("uncleared fifo underrun on pipe %c\n", 381 pipe_name(pipe)); 382 } 383 } 384 } 385 386 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 387 enum i915_pipe pipe, bool enable) 388 { 389 struct drm_i915_private *dev_priv = dev->dev_private; 390 391 assert_spin_locked(&dev_priv->irq_lock); 392 393 if (enable) 394 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; 395 else 396 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN; 397 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 398 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 399 } 400 401 /** 402 * ibx_display_interrupt_update - update SDEIMR 403 * @dev_priv: driver private 404 * @interrupt_mask: mask of interrupt bits to update 405 * @enabled_irq_mask: mask of interrupt bits to enable 406 */ 407 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 408 uint32_t interrupt_mask, 409 uint32_t enabled_irq_mask) 410 { 411 uint32_t sdeimr = I915_READ(SDEIMR); 412 sdeimr &= ~interrupt_mask; 413 sdeimr |= (~enabled_irq_mask & interrupt_mask); 414 415 assert_spin_locked(&dev_priv->irq_lock); 416 417 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 418 return; 419 420 I915_WRITE(SDEIMR, sdeimr); 421 POSTING_READ(SDEIMR); 422 } 423 #define ibx_enable_display_interrupt(dev_priv, bits) \ 424 ibx_display_interrupt_update((dev_priv), (bits), (bits)) 425 #define ibx_disable_display_interrupt(dev_priv, bits) \ 426 ibx_display_interrupt_update((dev_priv), (bits), 0) 427 428 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, 429 enum transcoder pch_transcoder, 430 bool enable) 431 { 432 struct drm_i915_private *dev_priv = dev->dev_private; 433 uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 
434 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; 435 436 if (enable) 437 ibx_enable_display_interrupt(dev_priv, bit); 438 else 439 ibx_disable_display_interrupt(dev_priv, bit); 440 } 441 442 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev, 443 enum transcoder pch_transcoder, 444 bool enable, bool old) 445 { 446 struct drm_i915_private *dev_priv = dev->dev_private; 447 448 if (enable) { 449 I915_WRITE(SERR_INT, 450 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); 451 452 if (!cpt_can_enable_serr_int(dev)) 453 return; 454 455 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); 456 } else { 457 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); 458 459 if (old && I915_READ(SERR_INT) & 460 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { 461 DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", 462 transcoder_name(pch_transcoder)); 463 } 464 } 465 } 466 467 /** 468 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages 469 * @dev: drm device 470 * @pipe: pipe 471 * @enable: true if we want to report FIFO underrun errors, false otherwise 472 * 473 * This function makes us disable or enable CPU fifo underruns for a specific 474 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun 475 * reporting for one pipe may also disable all the other CPU error interruts for 476 * the other pipes, due to the fact that there's just one interrupt mask/enable 477 * bit for all the pipes. 478 * 479 * Returns the previous state of underrun reporting. 480 */ 481 static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 482 enum i915_pipe pipe, bool enable) 483 { 484 struct drm_i915_private *dev_priv = dev->dev_private; 485 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 486 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 487 bool old; 488 489 assert_spin_locked(&dev_priv->irq_lock); 490 491 old = !intel_crtc->cpu_fifo_underrun_disabled; 492 intel_crtc->cpu_fifo_underrun_disabled = !enable; 493 494 if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev)) 495 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old); 496 else if (IS_GEN5(dev) || IS_GEN6(dev)) 497 ironlake_set_fifo_underrun_reporting(dev, pipe, enable); 498 else if (IS_GEN7(dev)) 499 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old); 500 else if (IS_GEN8(dev)) 501 broadwell_set_fifo_underrun_reporting(dev, pipe, enable); 502 503 return old; 504 } 505 506 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 507 enum i915_pipe pipe, bool enable) 508 { 509 struct drm_i915_private *dev_priv = dev->dev_private; 510 bool ret; 511 512 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 513 ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable); 514 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 515 516 return ret; 517 } 518 519 static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev, 520 enum i915_pipe pipe) 521 { 522 struct drm_i915_private *dev_priv = dev->dev_private; 523 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 524 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 525 526 return !intel_crtc->cpu_fifo_underrun_disabled; 527 } 528 529 /** 530 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages 531 * @dev: drm device 532 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older) 533 * @enable: true if we want to report FIFO underrun errors, false otherwise 534 * 535 * This function makes us disable or enable PCH fifo underruns for a specific 
536 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO 537 * underrun reporting for one transcoder may also disable all the other PCH 538 * error interruts for the other transcoders, due to the fact that there's just 539 * one interrupt mask/enable bit for all the transcoders. 540 * 541 * Returns the previous state of underrun reporting. 542 */ 543 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 544 enum transcoder pch_transcoder, 545 bool enable) 546 { 547 struct drm_i915_private *dev_priv = dev->dev_private; 548 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder]; 549 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 550 bool old; 551 552 /* 553 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT 554 * has only one pch transcoder A that all pipes can use. To avoid racy 555 * pch transcoder -> pipe lookups from interrupt code simply store the 556 * underrun statistics in crtc A. Since we never expose this anywhere 557 * nor use it outside of the fifo underrun code here using the "wrong" 558 * crtc on LPT won't cause issues. 559 */ 560 561 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 562 563 old = !intel_crtc->pch_fifo_underrun_disabled; 564 intel_crtc->pch_fifo_underrun_disabled = !enable; 565 566 if (HAS_PCH_IBX(dev)) 567 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable); 568 else 569 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old); 570 571 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 572 return old; 573 } 574 575 static void 576 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 577 u32 enable_mask, u32 status_mask) 578 { 579 u32 reg = PIPESTAT(pipe); 580 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 581 582 assert_spin_locked(&dev_priv->irq_lock); 583 584 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 585 status_mask & ~PIPESTAT_INT_STATUS_MASK, 586 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 587 pipe_name(pipe), enable_mask, status_mask)) 588 return; 589 590 if ((pipestat & enable_mask) == enable_mask) 591 return; 592 593 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 594 595 /* Enable the interrupt, clear any pending status */ 596 pipestat |= enable_mask | status_mask; 597 I915_WRITE(reg, pipestat); 598 POSTING_READ(reg); 599 } 600 601 static void 602 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 603 u32 enable_mask, u32 status_mask) 604 { 605 u32 reg = PIPESTAT(pipe); 606 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 607 608 assert_spin_locked(&dev_priv->irq_lock); 609 610 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 611 status_mask & ~PIPESTAT_INT_STATUS_MASK, 612 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 613 pipe_name(pipe), enable_mask, status_mask)) 614 return; 615 616 if ((pipestat & enable_mask) == 0) 617 return; 618 619 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 620 621 pipestat &= ~enable_mask; 622 I915_WRITE(reg, pipestat); 623 POSTING_READ(reg); 624 } 625 626 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 627 { 628 u32 enable_mask = status_mask << 16; 629 630 /* 631 * On pipe A we don't support the PSR interrupt yet, 632 * on pipe B and C the same bit MBZ. 633 */ 634 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 635 return 0; 636 /* 637 * On pipe B and C we don't support the PSR interrupt yet, on pipe 638 * A the same bit is for perf counters which we don't use either. 
639 */ 640 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 641 return 0; 642 643 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 644 SPRITE0_FLIP_DONE_INT_EN_VLV | 645 SPRITE1_FLIP_DONE_INT_EN_VLV); 646 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 647 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 648 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 649 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 650 651 return enable_mask; 652 } 653 654 void 655 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 656 u32 status_mask) 657 { 658 u32 enable_mask; 659 660 if (IS_VALLEYVIEW(dev_priv->dev)) 661 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 662 status_mask); 663 else 664 enable_mask = status_mask << 16; 665 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 666 } 667 668 void 669 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 670 u32 status_mask) 671 { 672 u32 enable_mask; 673 674 if (IS_VALLEYVIEW(dev_priv->dev)) 675 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 676 status_mask); 677 else 678 enable_mask = status_mask << 16; 679 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 680 } 681 682 /** 683 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 684 */ 685 static void i915_enable_asle_pipestat(struct drm_device *dev) 686 { 687 struct drm_i915_private *dev_priv = dev->dev_private; 688 689 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 690 return; 691 692 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 693 694 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 695 if (INTEL_INFO(dev)->gen >= 4) 696 i915_enable_pipestat(dev_priv, PIPE_A, 697 PIPE_LEGACY_BLC_EVENT_STATUS); 698 699 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 700 } 701 702 /** 703 * i915_pipe_enabled - check if a pipe is enabled 704 * @dev: DRM device 705 * @pipe: pipe to check 706 * 707 * Reading certain registers when the pipe is disabled can hang the chip. 708 * Use this routine to make sure the PLL is running and the pipe is active 709 * before reading such registers if unsure. 710 */ 711 static int 712 i915_pipe_enabled(struct drm_device *dev, int pipe) 713 { 714 struct drm_i915_private *dev_priv = dev->dev_private; 715 716 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 717 /* Locking is horribly broken here, but whatever. */ 718 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 719 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 720 721 return intel_crtc->active; 722 } else { 723 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 724 } 725 } 726 727 /* 728 * This timing diagram depicts the video signal in and 729 * around the vertical blanking period. 730 * 731 * Assumptions about the fictitious mode used in this example: 732 * vblank_start >= 3 733 * vsync_start = vblank_start + 1 734 * vsync_end = vblank_start + 2 735 * vtotal = vblank_start + 3 736 * 737 * start of vblank: 738 * latch double buffered registers 739 * increment frame counter (ctg+) 740 * generate start of vblank interrupt (gen4+) 741 * | 742 * | frame start: 743 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 744 * | may be shifted forward 1-3 extra lines via PIPECONF 745 * | | 746 * | | start of vsync: 747 * | | generate vsync interrupt 748 * | | | 749 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 750 * . \hs/ . \hs/ \hs/ \hs/ . 
\hs/ 751 * ----va---> <-----------------vb--------------------> <--------va------------- 752 * | | <----vs-----> | 753 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 754 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 755 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 756 * | | | 757 * last visible pixel first visible pixel 758 * | increment frame counter (gen3/4) 759 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 760 * 761 * x = horizontal active 762 * _ = horizontal blanking 763 * hs = horizontal sync 764 * va = vertical active 765 * vb = vertical blanking 766 * vs = vertical sync 767 * vbs = vblank_start (number) 768 * 769 * Summary: 770 * - most events happen at the start of horizontal sync 771 * - frame start happens at the start of horizontal blank, 1-4 lines 772 * (depending on PIPECONF settings) after the start of vblank 773 * - gen3/4 pixel and frame counter are synchronized with the start 774 * of horizontal active on the first line of vertical active 775 */ 776 777 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe) 778 { 779 /* Gen2 doesn't have a hardware frame counter */ 780 return 0; 781 } 782 783 /* Called from drm generic code, passed a 'crtc', which 784 * we use as a pipe index 785 */ 786 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 787 { 788 struct drm_i915_private *dev_priv = dev->dev_private; 789 unsigned long high_frame; 790 unsigned long low_frame; 791 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 792 793 if (!i915_pipe_enabled(dev, pipe)) { 794 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 795 "pipe %c\n", pipe_name(pipe)); 796 return 0; 797 } 798 799 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 800 struct intel_crtc *intel_crtc = 801 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 802 const struct drm_display_mode *mode = 803 &intel_crtc->config.adjusted_mode; 804 805 htotal = mode->crtc_htotal; 806 hsync_start = mode->crtc_hsync_start; 807 vbl_start = mode->crtc_vblank_start; 808 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 809 vbl_start = DIV_ROUND_UP(vbl_start, 2); 810 } else { 811 enum transcoder cpu_transcoder = (enum transcoder) pipe; 812 813 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; 814 hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1; 815 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; 816 if ((I915_READ(PIPECONF(cpu_transcoder)) & 817 PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE) 818 vbl_start = DIV_ROUND_UP(vbl_start, 2); 819 } 820 821 /* Convert to pixel count */ 822 vbl_start *= htotal; 823 824 /* Start of vblank event occurs at start of hsync */ 825 vbl_start -= htotal - hsync_start; 826 827 high_frame = PIPEFRAME(pipe); 828 low_frame = PIPEFRAMEPIXEL(pipe); 829 830 /* 831 * High & low register fields aren't synchronized, so make sure 832 * we get a low value that's stable across two reads of the high 833 * register. 834 */ 835 do { 836 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 837 low = I915_READ(low_frame); 838 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 839 } while (high1 != high2); 840 841 high1 >>= PIPE_FRAME_HIGH_SHIFT; 842 pixel = low & PIPE_PIXEL_MASK; 843 low >>= PIPE_FRAME_LOW_SHIFT; 844 845 /* 846 * The frame counter increments at beginning of active. 
847 * Cook up a vblank counter by also checking the pixel 848 * counter against vblank start. 849 */ 850 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 851 } 852 853 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 854 { 855 struct drm_i915_private *dev_priv = dev->dev_private; 856 int reg = PIPE_FRMCOUNT_GM45(pipe); 857 858 if (!i915_pipe_enabled(dev, pipe)) { 859 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 860 "pipe %c\n", pipe_name(pipe)); 861 return 0; 862 } 863 864 return I915_READ(reg); 865 } 866 867 /* raw reads, only for fast reads of display block, no need for forcewake etc. */ 868 #define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__) 869 870 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 871 { 872 struct drm_device *dev = crtc->base.dev; 873 struct drm_i915_private *dev_priv = dev->dev_private; 874 const struct drm_display_mode *mode = &crtc->config.adjusted_mode; 875 enum i915_pipe pipe = crtc->pipe; 876 int position, vtotal; 877 878 vtotal = mode->crtc_vtotal; 879 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 880 vtotal /= 2; 881 882 if (IS_GEN2(dev)) 883 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 884 else 885 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 886 887 /* 888 * See update_scanline_offset() for the details on the 889 * scanline_offset adjustment. 890 */ 891 return (position + crtc->scanline_offset) % vtotal; 892 } 893 894 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 895 unsigned int flags, int *vpos, int *hpos, 896 ktime_t *stime, ktime_t *etime) 897 { 898 struct drm_i915_private *dev_priv = dev->dev_private; 899 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 900 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 901 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode; 902 int position; 903 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 904 bool in_vbl = true; 905 int ret = 0; 906 907 if (!intel_crtc->active) { 908 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 909 "pipe %c\n", pipe_name(pipe)); 910 return 0; 911 } 912 913 htotal = mode->crtc_htotal; 914 hsync_start = mode->crtc_hsync_start; 915 vtotal = mode->crtc_vtotal; 916 vbl_start = mode->crtc_vblank_start; 917 vbl_end = mode->crtc_vblank_end; 918 919 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 920 vbl_start = DIV_ROUND_UP(vbl_start, 2); 921 vbl_end /= 2; 922 vtotal /= 2; 923 } 924 925 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 926 927 /* 928 * Lock uncore.lock, as we will do multiple timing critical raw 929 * register reads, potentially with preemption disabled, so the 930 * following code must not block on uncore.lock. 931 */ 932 lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE); 933 934 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 935 936 /* Get optional system timestamp before query. */ 937 if (stime) 938 *stime = ktime_get(); 939 940 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 941 /* No obvious pixelcount register. Only query vertical 942 * scanout position from Display scan line register. 943 */ 944 position = __intel_get_crtc_scanline(intel_crtc); 945 } else { 946 /* Have access to pixelcount since start of frame. 947 * We can split this into vertical and horizontal 948 * scanout position. 
949 */ 950 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 951 952 /* convert to pixel counts */ 953 vbl_start *= htotal; 954 vbl_end *= htotal; 955 vtotal *= htotal; 956 957 /* 958 * In interlaced modes, the pixel counter counts all pixels, 959 * so one field will have htotal more pixels. In order to avoid 960 * the reported position from jumping backwards when the pixel 961 * counter is beyond the length of the shorter field, just 962 * clamp the position the length of the shorter field. This 963 * matches how the scanline counter based position works since 964 * the scanline counter doesn't count the two half lines. 965 */ 966 if (position >= vtotal) 967 position = vtotal - 1; 968 969 /* 970 * Start of vblank interrupt is triggered at start of hsync, 971 * just prior to the first active line of vblank. However we 972 * consider lines to start at the leading edge of horizontal 973 * active. So, should we get here before we've crossed into 974 * the horizontal active of the first line in vblank, we would 975 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 976 * always add htotal-hsync_start to the current pixel position. 977 */ 978 position = (position + htotal - hsync_start) % vtotal; 979 } 980 981 /* Get optional system timestamp after query. */ 982 if (etime) 983 *etime = ktime_get(); 984 985 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 986 987 lockmgr(&dev_priv->uncore.lock, LK_RELEASE); 988 989 in_vbl = position >= vbl_start && position < vbl_end; 990 991 /* 992 * While in vblank, position will be negative 993 * counting up towards 0 at vbl_end. And outside 994 * vblank, position will be positive counting 995 * up since vbl_end. 996 */ 997 if (position >= vbl_start) 998 position -= vbl_end; 999 else 1000 position += vtotal - vbl_end; 1001 1002 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 1003 *vpos = position; 1004 *hpos = 0; 1005 } else { 1006 *vpos = position / htotal; 1007 *hpos = position - (*vpos * htotal); 1008 } 1009 1010 /* In vblank? 
*/ 1011 if (in_vbl) 1012 ret |= DRM_SCANOUTPOS_INVBL; 1013 1014 return ret; 1015 } 1016 1017 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1018 { 1019 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1020 int position; 1021 1022 lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE); 1023 position = __intel_get_crtc_scanline(crtc); 1024 lockmgr(&dev_priv->uncore.lock, LK_RELEASE); 1025 1026 return position; 1027 } 1028 1029 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 1030 int *max_error, 1031 struct timeval *vblank_time, 1032 unsigned flags) 1033 { 1034 struct drm_crtc *crtc; 1035 1036 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { 1037 DRM_ERROR("Invalid crtc %d\n", pipe); 1038 return -EINVAL; 1039 } 1040 1041 /* Get drm_crtc to timestamp: */ 1042 crtc = intel_get_crtc_for_pipe(dev, pipe); 1043 if (crtc == NULL) { 1044 DRM_ERROR("Invalid crtc %d\n", pipe); 1045 return -EINVAL; 1046 } 1047 1048 if (!crtc->enabled) { 1049 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 1050 return -EBUSY; 1051 } 1052 1053 /* Helper routine in DRM core does all the work: */ 1054 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 1055 vblank_time, flags, 1056 crtc, 1057 &to_intel_crtc(crtc)->config.adjusted_mode); 1058 } 1059 1060 static bool intel_hpd_irq_event(struct drm_device *dev, 1061 struct drm_connector *connector) 1062 { 1063 enum drm_connector_status old_status; 1064 1065 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 1066 old_status = connector->status; 1067 1068 connector->status = connector->funcs->detect(connector, false); 1069 if (old_status == connector->status) 1070 return false; 1071 1072 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", 1073 connector->base.id, 1074 connector->name, 1075 drm_get_connector_status_name(old_status), 1076 drm_get_connector_status_name(connector->status)); 1077 1078 return true; 1079 } 1080 1081 static void i915_digport_work_func(struct work_struct *work) 1082 { 1083 struct drm_i915_private *dev_priv = 1084 container_of(work, struct drm_i915_private, dig_port_work); 1085 u32 long_port_mask, short_port_mask; 1086 struct intel_digital_port *intel_dig_port; 1087 int i, ret; 1088 u32 old_bits = 0; 1089 1090 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1091 long_port_mask = dev_priv->long_hpd_port_mask; 1092 dev_priv->long_hpd_port_mask = 0; 1093 short_port_mask = dev_priv->short_hpd_port_mask; 1094 dev_priv->short_hpd_port_mask = 0; 1095 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1096 1097 for (i = 0; i < I915_MAX_PORTS; i++) { 1098 bool valid = false; 1099 bool long_hpd = false; 1100 intel_dig_port = dev_priv->hpd_irq_port[i]; 1101 if (!intel_dig_port || !intel_dig_port->hpd_pulse) 1102 continue; 1103 1104 if (long_port_mask & (1 << i)) { 1105 valid = true; 1106 long_hpd = true; 1107 } else if (short_port_mask & (1 << i)) 1108 valid = true; 1109 1110 if (valid) { 1111 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd); 1112 if (ret == true) { 1113 /* if we get true fallback to old school hpd */ 1114 old_bits |= (1 << intel_dig_port->base.hpd_pin); 1115 } 1116 } 1117 } 1118 1119 if (old_bits) { 1120 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1121 dev_priv->hpd_event_bits |= old_bits; 1122 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1123 schedule_work(&dev_priv->hotplug_work); 1124 } 1125 } 1126 1127 /* 1128 * Handle hotplug events outside the interrupt handler proper. 
1129 */ 1130 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 1131 1132 static void i915_hotplug_work_func(struct work_struct *work) 1133 { 1134 struct drm_i915_private *dev_priv = 1135 container_of(work, struct drm_i915_private, hotplug_work); 1136 struct drm_device *dev = dev_priv->dev; 1137 struct drm_mode_config *mode_config = &dev->mode_config; 1138 struct intel_connector *intel_connector; 1139 struct intel_encoder *intel_encoder; 1140 struct drm_connector *connector; 1141 bool hpd_disabled = false; 1142 bool changed = false; 1143 u32 hpd_event_bits; 1144 1145 mutex_lock(&mode_config->mutex); 1146 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 1147 1148 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1149 1150 hpd_event_bits = dev_priv->hpd_event_bits; 1151 dev_priv->hpd_event_bits = 0; 1152 list_for_each_entry(connector, &mode_config->connector_list, head) { 1153 intel_connector = to_intel_connector(connector); 1154 if (!intel_connector->encoder) 1155 continue; 1156 intel_encoder = intel_connector->encoder; 1157 if (intel_encoder->hpd_pin > HPD_NONE && 1158 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 1159 connector->polled == DRM_CONNECTOR_POLL_HPD) { 1160 DRM_INFO("HPD interrupt storm detected on connector %s: " 1161 "switching from hotplug detection to polling\n", 1162 connector->name); 1163 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 1164 connector->polled = DRM_CONNECTOR_POLL_CONNECT 1165 | DRM_CONNECTOR_POLL_DISCONNECT; 1166 hpd_disabled = true; 1167 } 1168 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 1169 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 1170 connector->name, intel_encoder->hpd_pin); 1171 } 1172 } 1173 /* if there were no outputs to poll, poll was disabled, 1174 * therefore make sure it's enabled when disabling HPD on 1175 * some connectors */ 1176 if (hpd_disabled) { 1177 drm_kms_helper_poll_enable(dev); 1178 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, 1179 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 1180 } 1181 1182 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1183 1184 list_for_each_entry(connector, &mode_config->connector_list, head) { 1185 intel_connector = to_intel_connector(connector); 1186 if (!intel_connector->encoder) 1187 continue; 1188 intel_encoder = intel_connector->encoder; 1189 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 1190 if (intel_encoder->hot_plug) 1191 intel_encoder->hot_plug(intel_encoder); 1192 if (intel_hpd_irq_event(dev, connector)) 1193 changed = true; 1194 } 1195 } 1196 mutex_unlock(&mode_config->mutex); 1197 1198 if (changed) 1199 drm_kms_helper_hotplug_event(dev); 1200 } 1201 1202 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 1203 { 1204 struct drm_i915_private *dev_priv = dev->dev_private; 1205 u32 busy_up, busy_down, max_avg, min_avg; 1206 u8 new_delay; 1207 1208 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 1209 1210 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1211 1212 new_delay = dev_priv->ips.cur_delay; 1213 1214 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1215 busy_up = I915_READ(RCPREVBSYTUPAVG); 1216 busy_down = I915_READ(RCPREVBSYTDNAVG); 1217 max_avg = I915_READ(RCBMAXAVG); 1218 min_avg = I915_READ(RCBMINAVG); 1219 1220 /* Handle RCS change request from hw */ 1221 if (busy_up > max_avg) { 1222 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1223 new_delay = dev_priv->ips.cur_delay - 1; 1224 if (new_delay < dev_priv->ips.max_delay) 1225 new_delay = dev_priv->ips.max_delay; 1226 } else 
if (busy_down < min_avg) { 1227 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1228 new_delay = dev_priv->ips.cur_delay + 1; 1229 if (new_delay > dev_priv->ips.min_delay) 1230 new_delay = dev_priv->ips.min_delay; 1231 } 1232 1233 if (ironlake_set_drps(dev, new_delay)) 1234 dev_priv->ips.cur_delay = new_delay; 1235 1236 lockmgr(&mchdev_lock, LK_RELEASE); 1237 1238 return; 1239 } 1240 1241 static void notify_ring(struct drm_device *dev, 1242 struct intel_engine_cs *ring) 1243 { 1244 if (!intel_ring_initialized(ring)) 1245 return; 1246 1247 trace_i915_gem_request_complete(ring); 1248 1249 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1250 intel_notify_mmio_flip(ring); 1251 1252 wake_up_all(&ring->irq_queue); 1253 i915_queue_hangcheck(dev); 1254 } 1255 1256 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, 1257 struct intel_rps_ei *rps_ei) 1258 { 1259 u32 cz_ts, cz_freq_khz; 1260 u32 render_count, media_count; 1261 u32 elapsed_render, elapsed_media, elapsed_time; 1262 u32 residency = 0; 1263 1264 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 1265 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4); 1266 1267 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG); 1268 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG); 1269 1270 if (rps_ei->cz_clock == 0) { 1271 rps_ei->cz_clock = cz_ts; 1272 rps_ei->render_c0 = render_count; 1273 rps_ei->media_c0 = media_count; 1274 1275 return dev_priv->rps.cur_freq; 1276 } 1277 1278 elapsed_time = cz_ts - rps_ei->cz_clock; 1279 rps_ei->cz_clock = cz_ts; 1280 1281 elapsed_render = render_count - rps_ei->render_c0; 1282 rps_ei->render_c0 = render_count; 1283 1284 elapsed_media = media_count - rps_ei->media_c0; 1285 rps_ei->media_c0 = media_count; 1286 1287 /* Convert all the counters into common unit of milli sec */ 1288 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; 1289 elapsed_render /= cz_freq_khz; 1290 elapsed_media /= cz_freq_khz; 1291 1292 /* 1293 * Calculate overall C0 residency percentage 1294 * only if elapsed time is non zero 1295 */ 1296 if (elapsed_time) { 1297 residency = 1298 ((max(elapsed_render, elapsed_media) * 100) 1299 / elapsed_time); 1300 } 1301 1302 return residency; 1303 } 1304 1305 /** 1306 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU 1307 * busy-ness calculated from C0 counters of render & media power wells 1308 * @dev_priv: DRM device private 1309 * 1310 */ 1311 static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) 1312 { 1313 u32 residency_C0_up = 0, residency_C0_down = 0; 1314 u8 new_delay, adj; 1315 1316 dev_priv->rps.ei_interrupt_count++; 1317 1318 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 1319 1320 1321 if (dev_priv->rps.up_ei.cz_clock == 0) { 1322 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei); 1323 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei); 1324 return dev_priv->rps.cur_freq; 1325 } 1326 1327 1328 /* 1329 * To down throttle, C0 residency should be less than down threshold 1330 * for continous EI intervals. So calculate down EI counters 1331 * once in VLV_INT_COUNT_FOR_DOWN_EI 1332 */ 1333 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { 1334 1335 dev_priv->rps.ei_interrupt_count = 0; 1336 1337 residency_C0_down = vlv_c0_residency(dev_priv, 1338 &dev_priv->rps.down_ei); 1339 } else { 1340 residency_C0_up = vlv_c0_residency(dev_priv, 1341 &dev_priv->rps.up_ei); 1342 } 1343 1344 new_delay = dev_priv->rps.cur_freq; 1345 1346 adj = dev_priv->rps.last_adj; 1347 /* C0 residency is greater than UP threshold. 
Increase Frequency */ 1348 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { 1349 if (adj > 0) 1350 adj *= 2; 1351 else 1352 adj = 1; 1353 1354 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) 1355 new_delay = dev_priv->rps.cur_freq + adj; 1356 1357 /* 1358 * For better performance, jump directly 1359 * to RPe if we're below it. 1360 */ 1361 if (new_delay < dev_priv->rps.efficient_freq) 1362 new_delay = dev_priv->rps.efficient_freq; 1363 1364 } else if (!dev_priv->rps.ei_interrupt_count && 1365 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { 1366 if (adj < 0) 1367 adj *= 2; 1368 else 1369 adj = -1; 1370 /* 1371 * This means, C0 residency is less than down threshold over 1372 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq 1373 */ 1374 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) 1375 new_delay = dev_priv->rps.cur_freq + adj; 1376 } 1377 1378 return new_delay; 1379 } 1380 1381 static void gen6_pm_rps_work(struct work_struct *work) 1382 { 1383 struct drm_i915_private *dev_priv = 1384 container_of(work, struct drm_i915_private, rps.work); 1385 u32 pm_iir; 1386 int new_delay, adj; 1387 1388 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1389 pm_iir = dev_priv->rps.pm_iir; 1390 dev_priv->rps.pm_iir = 0; 1391 if (INTEL_INFO(dev_priv->dev)->gen >= 8) 1392 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1393 else { 1394 /* Make sure not to corrupt PMIMR state used by ringbuffer */ 1395 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1396 } 1397 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1398 1399 /* Make sure we didn't queue anything we're not going to process. */ 1400 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1401 1402 if ((pm_iir & dev_priv->pm_rps_events) == 0) 1403 return; 1404 1405 mutex_lock(&dev_priv->rps.hw_lock); 1406 1407 adj = dev_priv->rps.last_adj; 1408 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1409 if (adj > 0) 1410 adj *= 2; 1411 else { 1412 /* CHV needs even encode values */ 1413 adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1; 1414 } 1415 new_delay = dev_priv->rps.cur_freq + adj; 1416 1417 /* 1418 * For better performance, jump directly 1419 * to RPe if we're below it. 1420 */ 1421 if (new_delay < dev_priv->rps.efficient_freq) 1422 new_delay = dev_priv->rps.efficient_freq; 1423 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1424 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1425 new_delay = dev_priv->rps.efficient_freq; 1426 else 1427 new_delay = dev_priv->rps.min_freq_softlimit; 1428 adj = 0; 1429 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1430 new_delay = vlv_calc_delay_from_C0_counters(dev_priv); 1431 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1432 if (adj < 0) 1433 adj *= 2; 1434 else { 1435 /* CHV needs even encode values */ 1436 adj = IS_CHERRYVIEW(dev_priv->dev) ? 
-2 : -1; 1437 } 1438 new_delay = dev_priv->rps.cur_freq + adj; 1439 } else { /* unknown event */ 1440 new_delay = dev_priv->rps.cur_freq; 1441 } 1442 1443 /* sysfs frequency interfaces may have snuck in while servicing the 1444 * interrupt 1445 */ 1446 new_delay = clamp_t(int, new_delay, 1447 dev_priv->rps.min_freq_softlimit, 1448 dev_priv->rps.max_freq_softlimit); 1449 1450 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq; 1451 1452 if (IS_VALLEYVIEW(dev_priv->dev)) 1453 valleyview_set_rps(dev_priv->dev, new_delay); 1454 else 1455 gen6_set_rps(dev_priv->dev, new_delay); 1456 1457 mutex_unlock(&dev_priv->rps.hw_lock); 1458 } 1459 1460 1461 /** 1462 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1463 * occurred. 1464 * @work: workqueue struct 1465 * 1466 * Doesn't actually do anything except notify userspace. As a consequence of 1467 * this event, userspace should try to remap the bad rows since statistically 1468 * it is likely the same row is more likely to go bad again. 1469 */ 1470 static void ivybridge_parity_work(struct work_struct *work) 1471 { 1472 struct drm_i915_private *dev_priv = 1473 container_of(work, struct drm_i915_private, l3_parity.error_work); 1474 u32 error_status, row, bank, subbank; 1475 char *parity_event[6]; 1476 uint32_t misccpctl; 1477 uint8_t slice = 0; 1478 1479 /* We must turn off DOP level clock gating to access the L3 registers. 1480 * In order to prevent a get/put style interface, acquire struct mutex 1481 * any time we access those registers. 1482 */ 1483 mutex_lock(&dev_priv->dev->struct_mutex); 1484 1485 /* If we've screwed up tracking, just let the interrupt fire again */ 1486 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1487 goto out; 1488 1489 misccpctl = I915_READ(GEN7_MISCCPCTL); 1490 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1491 POSTING_READ(GEN7_MISCCPCTL); 1492 1493 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1494 u32 reg; 1495 1496 slice--; 1497 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) 1498 break; 1499 1500 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1501 1502 reg = GEN7_L3CDERRST1 + (slice * 0x200); 1503 1504 error_status = I915_READ(reg); 1505 row = GEN7_PARITY_ERROR_ROW(error_status); 1506 bank = GEN7_PARITY_ERROR_BANK(error_status); 1507 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1508 1509 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1510 POSTING_READ(reg); 1511 1512 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1513 parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row); 1514 parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank); 1515 parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1516 parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice); 1517 parity_event[5] = NULL; 1518 1519 #if 0 1520 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1521 KOBJ_CHANGE, parity_event); 1522 #endif 1523 1524 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1525 slice, row, bank, subbank); 1526 1527 kfree(parity_event[4]); 1528 kfree(parity_event[3]); 1529 kfree(parity_event[2]); 1530 kfree(parity_event[1]); 1531 } 1532 1533 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1534 1535 out: 1536 WARN_ON(dev_priv->l3_parity.which_slice); 1537 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1538 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1539 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1540 1541 mutex_unlock(&dev_priv->dev->struct_mutex); 1542 } 1543 1544 
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir) 1545 { 1546 struct drm_i915_private *dev_priv = dev->dev_private; 1547 1548 if (!HAS_L3_DPF(dev)) 1549 return; 1550 1551 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1552 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev)); 1553 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1554 1555 iir &= GT_PARITY_ERROR(dev); 1556 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1557 dev_priv->l3_parity.which_slice |= 1 << 1; 1558 1559 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1560 dev_priv->l3_parity.which_slice |= 1 << 0; 1561 1562 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1563 } 1564 1565 static void ilk_gt_irq_handler(struct drm_device *dev, 1566 struct drm_i915_private *dev_priv, 1567 u32 gt_iir) 1568 { 1569 if (gt_iir & 1570 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1571 notify_ring(dev, &dev_priv->ring[RCS]); 1572 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1573 notify_ring(dev, &dev_priv->ring[VCS]); 1574 } 1575 1576 static void snb_gt_irq_handler(struct drm_device *dev, 1577 struct drm_i915_private *dev_priv, 1578 u32 gt_iir) 1579 { 1580 1581 if (gt_iir & 1582 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1583 notify_ring(dev, &dev_priv->ring[RCS]); 1584 if (gt_iir & GT_BSD_USER_INTERRUPT) 1585 notify_ring(dev, &dev_priv->ring[VCS]); 1586 if (gt_iir & GT_BLT_USER_INTERRUPT) 1587 notify_ring(dev, &dev_priv->ring[BCS]); 1588 1589 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1590 GT_BSD_CS_ERROR_INTERRUPT | 1591 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 1592 i915_handle_error(dev, false, "GT error interrupt 0x%08x", 1593 gt_iir); 1594 } 1595 1596 if (gt_iir & GT_PARITY_ERROR(dev)) 1597 ivybridge_parity_error_irq_handler(dev, gt_iir); 1598 } 1599 1600 static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1601 { 1602 if ((pm_iir & dev_priv->pm_rps_events) == 0) 1603 return; 1604 1605 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1606 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1607 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1608 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1609 1610 queue_work(dev_priv->wq, &dev_priv->rps.work); 1611 } 1612 1613 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, 1614 struct drm_i915_private *dev_priv, 1615 u32 master_ctl) 1616 { 1617 u32 rcs, bcs, vcs; 1618 uint32_t tmp = 0; 1619 1620 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1621 tmp = I915_READ(GEN8_GT_IIR(0)); 1622 if (tmp) { 1623 I915_WRITE(GEN8_GT_IIR(0), tmp); 1624 rcs = tmp >> GEN8_RCS_IRQ_SHIFT; 1625 bcs = tmp >> GEN8_BCS_IRQ_SHIFT; 1626 if (rcs & GT_RENDER_USER_INTERRUPT) 1627 notify_ring(dev, &dev_priv->ring[RCS]); 1628 if (bcs & GT_RENDER_USER_INTERRUPT) 1629 notify_ring(dev, &dev_priv->ring[BCS]); 1630 } else 1631 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1632 } 1633 1634 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1635 tmp = I915_READ(GEN8_GT_IIR(1)); 1636 if (tmp) { 1637 I915_WRITE(GEN8_GT_IIR(1), tmp); 1638 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; 1639 if (vcs & GT_RENDER_USER_INTERRUPT) 1640 notify_ring(dev, &dev_priv->ring[VCS]); 1641 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; 1642 if (vcs & GT_RENDER_USER_INTERRUPT) 1643 notify_ring(dev, &dev_priv->ring[VCS2]); 1644 } else 1645 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1646 } 1647 1648 if (master_ctl & GEN8_GT_PM_IRQ) { 1649 tmp = I915_READ(GEN8_GT_IIR(2)); 1650 if (tmp & dev_priv->pm_rps_events) { 1651 
I915_WRITE(GEN8_GT_IIR(2), 1652 tmp & dev_priv->pm_rps_events); 1653 gen8_rps_irq_handler(dev_priv, tmp); 1654 } else 1655 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1656 } 1657 1658 if (master_ctl & GEN8_GT_VECS_IRQ) { 1659 tmp = I915_READ(GEN8_GT_IIR(3)); 1660 if (tmp) { 1661 I915_WRITE(GEN8_GT_IIR(3), tmp); 1662 vcs = tmp >> GEN8_VECS_IRQ_SHIFT; 1663 if (vcs & GT_RENDER_USER_INTERRUPT) 1664 notify_ring(dev, &dev_priv->ring[VECS]); 1665 } else 1666 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1667 } 1668 1669 } 1670 1671 #define HPD_STORM_DETECT_PERIOD 1000 1672 #define HPD_STORM_THRESHOLD 5 1673 1674 static int ilk_port_to_hotplug_shift(enum port port) 1675 { 1676 switch (port) { 1677 case PORT_A: 1678 case PORT_E: 1679 default: 1680 return -1; 1681 case PORT_B: 1682 return 0; 1683 case PORT_C: 1684 return 8; 1685 case PORT_D: 1686 return 16; 1687 } 1688 } 1689 1690 static int g4x_port_to_hotplug_shift(enum port port) 1691 { 1692 switch (port) { 1693 case PORT_A: 1694 case PORT_E: 1695 default: 1696 return -1; 1697 case PORT_B: 1698 return 17; 1699 case PORT_C: 1700 return 19; 1701 case PORT_D: 1702 return 21; 1703 } 1704 } 1705 1706 static inline enum port get_port_from_pin(enum hpd_pin pin) 1707 { 1708 switch (pin) { 1709 case HPD_PORT_B: 1710 return PORT_B; 1711 case HPD_PORT_C: 1712 return PORT_C; 1713 case HPD_PORT_D: 1714 return PORT_D; 1715 default: 1716 return PORT_A; /* no hpd */ 1717 } 1718 } 1719 1720 static inline void intel_hpd_irq_handler(struct drm_device *dev, 1721 u32 hotplug_trigger, 1722 u32 dig_hotplug_reg, 1723 const u32 *hpd) 1724 { 1725 struct drm_i915_private *dev_priv = dev->dev_private; 1726 int i; 1727 enum port port; 1728 bool storm_detected = false; 1729 bool queue_dig = false, queue_hp = false; 1730 u32 dig_shift; 1731 u32 dig_port_mask = 0; 1732 1733 if (!hotplug_trigger) 1734 return; 1735 1736 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", 1737 hotplug_trigger, dig_hotplug_reg); 1738 1739 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1740 for (i = 1; i < HPD_NUM_PINS; i++) { 1741 if (!(hpd[i] & hotplug_trigger)) 1742 continue; 1743 1744 port = get_port_from_pin(i); 1745 if (port && dev_priv->hpd_irq_port[port]) { 1746 bool long_hpd; 1747 1748 if (IS_G4X(dev)) { 1749 dig_shift = g4x_port_to_hotplug_shift(port); 1750 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1751 } else { 1752 dig_shift = ilk_port_to_hotplug_shift(port); 1753 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1754 } 1755 1756 DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd); 1757 /* for long HPD pulses we want to have the digital queue happen, 1758 but we still want HPD storm detection to function. */ 1759 if (long_hpd) { 1760 dev_priv->long_hpd_port_mask |= (1 << port); 1761 dig_port_mask |= hpd[i]; 1762 } else { 1763 /* for short HPD just trigger the digital queue */ 1764 dev_priv->short_hpd_port_mask |= (1 << port); 1765 hotplug_trigger &= ~hpd[i]; 1766 } 1767 queue_dig = true; 1768 } 1769 } 1770 1771 for (i = 1; i < HPD_NUM_PINS; i++) { 1772 if (hpd[i] & hotplug_trigger && 1773 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { 1774 /* 1775 * On GMCH platforms the interrupt mask bits only 1776 * prevent irq generation, not the setting of the 1777 * hotplug bits itself. So only WARN about unexpected 1778 * interrupts on saner platforms. 
1779 */ 1780 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), 1781 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", 1782 hotplug_trigger, i, hpd[i]); 1783 1784 continue; 1785 } 1786 1787 if (!(hpd[i] & hotplug_trigger) || 1788 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) 1789 continue; 1790 1791 if (!(dig_port_mask & hpd[i])) { 1792 dev_priv->hpd_event_bits |= (1 << i); 1793 queue_hp = true; 1794 } 1795 1796 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, 1797 dev_priv->hpd_stats[i].hpd_last_jiffies 1798 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { 1799 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; 1800 dev_priv->hpd_stats[i].hpd_cnt = 0; 1801 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); 1802 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { 1803 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; 1804 dev_priv->hpd_event_bits &= ~(1 << i); 1805 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); 1806 storm_detected = true; 1807 } else { 1808 dev_priv->hpd_stats[i].hpd_cnt++; 1809 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, 1810 dev_priv->hpd_stats[i].hpd_cnt); 1811 } 1812 } 1813 1814 if (storm_detected) 1815 dev_priv->display.hpd_irq_setup(dev); 1816 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1817 1818 /* 1819 * Our hotplug handler can grab modeset locks (by calling down into the 1820 * fb helpers). Hence it must not be run on our own dev-priv->wq work 1821 * queue for otherwise the flush_work in the pageflip code will 1822 * deadlock. 1823 */ 1824 if (queue_dig) 1825 schedule_work(&dev_priv->dig_port_work); 1826 if (queue_hp) 1827 schedule_work(&dev_priv->hotplug_work); 1828 } 1829 1830 static void gmbus_irq_handler(struct drm_device *dev) 1831 { 1832 struct drm_i915_private *dev_priv = dev->dev_private; 1833 1834 wake_up_all(&dev_priv->gmbus_wait_queue); 1835 } 1836 1837 static void dp_aux_irq_handler(struct drm_device *dev) 1838 { 1839 struct drm_i915_private *dev_priv = dev->dev_private; 1840 1841 wake_up_all(&dev_priv->gmbus_wait_queue); 1842 } 1843 1844 #if defined(CONFIG_DEBUG_FS) 1845 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe, 1846 uint32_t crc0, uint32_t crc1, 1847 uint32_t crc2, uint32_t crc3, 1848 uint32_t crc4) 1849 { 1850 struct drm_i915_private *dev_priv = dev->dev_private; 1851 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1852 struct intel_pipe_crc_entry *entry; 1853 int head, tail; 1854 1855 spin_lock(&pipe_crc->lock); 1856 1857 if (!pipe_crc->entries) { 1858 spin_unlock(&pipe_crc->lock); 1859 DRM_ERROR("spurious interrupt\n"); 1860 return; 1861 } 1862 1863 head = pipe_crc->head; 1864 tail = pipe_crc->tail; 1865 1866 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1867 spin_unlock(&pipe_crc->lock); 1868 DRM_ERROR("CRC buffer overflowing\n"); 1869 return; 1870 } 1871 1872 entry = &pipe_crc->entries[head]; 1873 1874 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1875 entry->crc[0] = crc0; 1876 entry->crc[1] = crc1; 1877 entry->crc[2] = crc2; 1878 entry->crc[3] = crc3; 1879 entry->crc[4] = crc4; 1880 1881 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1882 pipe_crc->head = head; 1883 1884 spin_unlock(&pipe_crc->lock); 1885 1886 wake_up_interruptible(&pipe_crc->wq); 1887 } 1888 #else 1889 static inline void 1890 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe, 1891 uint32_t crc0, uint32_t crc1, 1892 uint32_t crc2, uint32_t crc3, 
1893 uint32_t crc4) {} 1894 #endif 1895 1896 1897 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe) 1898 { 1899 struct drm_i915_private *dev_priv = dev->dev_private; 1900 1901 display_pipe_crc_irq_handler(dev, pipe, 1902 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1903 0, 0, 0, 0); 1904 } 1905 1906 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe) 1907 { 1908 struct drm_i915_private *dev_priv = dev->dev_private; 1909 1910 display_pipe_crc_irq_handler(dev, pipe, 1911 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1912 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1913 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1914 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1915 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1916 } 1917 1918 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe) 1919 { 1920 struct drm_i915_private *dev_priv = dev->dev_private; 1921 uint32_t res1, res2; 1922 1923 if (INTEL_INFO(dev)->gen >= 3) 1924 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1925 else 1926 res1 = 0; 1927 1928 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1929 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1930 else 1931 res2 = 0; 1932 1933 display_pipe_crc_irq_handler(dev, pipe, 1934 I915_READ(PIPE_CRC_RES_RED(pipe)), 1935 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1936 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1937 res1, res2); 1938 } 1939 1940 /* The RPS events need forcewake, so we add them to a work queue and mask their 1941 * IMR bits until the work is done. Other interrupts can be processed without 1942 * the work queue. */ 1943 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1944 { 1945 if (pm_iir & dev_priv->pm_rps_events) { 1946 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1947 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1948 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1949 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1950 1951 queue_work(dev_priv->wq, &dev_priv->rps.work); 1952 } 1953 1954 if (HAS_VEBOX(dev_priv->dev)) { 1955 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1956 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1957 1958 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1959 i915_handle_error(dev_priv->dev, false, 1960 "VEBOX CS error interrupt 0x%08x", 1961 pm_iir); 1962 } 1963 } 1964 } 1965 1966 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe) 1967 { 1968 struct intel_crtc *crtc; 1969 1970 if (!drm_handle_vblank(dev, pipe)) 1971 return false; 1972 1973 crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); 1974 wake_up(&crtc->vbl_wait); 1975 1976 return true; 1977 } 1978 1979 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) 1980 { 1981 struct drm_i915_private *dev_priv = dev->dev_private; 1982 u32 pipe_stats[I915_MAX_PIPES] = { }; 1983 int pipe; 1984 1985 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1986 for_each_pipe(pipe) { 1987 int reg; 1988 u32 mask, iir_bit = 0; 1989 1990 /* 1991 * PIPESTAT bits get signalled even when the interrupt is 1992 * disabled with the mask bits, and some of the status bits do 1993 * not generate interrupts at all (like the underrun bit). Hence 1994 * we need to be careful that we only handle what we want to 1995 * handle. 
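 * The mask assembled below therefore starts out empty: the underrun bit
 * is added only while underrun reporting is enabled for the pipe, and
 * the bits from pipestat_irq_mask are added only if this pipe's event
 * bit is actually set in IIR.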
1996 */ 1997 mask = 0; 1998 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) 1999 mask |= PIPE_FIFO_UNDERRUN_STATUS; 2000 2001 switch (pipe) { 2002 case PIPE_A: 2003 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 2004 break; 2005 case PIPE_B: 2006 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 2007 break; 2008 case PIPE_C: 2009 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 2010 break; 2011 } 2012 if (iir & iir_bit) 2013 mask |= dev_priv->pipestat_irq_mask[pipe]; 2014 2015 if (!mask) 2016 continue; 2017 2018 reg = PIPESTAT(pipe); 2019 mask |= PIPESTAT_INT_ENABLE_MASK; 2020 pipe_stats[pipe] = I915_READ(reg) & mask; 2021 2022 /* 2023 * Clear the PIPE*STAT regs before the IIR 2024 */ 2025 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 2026 PIPESTAT_INT_STATUS_MASK)) 2027 I915_WRITE(reg, pipe_stats[pipe]); 2028 } 2029 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2030 2031 for_each_pipe(pipe) { 2032 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2033 intel_pipe_handle_vblank(dev, pipe); 2034 2035 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 2036 intel_prepare_page_flip(dev, pipe); 2037 intel_finish_page_flip(dev, pipe); 2038 } 2039 2040 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2041 i9xx_pipe_crc_irq_handler(dev, pipe); 2042 2043 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 2044 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2045 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 2046 } 2047 2048 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2049 gmbus_irq_handler(dev); 2050 } 2051 2052 static void i9xx_hpd_irq_handler(struct drm_device *dev) 2053 { 2054 struct drm_i915_private *dev_priv = dev->dev_private; 2055 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2056 2057 if (hotplug_status) { 2058 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2059 /* 2060 * Make sure hotplug status is cleared before we clear IIR, or else we 2061 * may miss hotplug events. 
2062 */ 2063 POSTING_READ(PORT_HOTPLUG_STAT); 2064 2065 if (IS_G4X(dev)) { 2066 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2067 2068 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); 2069 } else { 2070 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2071 2072 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); 2073 } 2074 2075 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && 2076 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2077 dp_aux_irq_handler(dev); 2078 } 2079 } 2080 2081 static irqreturn_t valleyview_irq_handler(void *arg) 2082 { 2083 struct drm_device *dev = arg; 2084 struct drm_i915_private *dev_priv = dev->dev_private; 2085 u32 iir, gt_iir, pm_iir; 2086 2087 while (true) { 2088 /* Find, clear, then process each source of interrupt */ 2089 2090 gt_iir = I915_READ(GTIIR); 2091 if (gt_iir) 2092 I915_WRITE(GTIIR, gt_iir); 2093 2094 pm_iir = I915_READ(GEN6_PMIIR); 2095 if (pm_iir) 2096 I915_WRITE(GEN6_PMIIR, pm_iir); 2097 2098 iir = I915_READ(VLV_IIR); 2099 if (iir) { 2100 /* Consume port before clearing IIR or we'll miss events */ 2101 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2102 i9xx_hpd_irq_handler(dev); 2103 I915_WRITE(VLV_IIR, iir); 2104 } 2105 2106 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2107 goto out; 2108 2109 if (gt_iir) 2110 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2111 if (pm_iir) 2112 gen6_rps_irq_handler(dev_priv, pm_iir); 2113 /* Call regardless, as some status bits might not be 2114 * signalled in iir */ 2115 valleyview_pipestat_irq_handler(dev, iir); 2116 } 2117 2118 out: 2119 return; 2120 } 2121 2122 static irqreturn_t cherryview_irq_handler(void *arg) 2123 { 2124 struct drm_device *dev = arg; 2125 struct drm_i915_private *dev_priv = dev->dev_private; 2126 u32 master_ctl, iir; 2127 2128 for (;;) { 2129 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2130 iir = I915_READ(VLV_IIR); 2131 2132 if (master_ctl == 0 && iir == 0) 2133 break; 2134 2135 2136 I915_WRITE(GEN8_MASTER_IRQ, 0); 2137 2138 /* Find, clear, then process each source of interrupt */ 2139 2140 if (iir) { 2141 /* Consume port before clearing IIR or we'll miss events */ 2142 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2143 i9xx_hpd_irq_handler(dev); 2144 I915_WRITE(VLV_IIR, iir); 2145 } 2146 2147 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2148 2149 /* Call regardless, as some status bits might not be 2150 * signalled in iir */ 2151 valleyview_pipestat_irq_handler(dev, iir); 2152 2153 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 2154 POSTING_READ(GEN8_MASTER_IRQ); 2155 } 2156 2157 } 2158 2159 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 2160 { 2161 struct drm_i915_private *dev_priv = dev->dev_private; 2162 int pipe; 2163 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2164 u32 dig_hotplug_reg; 2165 2166 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2167 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2168 2169 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); 2170 2171 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2172 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2173 SDE_AUDIO_POWER_SHIFT); 2174 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2175 port_name(port)); 2176 } 2177 2178 if (pch_iir & SDE_AUX_MASK) 2179 dp_aux_irq_handler(dev); 2180 2181 if (pch_iir & SDE_GMBUS) 2182 gmbus_irq_handler(dev); 2183 2184 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2185 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2186 2187 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2188 
DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2189 2190 if (pch_iir & SDE_POISON) 2191 DRM_ERROR("PCH poison interrupt\n"); 2192 2193 if (pch_iir & SDE_FDI_MASK) 2194 for_each_pipe(pipe) 2195 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2196 pipe_name(pipe), 2197 I915_READ(FDI_RX_IIR(pipe))); 2198 2199 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2200 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2201 2202 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2203 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2204 2205 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2206 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 2207 false)) 2208 DRM_ERROR("PCH transcoder A FIFO underrun\n"); 2209 2210 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2211 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 2212 false)) 2213 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 2214 } 2215 2216 static void ivb_err_int_handler(struct drm_device *dev) 2217 { 2218 struct drm_i915_private *dev_priv = dev->dev_private; 2219 u32 err_int = I915_READ(GEN7_ERR_INT); 2220 enum i915_pipe pipe; 2221 2222 if (err_int & ERR_INT_POISON) 2223 DRM_ERROR("Poison interrupt\n"); 2224 2225 for_each_pipe(pipe) { 2226 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 2227 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2228 false)) 2229 DRM_ERROR("Pipe %c FIFO underrun\n", 2230 pipe_name(pipe)); 2231 } 2232 2233 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2234 if (IS_IVYBRIDGE(dev)) 2235 ivb_pipe_crc_irq_handler(dev, pipe); 2236 else 2237 hsw_pipe_crc_irq_handler(dev, pipe); 2238 } 2239 } 2240 2241 I915_WRITE(GEN7_ERR_INT, err_int); 2242 } 2243 2244 static void cpt_serr_int_handler(struct drm_device *dev) 2245 { 2246 struct drm_i915_private *dev_priv = dev->dev_private; 2247 u32 serr_int = I915_READ(SERR_INT); 2248 2249 if (serr_int & SERR_INT_POISON) 2250 DRM_ERROR("PCH poison interrupt\n"); 2251 2252 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2253 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 2254 false)) 2255 DRM_ERROR("PCH transcoder A FIFO underrun\n"); 2256 2257 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2258 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 2259 false)) 2260 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 2261 2262 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2263 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 2264 false)) 2265 DRM_ERROR("PCH transcoder C FIFO underrun\n"); 2266 2267 I915_WRITE(SERR_INT, serr_int); 2268 } 2269 2270 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2271 { 2272 struct drm_i915_private *dev_priv = dev->dev_private; 2273 int pipe; 2274 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2275 u32 dig_hotplug_reg; 2276 2277 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2278 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2279 2280 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 2281 2282 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2283 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2284 SDE_AUDIO_POWER_SHIFT_CPT); 2285 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2286 port_name(port)); 2287 } 2288 2289 if (pch_iir & SDE_AUX_MASK_CPT) 2290 dp_aux_irq_handler(dev); 2291 2292 if (pch_iir & SDE_GMBUS_CPT) 2293 gmbus_irq_handler(dev); 2294 2295 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2296 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2297 2298 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2299 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 
2300 2301 if (pch_iir & SDE_FDI_MASK_CPT) 2302 for_each_pipe(pipe) 2303 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2304 pipe_name(pipe), 2305 I915_READ(FDI_RX_IIR(pipe))); 2306 2307 if (pch_iir & SDE_ERROR_CPT) 2308 cpt_serr_int_handler(dev); 2309 } 2310 2311 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2312 { 2313 struct drm_i915_private *dev_priv = dev->dev_private; 2314 enum i915_pipe pipe; 2315 2316 if (de_iir & DE_AUX_CHANNEL_A) 2317 dp_aux_irq_handler(dev); 2318 2319 if (de_iir & DE_GSE) 2320 intel_opregion_asle_intr(dev); 2321 2322 if (de_iir & DE_POISON) 2323 DRM_ERROR("Poison interrupt\n"); 2324 2325 for_each_pipe(pipe) { 2326 if (de_iir & DE_PIPE_VBLANK(pipe)) 2327 intel_pipe_handle_vblank(dev, pipe); 2328 2329 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2330 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2331 DRM_ERROR("Pipe %c FIFO underrun\n", 2332 pipe_name(pipe)); 2333 2334 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2335 i9xx_pipe_crc_irq_handler(dev, pipe); 2336 2337 /* plane/pipes map 1:1 on ilk+ */ 2338 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2339 intel_prepare_page_flip(dev, pipe); 2340 intel_finish_page_flip_plane(dev, pipe); 2341 } 2342 } 2343 2344 /* check event from PCH */ 2345 if (de_iir & DE_PCH_EVENT) { 2346 u32 pch_iir = I915_READ(SDEIIR); 2347 2348 if (HAS_PCH_CPT(dev)) 2349 cpt_irq_handler(dev, pch_iir); 2350 else 2351 ibx_irq_handler(dev, pch_iir); 2352 2353 /* should clear PCH hotplug event before clear CPU irq */ 2354 I915_WRITE(SDEIIR, pch_iir); 2355 } 2356 2357 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2358 ironlake_rps_change_irq_handler(dev); 2359 } 2360 2361 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2362 { 2363 struct drm_i915_private *dev_priv = dev->dev_private; 2364 enum i915_pipe pipe; 2365 2366 if (de_iir & DE_ERR_INT_IVB) 2367 ivb_err_int_handler(dev); 2368 2369 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2370 dp_aux_irq_handler(dev); 2371 2372 if (de_iir & DE_GSE_IVB) 2373 intel_opregion_asle_intr(dev); 2374 2375 for_each_pipe(pipe) { 2376 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2377 intel_pipe_handle_vblank(dev, pipe); 2378 2379 /* plane/pipes map 1:1 on ilk+ */ 2380 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2381 intel_prepare_page_flip(dev, pipe); 2382 intel_finish_page_flip_plane(dev, pipe); 2383 } 2384 } 2385 2386 /* check event from PCH */ 2387 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2388 u32 pch_iir = I915_READ(SDEIIR); 2389 2390 cpt_irq_handler(dev, pch_iir); 2391 2392 /* clear PCH hotplug event before clear CPU irq */ 2393 I915_WRITE(SDEIIR, pch_iir); 2394 } 2395 } 2396 2397 /* 2398 * To handle irqs with the minimum potential races with fresh interrupts, we: 2399 * 1 - Disable Master Interrupt Control. 2400 * 2 - Find the source(s) of the interrupt. 2401 * 3 - Clear the Interrupt Identity bits (IIR). 2402 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2403 * 5 - Re-enable Master Interrupt Control. 2404 */ 2405 static irqreturn_t ironlake_irq_handler(void *arg) 2406 { 2407 struct drm_device *dev = arg; 2408 struct drm_i915_private *dev_priv = dev->dev_private; 2409 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2410 2411 /* We get interrupts on unclaimed registers, so check for this before we 2412 * do any I915_{READ,WRITE}. 
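 * intel_uncore_check_errors() reports and clears any unclaimed-register
 * error that is already pending, so it does not get blamed on the MMIO
 * traffic this handler is about to generate.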
*/ 2413 intel_uncore_check_errors(dev); 2414 2415 /* disable master interrupt before clearing iir */ 2416 de_ier = I915_READ(DEIER); 2417 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2418 POSTING_READ(DEIER); 2419 2420 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2421 * interrupts will will be stored on its back queue, and then we'll be 2422 * able to process them after we restore SDEIER (as soon as we restore 2423 * it, we'll get an interrupt if SDEIIR still has something to process 2424 * due to its back queue). */ 2425 if (!HAS_PCH_NOP(dev)) { 2426 sde_ier = I915_READ(SDEIER); 2427 I915_WRITE(SDEIER, 0); 2428 POSTING_READ(SDEIER); 2429 } 2430 2431 /* Find, clear, then process each source of interrupt */ 2432 2433 gt_iir = I915_READ(GTIIR); 2434 if (gt_iir) { 2435 I915_WRITE(GTIIR, gt_iir); 2436 if (INTEL_INFO(dev)->gen >= 6) 2437 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2438 else 2439 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2440 } 2441 2442 de_iir = I915_READ(DEIIR); 2443 if (de_iir) { 2444 I915_WRITE(DEIIR, de_iir); 2445 if (INTEL_INFO(dev)->gen >= 7) 2446 ivb_display_irq_handler(dev, de_iir); 2447 else 2448 ilk_display_irq_handler(dev, de_iir); 2449 } 2450 2451 if (INTEL_INFO(dev)->gen >= 6) { 2452 u32 pm_iir = I915_READ(GEN6_PMIIR); 2453 if (pm_iir) { 2454 I915_WRITE(GEN6_PMIIR, pm_iir); 2455 gen6_rps_irq_handler(dev_priv, pm_iir); 2456 } 2457 } 2458 2459 I915_WRITE(DEIER, de_ier); 2460 POSTING_READ(DEIER); 2461 if (!HAS_PCH_NOP(dev)) { 2462 I915_WRITE(SDEIER, sde_ier); 2463 POSTING_READ(SDEIER); 2464 } 2465 2466 } 2467 2468 static irqreturn_t gen8_irq_handler(void *arg) 2469 { 2470 struct drm_device *dev = arg; 2471 struct drm_i915_private *dev_priv = dev->dev_private; 2472 u32 master_ctl; 2473 uint32_t tmp = 0; 2474 enum i915_pipe pipe; 2475 2476 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2477 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2478 if (!master_ctl) 2479 return; 2480 2481 I915_WRITE(GEN8_MASTER_IRQ, 0); 2482 POSTING_READ(GEN8_MASTER_IRQ); 2483 2484 /* Find, clear, then process each source of interrupt */ 2485 2486 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2487 2488 if (master_ctl & GEN8_DE_MISC_IRQ) { 2489 tmp = I915_READ(GEN8_DE_MISC_IIR); 2490 if (tmp) { 2491 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2492 if (tmp & GEN8_DE_MISC_GSE) 2493 intel_opregion_asle_intr(dev); 2494 else 2495 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2496 } 2497 else 2498 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2499 } 2500 2501 if (master_ctl & GEN8_DE_PORT_IRQ) { 2502 tmp = I915_READ(GEN8_DE_PORT_IIR); 2503 if (tmp) { 2504 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2505 if (tmp & GEN8_AUX_CHANNEL_A) 2506 dp_aux_irq_handler(dev); 2507 else 2508 DRM_ERROR("Unexpected DE Port interrupt\n"); 2509 } 2510 else 2511 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2512 } 2513 2514 for_each_pipe(pipe) { 2515 uint32_t pipe_iir; 2516 2517 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2518 continue; 2519 2520 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2521 if (pipe_iir) { 2522 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2523 if (pipe_iir & GEN8_PIPE_VBLANK) 2524 intel_pipe_handle_vblank(dev, pipe); 2525 2526 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2527 intel_prepare_page_flip(dev, pipe); 2528 intel_finish_page_flip_plane(dev, pipe); 2529 } 2530 2531 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2532 hsw_pipe_crc_irq_handler(dev, pipe); 2533 2534 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2535 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 
2536 false)) 2537 DRM_ERROR("Pipe %c FIFO underrun\n", 2538 pipe_name(pipe)); 2539 } 2540 2541 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2542 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2543 pipe_name(pipe), 2544 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2545 } 2546 } else 2547 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2548 } 2549 2550 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2551 /* 2552 * FIXME(BDW): Assume for now that the new interrupt handling 2553 * scheme also closed the SDE interrupt handling race we've seen 2554 * on older pch-split platforms. But this needs testing. 2555 */ 2556 u32 pch_iir = I915_READ(SDEIIR); 2557 if (pch_iir) { 2558 I915_WRITE(SDEIIR, pch_iir); 2559 cpt_irq_handler(dev, pch_iir); 2560 } else 2561 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2562 2563 } 2564 2565 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2566 POSTING_READ(GEN8_MASTER_IRQ); 2567 2568 } 2569 2570 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2571 bool reset_completed) 2572 { 2573 struct intel_engine_cs *ring; 2574 int i; 2575 2576 /* 2577 * Notify all waiters for GPU completion events that reset state has 2578 * been changed, and that they need to restart their wait after 2579 * checking for potential errors (and bail out to drop locks if there is 2580 * a gpu reset pending so that i915_error_work_func can acquire them). 2581 */ 2582 2583 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2584 for_each_ring(ring, dev_priv, i) 2585 wake_up_all(&ring->irq_queue); 2586 2587 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2588 wake_up_all(&dev_priv->pending_flip_queue); 2589 2590 /* 2591 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2592 * reset state is cleared. 2593 */ 2594 if (reset_completed) 2595 wake_up_all(&dev_priv->gpu_error.reset_queue); 2596 } 2597 2598 /** 2599 * i915_error_work_func - do process context error handling work 2600 * @work: work struct 2601 * 2602 * Fire an error uevent so userspace can see that a hang or error 2603 * was detected. 2604 */ 2605 static void i915_error_work_func(struct work_struct *work) 2606 { 2607 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2608 work); 2609 struct drm_i915_private *dev_priv = 2610 container_of(error, struct drm_i915_private, gpu_error); 2611 struct drm_device *dev = dev_priv->dev; 2612 #if 0 2613 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2614 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2615 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2616 #endif 2617 int ret; 2618 2619 /* kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); */ 2620 2621 /* 2622 * Note that there's only one work item which does gpu resets, so we 2623 * need not worry about concurrent gpu resets potentially incrementing 2624 * error->reset_counter twice. We only need to take care of another 2625 * racing irq/hangcheck declaring the gpu dead for a second time. A 2626 * quick check for that is good enough: schedule_work ensures the 2627 * correct ordering between hang detection and this work item, and since 2628 * the reset in-progress bit is only ever set by code outside of this 2629 * work we don't need to worry about any other races. 
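 * (Queueing an already-pending work item is a no-op, which is what makes
 * the "only one work item" assumption above hold in the first place.)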
2630 */ 2631 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2632 DRM_DEBUG_DRIVER("resetting chip\n"); 2633 #if 0 2634 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2635 reset_event); 2636 #endif 2637 2638 /* 2639 * In most cases it's guaranteed that we get here with an RPM 2640 * reference held, for example because there is a pending GPU 2641 * request that won't finish until the reset is done. This 2642 * isn't the case at least when we get here by doing a 2643 * simulated reset via debugs, so get an RPM reference. 2644 */ 2645 intel_runtime_pm_get(dev_priv); 2646 /* 2647 * All state reset _must_ be completed before we update the 2648 * reset counter, for otherwise waiters might miss the reset 2649 * pending state and not properly drop locks, resulting in 2650 * deadlocks with the reset work. 2651 */ 2652 ret = i915_reset(dev); 2653 2654 intel_display_handle_reset(dev); 2655 2656 intel_runtime_pm_put(dev_priv); 2657 2658 if (ret == 0) { 2659 /* 2660 * After all the gem state is reset, increment the reset 2661 * counter and wake up everyone waiting for the reset to 2662 * complete. 2663 * 2664 * Since unlock operations are a one-sided barrier only, 2665 * we need to insert a barrier here to order any seqno 2666 * updates before 2667 * the counter increment. 2668 */ 2669 smp_mb__before_atomic(); 2670 atomic_inc(&dev_priv->gpu_error.reset_counter); 2671 2672 #if 0 2673 kobject_uevent_env(&dev->primary->kdev->kobj, 2674 KOBJ_CHANGE, reset_done_event); 2675 #endif 2676 } else { 2677 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2678 } 2679 2680 /* 2681 * Note: The wake_up also serves as a memory barrier so that 2682 * waiters see the update value of the reset counter atomic_t. 2683 */ 2684 i915_error_wake_up(dev_priv, true); 2685 } 2686 } 2687 2688 static void i915_report_and_clear_eir(struct drm_device *dev) 2689 { 2690 struct drm_i915_private *dev_priv = dev->dev_private; 2691 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2692 u32 eir = I915_READ(EIR); 2693 int pipe, i; 2694 2695 if (!eir) 2696 return; 2697 2698 pr_err("render error detected, EIR: 0x%08x\n", eir); 2699 2700 #if 0 2701 i915_get_extra_instdone(dev, instdone); 2702 #endif 2703 2704 if (IS_G4X(dev)) { 2705 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2706 u32 ipeir = I915_READ(IPEIR_I965); 2707 2708 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2709 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2710 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2711 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2712 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2713 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2714 I915_WRITE(IPEIR_I965, ipeir); 2715 POSTING_READ(IPEIR_I965); 2716 } 2717 if (eir & GM45_ERROR_PAGE_TABLE) { 2718 u32 pgtbl_err = I915_READ(PGTBL_ER); 2719 pr_err("page table error\n"); 2720 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2721 I915_WRITE(PGTBL_ER, pgtbl_err); 2722 POSTING_READ(PGTBL_ER); 2723 } 2724 } 2725 2726 if (!IS_GEN2(dev)) { 2727 if (eir & I915_ERROR_PAGE_TABLE) { 2728 u32 pgtbl_err = I915_READ(PGTBL_ER); 2729 pr_err("page table error\n"); 2730 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2731 I915_WRITE(PGTBL_ER, pgtbl_err); 2732 POSTING_READ(PGTBL_ER); 2733 } 2734 } 2735 2736 if (eir & I915_ERROR_MEMORY_REFRESH) { 2737 pr_err("memory refresh error:\n"); 2738 for_each_pipe(pipe) 2739 pr_err("pipe %c stat: 0x%08x\n", 2740 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2741 /* pipestat has already been acked */ 2742 } 2743 if (eir & 
I915_ERROR_INSTRUCTION) { 2744 pr_err("instruction error\n"); 2745 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2746 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2747 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2748 if (INTEL_INFO(dev)->gen < 4) { 2749 u32 ipeir = I915_READ(IPEIR); 2750 2751 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2752 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2753 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2754 I915_WRITE(IPEIR, ipeir); 2755 POSTING_READ(IPEIR); 2756 } else { 2757 u32 ipeir = I915_READ(IPEIR_I965); 2758 2759 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2760 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2761 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2762 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2763 I915_WRITE(IPEIR_I965, ipeir); 2764 POSTING_READ(IPEIR_I965); 2765 } 2766 } 2767 2768 I915_WRITE(EIR, eir); 2769 POSTING_READ(EIR); 2770 eir = I915_READ(EIR); 2771 if (eir) { 2772 /* 2773 * some errors might have become stuck, 2774 * mask them. 2775 */ 2776 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2777 I915_WRITE(EMR, I915_READ(EMR) | eir); 2778 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2779 } 2780 } 2781 2782 /** 2783 * i915_handle_error - handle an error interrupt 2784 * @dev: drm device 2785 * 2786 * Do some basic checking of regsiter state at error interrupt time and 2787 * dump it to the syslog. Also call i915_capture_error_state() to make 2788 * sure we get a record and make it available in debugfs. Fire a uevent 2789 * so userspace knows something bad happened (should trigger collection 2790 * of a ring dump etc.). 2791 */ 2792 void i915_handle_error(struct drm_device *dev, bool wedged, 2793 const char *fmt, ...) 2794 { 2795 struct drm_i915_private *dev_priv = dev->dev_private; 2796 #if 0 2797 va_list args; 2798 char error_msg[80]; 2799 2800 va_start(args, fmt); 2801 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2802 va_end(args); 2803 2804 i915_capture_error_state(dev, wedged, error_msg); 2805 #endif 2806 i915_report_and_clear_eir(dev); 2807 2808 if (wedged) { 2809 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2810 &dev_priv->gpu_error.reset_counter); 2811 2812 /* 2813 * Wakeup waiting processes so that the reset work function 2814 * i915_error_work_func doesn't deadlock trying to grab various 2815 * locks. By bumping the reset counter first, the woken 2816 * processes will see a reset in progress and back off, 2817 * releasing their locks and then wait for the reset completion. 2818 * We must do this for _all_ gpu waiters that might hold locks 2819 * that the reset work needs to acquire. 2820 * 2821 * Note: The wake_up serves as the required memory barrier to 2822 * ensure that the waiters see the updated value of the reset 2823 * counter atomic_t. 2824 */ 2825 i915_error_wake_up(dev_priv, false); 2826 } 2827 2828 /* 2829 * Our reset work can grab modeset locks (since it needs to reset the 2830 * state of outstanding pagelips). Hence it must not be run on our own 2831 * dev-priv->wq work queue for otherwise the flush_work in the pageflip 2832 * code will deadlock. 
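 * schedule_work() therefore puts it on the generic system workqueue
 * instead of dev_priv->wq.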
2833 */ 2834 schedule_work(&dev_priv->gpu_error.work); 2835 } 2836 2837 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2838 { 2839 struct drm_i915_private *dev_priv = dev->dev_private; 2840 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2841 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2842 struct drm_i915_gem_object *obj; 2843 struct intel_unpin_work *work; 2844 bool stall_detected; 2845 2846 /* Ignore early vblank irqs */ 2847 if (intel_crtc == NULL) 2848 return; 2849 2850 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 2851 work = intel_crtc->unpin_work; 2852 2853 if (work == NULL || 2854 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2855 !work->enable_stall_check) { 2856 /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 2857 lockmgr(&dev->event_lock, LK_RELEASE); 2858 return; 2859 } 2860 2861 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2862 obj = work->pending_flip_obj; 2863 if (INTEL_INFO(dev)->gen >= 4) { 2864 int dspsurf = DSPSURF(intel_crtc->plane); 2865 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2866 i915_gem_obj_ggtt_offset(obj); 2867 } else { 2868 int dspaddr = DSPADDR(intel_crtc->plane); 2869 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2870 crtc->y * crtc->primary->fb->pitches[0] + 2871 crtc->x * crtc->primary->fb->bits_per_pixel/8); 2872 } 2873 2874 lockmgr(&dev->event_lock, LK_RELEASE); 2875 2876 if (stall_detected) { 2877 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2878 intel_prepare_page_flip(dev, intel_crtc->plane); 2879 } 2880 } 2881 2882 /* Called from drm generic code, passed 'crtc' which 2883 * we use as a pipe index 2884 */ 2885 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2886 { 2887 struct drm_i915_private *dev_priv = dev->dev_private; 2888 2889 if (!i915_pipe_enabled(dev, pipe)) 2890 return -EINVAL; 2891 2892 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2893 if (INTEL_INFO(dev)->gen >= 4) 2894 i915_enable_pipestat(dev_priv, pipe, 2895 PIPE_START_VBLANK_INTERRUPT_STATUS); 2896 else 2897 i915_enable_pipestat(dev_priv, pipe, 2898 PIPE_VBLANK_INTERRUPT_STATUS); 2899 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2900 2901 return 0; 2902 } 2903 2904 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2905 { 2906 struct drm_i915_private *dev_priv = dev->dev_private; 2907 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2908 DE_PIPE_VBLANK(pipe); 2909 2910 if (!i915_pipe_enabled(dev, pipe)) 2911 return -EINVAL; 2912 2913 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2914 ironlake_enable_display_irq(dev_priv, bit); 2915 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2916 2917 return 0; 2918 } 2919 2920 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2921 { 2922 struct drm_i915_private *dev_priv = dev->dev_private; 2923 2924 if (!i915_pipe_enabled(dev, pipe)) 2925 return -EINVAL; 2926 2927 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2928 i915_enable_pipestat(dev_priv, pipe, 2929 PIPE_START_VBLANK_INTERRUPT_STATUS); 2930 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2931 2932 return 0; 2933 } 2934 2935 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2936 { 2937 struct drm_i915_private *dev_priv = dev->dev_private; 2938 2939 if (!i915_pipe_enabled(dev, pipe)) 2940 return -EINVAL; 2941 2942 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2943 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2944 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2945 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2946 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2947 return 0; 2948 } 2949 2950 /* Called from drm generic code, passed 'crtc' which 2951 * we use as a pipe index 2952 */ 2953 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2954 { 2955 struct drm_i915_private *dev_priv = dev->dev_private; 2956 2957 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2958 i915_disable_pipestat(dev_priv, pipe, 2959 PIPE_VBLANK_INTERRUPT_STATUS | 2960 PIPE_START_VBLANK_INTERRUPT_STATUS); 2961 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2962 } 2963 2964 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2965 { 2966 struct drm_i915_private *dev_priv = dev->dev_private; 2967 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2968 DE_PIPE_VBLANK(pipe); 2969 2970 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2971 ironlake_disable_display_irq(dev_priv, bit); 2972 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2973 } 2974 2975 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2976 { 2977 struct drm_i915_private *dev_priv = dev->dev_private; 2978 2979 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2980 i915_disable_pipestat(dev_priv, pipe, 2981 PIPE_START_VBLANK_INTERRUPT_STATUS); 2982 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2983 } 2984 2985 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2986 { 2987 struct drm_i915_private *dev_priv = dev->dev_private; 2988 2989 if (!i915_pipe_enabled(dev, pipe)) 2990 return; 2991 2992 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2993 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2994 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2995 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2996 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2997 } 2998 2999 static u32 3000 ring_last_seqno(struct intel_engine_cs *ring) 3001 { 3002 return list_entry(ring->request_list.prev, 3003 struct drm_i915_gem_request, list)->seqno; 3004 } 3005 3006 static bool 3007 ring_idle(struct intel_engine_cs *ring, u32 seqno) 3008 { 3009 return (list_empty(&ring->request_list) || 3010 i915_seqno_passed(seqno, ring_last_seqno(ring))); 3011 } 3012 3013 static bool 3014 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 3015 { 3016 if (INTEL_INFO(dev)->gen >= 8) { 3017 return (ipehr >> 23) == 0x1c; 3018 } else { 3019 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 3020 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 3021 MI_SEMAPHORE_REGISTER); 3022 } 3023 } 3024 3025 static struct intel_engine_cs * 3026 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 3027 { 3028 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3029 struct intel_engine_cs *signaller; 3030 int i; 3031 3032 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 3033 for_each_ring(signaller, dev_priv, i) { 3034 if (ring == signaller) 3035 continue; 3036 3037 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 3038 return signaller; 3039 } 3040 } else { 3041 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 3042 3043 for_each_ring(signaller, dev_priv, i) { 3044 if(ring == signaller) 3045 continue; 3046 3047 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 3048 return signaller; 3049 } 3050 } 3051 3052 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n", 3053 ring->id, ipehr, offset); 3054 3055 return NULL; 3056 } 3057 3058 static struct intel_engine_cs * 3059 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 3060 { 3061 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3062 u32 cmd, ipehr, head; 3063 u64 offset = 0; 3064 int i, backwards; 3065 3066 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 3067 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 3068 return NULL; 3069 3070 /* 3071 * HEAD is likely pointing to the dword after the actual command, 3072 * so scan backwards until we find the MBOX. But limit it to just 3 3073 * or 4 dwords depending on the semaphore wait command size. 3074 * Note that we don't care about ACTHD here since that might 3075 * point at at batch, and semaphores are always emitted into the 3076 * ringbuffer itself. 3077 */ 3078 head = I915_READ_HEAD(ring) & HEAD_ADDR; 3079 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 
5 : 4; 3080 3081 for (i = backwards; i; --i) { 3082 /* 3083 * Be paranoid and presume the hw has gone off into the wild - 3084 * our ring is smaller than what the hardware (and hence 3085 * HEAD_ADDR) allows. Also handles wrap-around. 3086 */ 3087 head &= ring->buffer->size - 1; 3088 3089 /* This here seems to blow up */ 3090 cmd = ioread32(ring->buffer->virtual_start + head); 3091 if (cmd == ipehr) 3092 break; 3093 3094 head -= 4; 3095 } 3096 3097 if (!i) 3098 return NULL; 3099 3100 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 3101 if (INTEL_INFO(ring->dev)->gen >= 8) { 3102 offset = ioread32(ring->buffer->virtual_start + head + 12); 3103 offset <<= 32; 3104 offset = ioread32(ring->buffer->virtual_start + head + 8); 3105 } 3106 return semaphore_wait_to_signaller_ring(ring, ipehr, offset); 3107 } 3108 3109 static int semaphore_passed(struct intel_engine_cs *ring) 3110 { 3111 struct drm_i915_private *dev_priv = ring->dev->dev_private; 3112 struct intel_engine_cs *signaller; 3113 u32 seqno; 3114 3115 ring->hangcheck.deadlock++; 3116 3117 signaller = semaphore_waits_for(ring, &seqno); 3118 if (signaller == NULL) 3119 return -1; 3120 3121 /* Prevent pathological recursion due to driver bugs */ 3122 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 3123 return -1; 3124 3125 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 3126 return 1; 3127 3128 /* cursory check for an unkickable deadlock */ 3129 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 3130 semaphore_passed(signaller) < 0) 3131 return -1; 3132 3133 return 0; 3134 } 3135 3136 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 3137 { 3138 struct intel_engine_cs *ring; 3139 int i; 3140 3141 for_each_ring(ring, dev_priv, i) 3142 ring->hangcheck.deadlock = 0; 3143 } 3144 3145 static enum intel_ring_hangcheck_action 3146 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 3147 { 3148 struct drm_device *dev = ring->dev; 3149 struct drm_i915_private *dev_priv = dev->dev_private; 3150 u32 tmp; 3151 3152 if (acthd != ring->hangcheck.acthd) { 3153 if (acthd > ring->hangcheck.max_acthd) { 3154 ring->hangcheck.max_acthd = acthd; 3155 return HANGCHECK_ACTIVE; 3156 } 3157 3158 return HANGCHECK_ACTIVE_LOOP; 3159 } 3160 3161 if (IS_GEN2(dev)) 3162 return HANGCHECK_HUNG; 3163 3164 /* Is the chip hanging on a WAIT_FOR_EVENT? 3165 * If so we can simply poke the RB_WAIT bit 3166 * and break the hang. This should work on 3167 * all but the second generation chipsets. 3168 */ 3169 tmp = I915_READ_CTL(ring); 3170 if (tmp & RING_WAIT) { 3171 i915_handle_error(dev, false, 3172 "Kicking stuck wait on %s", 3173 ring->name); 3174 I915_WRITE_CTL(ring, tmp); 3175 return HANGCHECK_KICK; 3176 } 3177 3178 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3179 switch (semaphore_passed(ring)) { 3180 default: 3181 return HANGCHECK_HUNG; 3182 case 1: 3183 i915_handle_error(dev, false, 3184 "Kicking stuck semaphore on %s", 3185 ring->name); 3186 I915_WRITE_CTL(ring, tmp); 3187 return HANGCHECK_KICK; 3188 case 0: 3189 return HANGCHECK_WAIT; 3190 } 3191 } 3192 3193 return HANGCHECK_HUNG; 3194 } 3195 3196 /** 3197 * This is called when the chip hasn't reported back with completed 3198 * batchbuffers in a long time. We keep track per ring seqno progress and 3199 * if there are no progress, hangcheck score for that ring is increased. 3200 * Further, acthd is inspected to see if the ring is stuck. On stuck case 3201 * we kick the ring. 
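 * With the weights defined below a ring that keeps scoring HUNG gains 20
 * per check and quickly crosses HANGCHECK_SCORE_RING_HUNG, a kicked ring
 * gains 5, and a ring that makes seqno progress decays its score by 1.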
If we see no progress on three subsequent calls 3202 * we assume chip is wedged and try to fix it by resetting the chip. 3203 */ 3204 static void i915_hangcheck_elapsed(unsigned long data) 3205 { 3206 struct drm_device *dev = (struct drm_device *)data; 3207 struct drm_i915_private *dev_priv = dev->dev_private; 3208 struct intel_engine_cs *ring; 3209 int i; 3210 int busy_count = 0, rings_hung = 0; 3211 bool stuck[I915_NUM_RINGS] = { 0 }; 3212 #define BUSY 1 3213 #define KICK 5 3214 #define HUNG 20 3215 3216 if (!i915.enable_hangcheck) 3217 return; 3218 3219 for_each_ring(ring, dev_priv, i) { 3220 u64 acthd; 3221 u32 seqno; 3222 bool busy = true; 3223 3224 semaphore_clear_deadlocks(dev_priv); 3225 3226 seqno = ring->get_seqno(ring, false); 3227 acthd = intel_ring_get_active_head(ring); 3228 3229 if (ring->hangcheck.seqno == seqno) { 3230 if (ring_idle(ring, seqno)) { 3231 ring->hangcheck.action = HANGCHECK_IDLE; 3232 3233 if (waitqueue_active(&ring->irq_queue)) { 3234 /* Issue a wake-up to catch stuck h/w. */ 3235 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 3236 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 3237 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3238 ring->name); 3239 else 3240 DRM_INFO("Fake missed irq on %s\n", 3241 ring->name); 3242 wake_up_all(&ring->irq_queue); 3243 } 3244 /* Safeguard against driver failure */ 3245 ring->hangcheck.score += BUSY; 3246 } else 3247 busy = false; 3248 } else { 3249 /* We always increment the hangcheck score 3250 * if the ring is busy and still processing 3251 * the same request, so that no single request 3252 * can run indefinitely (such as a chain of 3253 * batches). The only time we do not increment 3254 * the hangcheck score on this ring, if this 3255 * ring is in a legitimate wait for another 3256 * ring. In that case the waiting ring is a 3257 * victim and we want to be sure we catch the 3258 * right culprit. Then every time we do kick 3259 * the ring, add a small increment to the 3260 * score so that we can catch a batch that is 3261 * being repeatedly kicked and so responsible 3262 * for stalling the machine. 3263 */ 3264 ring->hangcheck.action = ring_stuck(ring, 3265 acthd); 3266 3267 switch (ring->hangcheck.action) { 3268 case HANGCHECK_IDLE: 3269 case HANGCHECK_WAIT: 3270 case HANGCHECK_ACTIVE: 3271 break; 3272 case HANGCHECK_ACTIVE_LOOP: 3273 ring->hangcheck.score += BUSY; 3274 break; 3275 case HANGCHECK_KICK: 3276 ring->hangcheck.score += KICK; 3277 break; 3278 case HANGCHECK_HUNG: 3279 ring->hangcheck.score += HUNG; 3280 stuck[i] = true; 3281 break; 3282 } 3283 } 3284 } else { 3285 ring->hangcheck.action = HANGCHECK_ACTIVE; 3286 3287 /* Gradually reduce the count so that we catch DoS 3288 * attempts across multiple batches. 3289 */ 3290 if (ring->hangcheck.score > 0) 3291 ring->hangcheck.score--; 3292 3293 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 3294 } 3295 3296 ring->hangcheck.seqno = seqno; 3297 ring->hangcheck.acthd = acthd; 3298 busy_count += busy; 3299 } 3300 3301 for_each_ring(ring, dev_priv, i) { 3302 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3303 DRM_INFO("%s on %s\n", 3304 stuck[i] ? 
"stuck" : "no progress", 3305 ring->name); 3306 rings_hung++; 3307 } 3308 } 3309 3310 if (rings_hung) 3311 return i915_handle_error(dev, true, "Ring hung"); 3312 3313 if (busy_count) 3314 /* Reset timer case chip hangs without another request 3315 * being added */ 3316 i915_queue_hangcheck(dev); 3317 } 3318 3319 void i915_queue_hangcheck(struct drm_device *dev) 3320 { 3321 struct drm_i915_private *dev_priv = dev->dev_private; 3322 if (!i915.enable_hangcheck) 3323 return; 3324 3325 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 3326 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3327 } 3328 3329 static void ibx_irq_reset(struct drm_device *dev) 3330 { 3331 struct drm_i915_private *dev_priv = dev->dev_private; 3332 3333 if (HAS_PCH_NOP(dev)) 3334 return; 3335 3336 GEN5_IRQ_RESET(SDE); 3337 3338 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3339 I915_WRITE(SERR_INT, 0xffffffff); 3340 } 3341 3342 /* 3343 * SDEIER is also touched by the interrupt handler to work around missed PCH 3344 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3345 * instead we unconditionally enable all PCH interrupt sources here, but then 3346 * only unmask them as needed with SDEIMR. 3347 * 3348 * This function needs to be called before interrupts are enabled. 3349 */ 3350 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3351 { 3352 struct drm_i915_private *dev_priv = dev->dev_private; 3353 3354 if (HAS_PCH_NOP(dev)) 3355 return; 3356 3357 WARN_ON(I915_READ(SDEIER) != 0); 3358 I915_WRITE(SDEIER, 0xffffffff); 3359 POSTING_READ(SDEIER); 3360 } 3361 3362 static void gen5_gt_irq_reset(struct drm_device *dev) 3363 { 3364 struct drm_i915_private *dev_priv = dev->dev_private; 3365 3366 GEN5_IRQ_RESET(GT); 3367 if (INTEL_INFO(dev)->gen >= 6) 3368 GEN5_IRQ_RESET(GEN6_PM); 3369 } 3370 3371 /* drm_dma.h hooks 3372 */ 3373 static void ironlake_irq_reset(struct drm_device *dev) 3374 { 3375 struct drm_i915_private *dev_priv = dev->dev_private; 3376 3377 I915_WRITE(HWSTAM, 0xffffffff); 3378 3379 GEN5_IRQ_RESET(DE); 3380 if (IS_GEN7(dev)) 3381 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3382 3383 gen5_gt_irq_reset(dev); 3384 3385 ibx_irq_reset(dev); 3386 } 3387 3388 static void valleyview_irq_preinstall(struct drm_device *dev) 3389 { 3390 struct drm_i915_private *dev_priv = dev->dev_private; 3391 int pipe; 3392 3393 /* VLV magic */ 3394 I915_WRITE(VLV_IMR, 0); 3395 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3396 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3397 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3398 3399 /* and GT */ 3400 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3401 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3402 3403 gen5_gt_irq_reset(dev); 3404 3405 I915_WRITE(DPINVGTT, 0xff); 3406 3407 I915_WRITE(PORT_HOTPLUG_EN, 0); 3408 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3409 for_each_pipe(pipe) 3410 I915_WRITE(PIPESTAT(pipe), 0xffff); 3411 I915_WRITE(VLV_IIR, 0xffffffff); 3412 I915_WRITE(VLV_IMR, 0xffffffff); 3413 I915_WRITE(VLV_IER, 0x0); 3414 POSTING_READ(VLV_IER); 3415 } 3416 3417 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3418 { 3419 GEN8_IRQ_RESET_NDX(GT, 0); 3420 GEN8_IRQ_RESET_NDX(GT, 1); 3421 GEN8_IRQ_RESET_NDX(GT, 2); 3422 GEN8_IRQ_RESET_NDX(GT, 3); 3423 } 3424 3425 static void gen8_irq_reset(struct drm_device *dev) 3426 { 3427 struct drm_i915_private *dev_priv = dev->dev_private; 3428 int pipe; 3429 3430 I915_WRITE(GEN8_MASTER_IRQ, 0); 3431 POSTING_READ(GEN8_MASTER_IRQ); 3432 3433 gen8_gt_irq_reset(dev_priv); 3434 3435 for_each_pipe(pipe) 3436 if 
(intel_display_power_enabled(dev_priv, 3437 POWER_DOMAIN_PIPE(pipe))) 3438 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3439 3440 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3441 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3442 GEN5_IRQ_RESET(GEN8_PCU_); 3443 3444 ibx_irq_reset(dev); 3445 } 3446 3447 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3448 { 3449 3450 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3451 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3452 ~dev_priv->de_irq_mask[PIPE_B]); 3453 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3454 ~dev_priv->de_irq_mask[PIPE_C]); 3455 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3456 } 3457 3458 static void cherryview_irq_preinstall(struct drm_device *dev) 3459 { 3460 struct drm_i915_private *dev_priv = dev->dev_private; 3461 int pipe; 3462 3463 I915_WRITE(GEN8_MASTER_IRQ, 0); 3464 POSTING_READ(GEN8_MASTER_IRQ); 3465 3466 gen8_gt_irq_reset(dev_priv); 3467 3468 GEN5_IRQ_RESET(GEN8_PCU_); 3469 3470 POSTING_READ(GEN8_PCU_IIR); 3471 3472 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3473 3474 I915_WRITE(PORT_HOTPLUG_EN, 0); 3475 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3476 3477 for_each_pipe(pipe) 3478 I915_WRITE(PIPESTAT(pipe), 0xffff); 3479 3480 I915_WRITE(VLV_IMR, 0xffffffff); 3481 I915_WRITE(VLV_IER, 0x0); 3482 I915_WRITE(VLV_IIR, 0xffffffff); 3483 POSTING_READ(VLV_IIR); 3484 } 3485 3486 static void ibx_hpd_irq_setup(struct drm_device *dev) 3487 { 3488 struct drm_i915_private *dev_priv = dev->dev_private; 3489 struct drm_mode_config *mode_config = &dev->mode_config; 3490 struct intel_encoder *intel_encoder; 3491 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3492 3493 if (HAS_PCH_IBX(dev)) { 3494 hotplug_irqs = SDE_HOTPLUG_MASK; 3495 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3496 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3497 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3498 } else { 3499 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3500 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3501 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3502 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3503 } 3504 3505 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3506 3507 /* 3508 * Enable digital hotplug on the PCH, and configure the DP short pulse 3509 * duration to 2ms (which is the minimum in the Display Port spec) 3510 * 3511 * This register is the same on all known PCH chips. 
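 * The long-vs-short pulse detection configured here is presumably what
 * later shows up in PCH_PORT_HOTPLUG as the *_HOTPLUG_LONG_DETECT bits
 * consumed by intel_hpd_irq_handler().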
3512 */ 3513 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3514 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3515 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3516 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3517 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3518 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3519 } 3520 3521 static void ibx_irq_postinstall(struct drm_device *dev) 3522 { 3523 struct drm_i915_private *dev_priv = dev->dev_private; 3524 u32 mask; 3525 3526 if (HAS_PCH_NOP(dev)) 3527 return; 3528 3529 if (HAS_PCH_IBX(dev)) 3530 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3531 else 3532 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3533 3534 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3535 I915_WRITE(SDEIMR, ~mask); 3536 } 3537 3538 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3539 { 3540 struct drm_i915_private *dev_priv = dev->dev_private; 3541 u32 pm_irqs, gt_irqs; 3542 3543 pm_irqs = gt_irqs = 0; 3544 3545 dev_priv->gt_irq_mask = ~0; 3546 if (HAS_L3_DPF(dev)) { 3547 /* L3 parity interrupt is always unmasked. */ 3548 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3549 gt_irqs |= GT_PARITY_ERROR(dev); 3550 } 3551 3552 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3553 if (IS_GEN5(dev)) { 3554 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3555 ILK_BSD_USER_INTERRUPT; 3556 } else { 3557 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3558 } 3559 3560 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3561 3562 if (INTEL_INFO(dev)->gen >= 6) { 3563 pm_irqs |= dev_priv->pm_rps_events; 3564 3565 if (HAS_VEBOX(dev)) 3566 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3567 3568 dev_priv->pm_irq_mask = 0xffffffff; 3569 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3570 } 3571 } 3572 3573 static int ironlake_irq_postinstall(struct drm_device *dev) 3574 { 3575 struct drm_i915_private *dev_priv = dev->dev_private; 3576 u32 display_mask, extra_mask; 3577 3578 if (INTEL_INFO(dev)->gen >= 7) { 3579 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3580 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3581 DE_PLANEB_FLIP_DONE_IVB | 3582 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3583 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3584 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3585 } else { 3586 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3587 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3588 DE_AUX_CHANNEL_A | 3589 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3590 DE_POISON); 3591 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3592 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3593 } 3594 3595 dev_priv->irq_mask = ~display_mask; 3596 3597 I915_WRITE(HWSTAM, 0xeffe); 3598 3599 ibx_irq_pre_postinstall(dev); 3600 3601 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3602 3603 gen5_gt_irq_postinstall(dev); 3604 3605 ibx_irq_postinstall(dev); 3606 3607 if (IS_IRONLAKE_M(dev)) { 3608 /* Enable PCU event interrupts 3609 * 3610 * spinlocking not required here for correctness since interrupt 3611 * setup is guaranteed to run in single-threaded context. But we 3612 * need it to make the assert_spin_locked happy. 
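 * (ironlake_enable_display_irq() asserts that irq_lock is held, which is
 * the only reason for the lock/unlock pair below.)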
*/ 3613 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3614 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3615 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3616 } 3617 3618 return 0; 3619 } 3620 3621 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3622 { 3623 u32 pipestat_mask; 3624 u32 iir_mask; 3625 3626 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3627 PIPE_FIFO_UNDERRUN_STATUS; 3628 3629 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3630 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3631 POSTING_READ(PIPESTAT(PIPE_A)); 3632 3633 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3634 PIPE_CRC_DONE_INTERRUPT_STATUS; 3635 3636 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3637 PIPE_GMBUS_INTERRUPT_STATUS); 3638 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3639 3640 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3641 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3642 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3643 dev_priv->irq_mask &= ~iir_mask; 3644 3645 I915_WRITE(VLV_IIR, iir_mask); 3646 I915_WRITE(VLV_IIR, iir_mask); 3647 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3648 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3649 POSTING_READ(VLV_IER); 3650 } 3651 3652 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3653 { 3654 u32 pipestat_mask; 3655 u32 iir_mask; 3656 3657 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3658 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3659 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3660 3661 dev_priv->irq_mask |= iir_mask; 3662 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3663 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3664 I915_WRITE(VLV_IIR, iir_mask); 3665 I915_WRITE(VLV_IIR, iir_mask); 3666 POSTING_READ(VLV_IIR); 3667 3668 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3669 PIPE_CRC_DONE_INTERRUPT_STATUS; 3670 3671 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3672 PIPE_GMBUS_INTERRUPT_STATUS); 3673 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3674 3675 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3676 PIPE_FIFO_UNDERRUN_STATUS; 3677 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3678 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3679 POSTING_READ(PIPESTAT(PIPE_A)); 3680 } 3681 3682 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3683 { 3684 assert_spin_locked(&dev_priv->irq_lock); 3685 3686 if (dev_priv->display_irqs_enabled) 3687 return; 3688 3689 dev_priv->display_irqs_enabled = true; 3690 3691 if (dev_priv->dev->irq_enabled) 3692 valleyview_display_irqs_install(dev_priv); 3693 } 3694 3695 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3696 { 3697 assert_spin_locked(&dev_priv->irq_lock); 3698 3699 if (!dev_priv->display_irqs_enabled) 3700 return; 3701 3702 dev_priv->display_irqs_enabled = false; 3703 3704 if (dev_priv->dev->irq_enabled) 3705 valleyview_display_irqs_uninstall(dev_priv); 3706 } 3707 3708 static int valleyview_irq_postinstall(struct drm_device *dev) 3709 { 3710 struct drm_i915_private *dev_priv = dev->dev_private; 3711 3712 dev_priv->irq_mask = ~0; 3713 3714 I915_WRITE(PORT_HOTPLUG_EN, 0); 3715 POSTING_READ(PORT_HOTPLUG_EN); 3716 3717 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3718 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3719 I915_WRITE(VLV_IIR, 0xffffffff); 3720 POSTING_READ(VLV_IER); 3721 3722 /* Interrupt setup is already guaranteed to be single-threaded, this is 3723 * just to make the assert_spin_locked check happy. 
*/ 3724 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3725 if (dev_priv->display_irqs_enabled) 3726 valleyview_display_irqs_install(dev_priv); 3727 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3728 3729 I915_WRITE(VLV_IIR, 0xffffffff); 3730 I915_WRITE(VLV_IIR, 0xffffffff); 3731 3732 gen5_gt_irq_postinstall(dev); 3733 3734 /* ack & enable invalid PTE error interrupts */ 3735 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3736 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3737 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3738 #endif 3739 3740 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3741 3742 return 0; 3743 } 3744 3745 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3746 { 3747 int i; 3748 3749 /* These are interrupts we'll toggle with the ring mask register */ 3750 uint32_t gt_interrupts[] = { 3751 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3752 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3753 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3754 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3755 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3756 0, 3757 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3758 }; 3759 3760 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) 3761 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]); 3762 3763 dev_priv->pm_irq_mask = 0xffffffff; 3764 } 3765 3766 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3767 { 3768 struct drm_device *dev = dev_priv->dev; 3769 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3770 GEN8_PIPE_CDCLK_CRC_DONE | 3771 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3772 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3773 GEN8_PIPE_FIFO_UNDERRUN; 3774 int pipe; 3775 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3776 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3777 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3778 3779 for_each_pipe(pipe) 3780 if (intel_display_power_enabled(dev_priv, 3781 POWER_DOMAIN_PIPE(pipe))) 3782 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3783 dev_priv->de_irq_mask[pipe], 3784 de_pipe_enables); 3785 3786 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); 3787 } 3788 3789 static int gen8_irq_postinstall(struct drm_device *dev) 3790 { 3791 struct drm_i915_private *dev_priv = dev->dev_private; 3792 3793 ibx_irq_pre_postinstall(dev); 3794 3795 gen8_gt_irq_postinstall(dev_priv); 3796 gen8_de_irq_postinstall(dev_priv); 3797 3798 ibx_irq_postinstall(dev); 3799 3800 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3801 POSTING_READ(GEN8_MASTER_IRQ); 3802 3803 return 0; 3804 } 3805 3806 static int cherryview_irq_postinstall(struct drm_device *dev) 3807 { 3808 struct drm_i915_private *dev_priv = dev->dev_private; 3809 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3810 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3811 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3812 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3813 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | 3814 PIPE_CRC_DONE_INTERRUPT_STATUS; 3815 int pipe; 3816 3817 /* 3818 * Leave vblank interrupts masked initially. enable/disable will 3819 * toggle them based on usage. 
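 * The drm enable/disable_vblank hooks flip the per-pipe vblank enables
 * in PIPESTAT at runtime via i915_enable_pipestat()/i915_disable_pipestat().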
static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
		PIPE_CRC_DONE_INTERRUPT_STATUS;
	int pipe;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = ~enable_mask;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	dev_priv->irq_mask = 0;

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

#define GEN8_IRQ_FINI_NDX(type, which) \
do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
} while (0)

#define GEN8_IRQ_FINI(type) \
do { \
	I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
	I915_WRITE(GEN8_##type##_IER, 0); \
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR); \
	I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
} while (0)

	GEN8_IRQ_FINI_NDX(GT, 0);
	GEN8_IRQ_FINI_NDX(GT, 1);
	GEN8_IRQ_FINI_NDX(GT, 2);
	GEN8_IRQ_FINI_NDX(GT, 3);

	GEN8_IRQ_FINI(PCU);

#undef GEN8_IRQ_FINI
#undef GEN8_IRQ_FINI_NDX

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	I915_WRITE(VLV_IIR, 0xffffffff);
	POSTING_READ(VLV_IIR);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}
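/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * IIR, PIPESTAT and PORT_HOTPLUG_STAT status bits are write-one-to-clear,
 * which is why the uninstall paths above either write all-ones or write
 * back the value just read -- both simply ack whatever happens to be
 * latched after delivery has been masked off.
 */
static inline void i915_ack_hotplug_stat_sketch(struct drm_i915_private *dev_priv)
{
	/* read the latched status bits and write them back to clear them */
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}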
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
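/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * FlipDone check above boils down to "the flip was pending when IIR
 * latched it, but ISR no longer shows it pending", i.e. the flip finished
 * before this vblank.  ISR reflects the live state while IIR holds the
 * latched edge.
 */
static inline bool i8xx_flip_done_by_now_sketch(struct drm_i915_private *dev_priv,
						u32 iir, u16 flip_pending)
{
	return (iir & flip_pending) &&
	       (I915_READ16(ISR) & flip_pending) == 0;
}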
static irqreturn_t i8xx_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		iir = new_iir;
	}

}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}

static irqreturn_t i915_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

}

static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
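/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * handlers above keep looping on the value of IIR re-read *after* the ack
 * write, because with MSI a new message is only generated when IIR goes
 * from zero to nonzero -- any bit that latched while we were busy must be
 * picked up here or it would never interrupt us again.
 */
static inline u32 i915_iir_ack_and_reread_sketch(struct drm_i915_private *dev_priv,
						 u32 iir, u32 flip_mask)
{
	I915_WRITE(IIR, iir & ~flip_mask);	/* ack what we are handling */
	return I915_READ(IIR);			/* flush and pick up new bits */
}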
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		   to generate a spurious hotplug event about three
		   seconds later.  So just do it once.
		*/
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}

static irqreturn_t i965_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
4520 */ 4521 iir = new_iir; 4522 } 4523 4524 i915_update_dri1_breadcrumb(dev); 4525 4526 } 4527 4528 static void i965_irq_uninstall(struct drm_device * dev) 4529 { 4530 struct drm_i915_private *dev_priv = dev->dev_private; 4531 int pipe; 4532 4533 if (!dev_priv) 4534 return; 4535 4536 I915_WRITE(PORT_HOTPLUG_EN, 0); 4537 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4538 4539 I915_WRITE(HWSTAM, 0xffffffff); 4540 for_each_pipe(pipe) 4541 I915_WRITE(PIPESTAT(pipe), 0); 4542 I915_WRITE(IMR, 0xffffffff); 4543 I915_WRITE(IER, 0x0); 4544 4545 for_each_pipe(pipe) 4546 I915_WRITE(PIPESTAT(pipe), 4547 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4548 I915_WRITE(IIR, I915_READ(IIR)); 4549 } 4550 4551 static void intel_hpd_irq_reenable(struct work_struct *work) 4552 { 4553 struct drm_i915_private *dev_priv = 4554 container_of(work, typeof(*dev_priv), 4555 hotplug_reenable_work.work); 4556 struct drm_device *dev = dev_priv->dev; 4557 struct drm_mode_config *mode_config = &dev->mode_config; 4558 int i; 4559 4560 intel_runtime_pm_get(dev_priv); 4561 4562 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4563 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4564 struct drm_connector *connector; 4565 4566 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 4567 continue; 4568 4569 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4570 4571 list_for_each_entry(connector, &mode_config->connector_list, head) { 4572 struct intel_connector *intel_connector = to_intel_connector(connector); 4573 4574 if (intel_connector->encoder->hpd_pin == i) { 4575 if (connector->polled != intel_connector->polled) 4576 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4577 connector->name); 4578 connector->polled = intel_connector->polled; 4579 if (!connector->polled) 4580 connector->polled = DRM_CONNECTOR_POLL_HPD; 4581 } 4582 } 4583 } 4584 if (dev_priv->display.hpd_irq_setup) 4585 dev_priv->display.hpd_irq_setup(dev); 4586 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4587 4588 intel_runtime_pm_put(dev_priv); 4589 } 4590 4591 void intel_irq_init(struct drm_device *dev) 4592 { 4593 struct drm_i915_private *dev_priv = dev->dev_private; 4594 4595 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4596 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); 4597 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 4598 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4599 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4600 4601 /* Let's track the enabled rps events */ 4602 if (IS_VALLEYVIEW(dev)) 4603 /* WaGsvRC0ResidenncyMethod:VLV */ 4604 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4605 else 4606 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4607 4608 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 4609 i915_hangcheck_elapsed, 4610 (unsigned long) dev); 4611 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, 4612 intel_hpd_irq_reenable); 4613 4614 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4615 4616 /* Haven't installed the IRQ handler yet */ 4617 dev_priv->pm._irqs_disabled = true; 4618 4619 if (IS_GEN2(dev)) { 4620 dev->max_vblank_count = 0; 4621 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4622 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 4623 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4624 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4625 } else { 4626 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4627 dev->max_vblank_count = 0xffffff; /* only 24 bits of 
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
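/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * intel_irq_init() above only fills in the per-platform hooks; they are
 * driven later, roughly preinstall (quiesce: mask and ack everything),
 * then the OS interrupt line is hooked up, then postinstall (unmask what
 * we want).  This mirrors what intel_runtime_pm_restore_interrupts()
 * below does when bringing interrupts back.
 */
static inline void i915_irq_hooks_walkthrough_sketch(struct drm_device *dev)
{
	dev->driver->irq_preinstall(dev);	/* everything masked and acked */
	/* (interrupt line would be requested here by the caller) */
	dev->driver->irq_postinstall(dev);	/* enable the wanted sources */
}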
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm._irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm._irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}
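/*
 * Illustrative sketch (hypothetical caller, not part of the driver): the
 * two helpers above are meant to bracket a runtime-PM transition -- tear
 * the IRQ setup down before the device powers off and rebuild it
 * (preinstall + postinstall) once it is powered again, with
 * dev_priv->pm._irqs_disabled recording the state for the rest of the
 * driver in the meantime.
 */
static inline void i915_runtime_pm_irq_bracket_sketch(struct drm_device *dev)
{
	intel_runtime_pm_disable_interrupts(dev);
	/* ... device is runtime suspended and later resumed here ... */
	intel_runtime_pm_restore_interrupts(dev);
}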