1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define KBUILD_MODNAME "i915" 30 31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32 33 #include <linux/sysrq.h> 34 #include <linux/slab.h> 35 #include <linux/circ_buf.h> 36 #include <drm/drmP.h> 37 #include <drm/i915_drm.h> 38 #include "i915_drv.h" 39 #include "i915_trace.h" 40 #include "intel_drv.h" 41 42 /** 43 * DOC: interrupt handling 44 * 45 * These functions provide the basic support for enabling and disabling the 46 * interrupt handling support. There's a lot more functionality in i915_irq.c 47 * and related files, but that will be described in separate chapters. 
48 */ 49 50 static const u32 hpd_ilk[HPD_NUM_PINS] = { 51 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 52 }; 53 54 static const u32 hpd_ivb[HPD_NUM_PINS] = { 55 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 56 }; 57 58 static const u32 hpd_bdw[HPD_NUM_PINS] = { 59 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 60 }; 61 62 static const u32 hpd_ibx[HPD_NUM_PINS] = { 63 [HPD_CRT] = SDE_CRT_HOTPLUG, 64 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 65 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 66 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 67 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 68 }; 69 70 static const u32 hpd_cpt[HPD_NUM_PINS] = { 71 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 72 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 73 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 74 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 75 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 76 }; 77 78 static const u32 hpd_spt[HPD_NUM_PINS] = { 79 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 80 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 81 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 82 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 83 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 84 }; 85 86 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 87 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 88 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 89 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 90 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 91 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 92 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 93 }; 94 95 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 96 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 97 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 98 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 99 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 100 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 101 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 102 }; 103 104 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 105 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 106 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 107 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 108 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 109 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 110 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 111 }; 112 113 /* BXT hpd list */ 114 static const u32 hpd_bxt[HPD_NUM_PINS] = { 115 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 116 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 117 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 118 }; 119 120 /* IIR can theoretically queue up two events. Be paranoid. */ 121 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 122 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 123 POSTING_READ(GEN8_##type##_IMR(which)); \ 124 I915_WRITE(GEN8_##type##_IER(which), 0); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 128 POSTING_READ(GEN8_##type##_IIR(which)); \ 129 } while (0) 130 131 #define GEN5_IRQ_RESET(type) do { \ 132 I915_WRITE(type##IMR, 0xffffffff); \ 133 POSTING_READ(type##IMR); \ 134 I915_WRITE(type##IER, 0); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 I915_WRITE(type##IIR, 0xffffffff); \ 138 POSTING_READ(type##IIR); \ 139 } while (0) 140 141 /* 142 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
143 */ 144 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, 145 i915_reg_t reg) 146 { 147 u32 val = I915_READ(reg); 148 149 if (val == 0) 150 return; 151 152 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 153 i915_mmio_reg_offset(reg), val); 154 I915_WRITE(reg, 0xffffffff); 155 POSTING_READ(reg); 156 I915_WRITE(reg, 0xffffffff); 157 POSTING_READ(reg); 158 } 159 160 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 161 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 162 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 163 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 164 POSTING_READ(GEN8_##type##_IMR(which)); \ 165 } while (0) 166 167 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 168 gen5_assert_iir_is_zero(dev_priv, type##IIR); \ 169 I915_WRITE(type##IER, (ier_val)); \ 170 I915_WRITE(type##IMR, (imr_val)); \ 171 POSTING_READ(type##IMR); \ 172 } while (0) 173 174 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 175 176 /* For display hotplug interrupt */ 177 static inline void 178 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 179 uint32_t mask, 180 uint32_t bits) 181 { 182 uint32_t val; 183 184 assert_spin_locked(&dev_priv->irq_lock); 185 WARN_ON(bits & ~mask); 186 187 val = I915_READ(PORT_HOTPLUG_EN); 188 val &= ~mask; 189 val |= bits; 190 I915_WRITE(PORT_HOTPLUG_EN, val); 191 } 192 193 /** 194 * i915_hotplug_interrupt_update - update hotplug interrupt enable 195 * @dev_priv: driver private 196 * @mask: bits to update 197 * @bits: bits to enable 198 * NOTE: the HPD enable bits are modified both inside and outside 199 * of an interrupt context. To avoid that read-modify-write cycles 200 * interfer, these bits are protected by a spinlock. Since this 201 * function is usually not called from a context where the lock is 202 * held already, this function acquires the lock itself. A non-locking 203 * version is also available. 
204 */ 205 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 206 uint32_t mask, 207 uint32_t bits) 208 { 209 spin_lock_irq(&dev_priv->irq_lock); 210 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 211 spin_unlock_irq(&dev_priv->irq_lock); 212 } 213 214 /** 215 * ilk_update_display_irq - update DEIMR 216 * @dev_priv: driver private 217 * @interrupt_mask: mask of interrupt bits to update 218 * @enabled_irq_mask: mask of interrupt bits to enable 219 */ 220 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 221 uint32_t interrupt_mask, 222 uint32_t enabled_irq_mask) 223 { 224 uint32_t new_val; 225 226 assert_spin_locked(&dev_priv->irq_lock); 227 228 WARN_ON(enabled_irq_mask & ~interrupt_mask); 229 230 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 231 return; 232 233 new_val = dev_priv->irq_mask; 234 new_val &= ~interrupt_mask; 235 new_val |= (~enabled_irq_mask & interrupt_mask); 236 237 if (new_val != dev_priv->irq_mask) { 238 dev_priv->irq_mask = new_val; 239 I915_WRITE(DEIMR, dev_priv->irq_mask); 240 POSTING_READ(DEIMR); 241 } 242 } 243 244 /** 245 * ilk_update_gt_irq - update GTIMR 246 * @dev_priv: driver private 247 * @interrupt_mask: mask of interrupt bits to update 248 * @enabled_irq_mask: mask of interrupt bits to enable 249 */ 250 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 251 uint32_t interrupt_mask, 252 uint32_t enabled_irq_mask) 253 { 254 assert_spin_locked(&dev_priv->irq_lock); 255 256 WARN_ON(enabled_irq_mask & ~interrupt_mask); 257 258 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 259 return; 260 261 dev_priv->gt_irq_mask &= ~interrupt_mask; 262 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 263 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 264 } 265 266 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 267 { 268 ilk_update_gt_irq(dev_priv, mask, mask); 269 POSTING_READ_FW(GTIMR); 270 } 271 272 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 273 { 274 ilk_update_gt_irq(dev_priv, mask, 0); 275 } 276 277 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 278 { 279 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 280 } 281 282 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 283 { 284 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 285 } 286 287 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 288 { 289 return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IER(2) : GEN6_PMIER; 290 } 291 292 /** 293 * snb_update_pm_irq - update GEN6_PMIMR 294 * @dev_priv: driver private 295 * @interrupt_mask: mask of interrupt bits to update 296 * @enabled_irq_mask: mask of interrupt bits to enable 297 */ 298 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 299 uint32_t interrupt_mask, 300 uint32_t enabled_irq_mask) 301 { 302 uint32_t new_val; 303 304 WARN_ON(enabled_irq_mask & ~interrupt_mask); 305 306 assert_spin_locked(&dev_priv->irq_lock); 307 308 new_val = dev_priv->pm_irq_mask; 309 new_val &= ~interrupt_mask; 310 new_val |= (~enabled_irq_mask & interrupt_mask); 311 312 if (new_val != dev_priv->pm_irq_mask) { 313 dev_priv->pm_irq_mask = new_val; 314 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask); 315 POSTING_READ(gen6_pm_imr(dev_priv)); 316 } 317 } 318 319 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 320 { 321 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 322 return; 323 324 snb_update_pm_irq(dev_priv, mask, mask); 325 } 326 327 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, 328 uint32_t mask) 329 { 330 snb_update_pm_irq(dev_priv, mask, 0); 331 } 332 333 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 334 { 335 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 336 return; 337 338 __gen6_disable_pm_irq(dev_priv, mask); 339 } 340 341 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 342 { 343 i915_reg_t reg = gen6_pm_iir(dev_priv); 344 345 spin_lock_irq(&dev_priv->irq_lock); 346 I915_WRITE(reg, dev_priv->pm_rps_events); 347 I915_WRITE(reg, dev_priv->pm_rps_events); 348 POSTING_READ(reg); 349 dev_priv->rps.pm_iir = 0; 350 spin_unlock_irq(&dev_priv->irq_lock); 351 } 352 353 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 354 { 355 spin_lock_irq(&dev_priv->irq_lock); 356 WARN_ON_ONCE(dev_priv->rps.pm_iir); 357 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 358 dev_priv->rps.interrupts_enabled = true; 359 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | 360 dev_priv->pm_rps_events); 361 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 362 363 spin_unlock_irq(&dev_priv->irq_lock); 364 } 365 366 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 367 { 368 return (mask & ~dev_priv->rps.pm_intr_keep); 369 } 370 371 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 372 { 373 spin_lock_irq(&dev_priv->irq_lock); 374 dev_priv->rps.interrupts_enabled = false; 375 376 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 377 378 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 379 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 380 ~dev_priv->pm_rps_events); 381 382 spin_unlock_irq(&dev_priv->irq_lock); 383 synchronize_irq(dev_priv->drm.irq); 384 385 /* Now that we will not be generating any more work, flush any 386 * outsanding tasks. As we are called on the RPS idle path, 387 * we will reset the GPU to minimum frequencies, so the current 388 * state of the worker can be discarded. 
389 */ 390 cancel_work_sync(&dev_priv->rps.work); 391 gen6_reset_rps_interrupts(dev_priv); 392 } 393 394 /** 395 * bdw_update_port_irq - update DE port interrupt 396 * @dev_priv: driver private 397 * @interrupt_mask: mask of interrupt bits to update 398 * @enabled_irq_mask: mask of interrupt bits to enable 399 */ 400 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 401 uint32_t interrupt_mask, 402 uint32_t enabled_irq_mask) 403 { 404 uint32_t new_val; 405 uint32_t old_val; 406 407 assert_spin_locked(&dev_priv->irq_lock); 408 409 WARN_ON(enabled_irq_mask & ~interrupt_mask); 410 411 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 412 return; 413 414 old_val = I915_READ(GEN8_DE_PORT_IMR); 415 416 new_val = old_val; 417 new_val &= ~interrupt_mask; 418 new_val |= (~enabled_irq_mask & interrupt_mask); 419 420 if (new_val != old_val) { 421 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 422 POSTING_READ(GEN8_DE_PORT_IMR); 423 } 424 } 425 426 /** 427 * bdw_update_pipe_irq - update DE pipe interrupt 428 * @dev_priv: driver private 429 * @pipe: pipe whose interrupt to update 430 * @interrupt_mask: mask of interrupt bits to update 431 * @enabled_irq_mask: mask of interrupt bits to enable 432 */ 433 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 434 enum i915_pipe pipe, 435 uint32_t interrupt_mask, 436 uint32_t enabled_irq_mask) 437 { 438 uint32_t new_val; 439 440 assert_spin_locked(&dev_priv->irq_lock); 441 442 WARN_ON(enabled_irq_mask & ~interrupt_mask); 443 444 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 445 return; 446 447 new_val = dev_priv->de_irq_mask[pipe]; 448 new_val &= ~interrupt_mask; 449 new_val |= (~enabled_irq_mask & interrupt_mask); 450 451 if (new_val != dev_priv->de_irq_mask[pipe]) { 452 dev_priv->de_irq_mask[pipe] = new_val; 453 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 454 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 455 } 456 } 457 458 /** 459 * ibx_display_interrupt_update - update SDEIMR 460 * @dev_priv: driver private 461 * @interrupt_mask: mask of interrupt bits to update 462 * @enabled_irq_mask: mask of interrupt bits to enable 463 */ 464 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 465 uint32_t interrupt_mask, 466 uint32_t enabled_irq_mask) 467 { 468 uint32_t sdeimr = I915_READ(SDEIMR); 469 sdeimr &= ~interrupt_mask; 470 sdeimr |= (~enabled_irq_mask & interrupt_mask); 471 472 WARN_ON(enabled_irq_mask & ~interrupt_mask); 473 474 assert_spin_locked(&dev_priv->irq_lock); 475 476 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 477 return; 478 479 I915_WRITE(SDEIMR, sdeimr); 480 POSTING_READ(SDEIMR); 481 } 482 483 static void 484 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 485 u32 enable_mask, u32 status_mask) 486 { 487 i915_reg_t reg = PIPESTAT(pipe); 488 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 489 490 assert_spin_locked(&dev_priv->irq_lock); 491 WARN_ON(!intel_irqs_enabled(dev_priv)); 492 493 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 494 status_mask & ~PIPESTAT_INT_STATUS_MASK, 495 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 496 pipe_name(pipe), enable_mask, status_mask)) 497 return; 498 499 if ((pipestat & enable_mask) == enable_mask) 500 return; 501 502 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 503 504 /* Enable the interrupt, clear any pending status */ 505 pipestat |= enable_mask | status_mask; 506 I915_WRITE(reg, pipestat); 507 POSTING_READ(reg); 508 } 509 510 static void 511 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum 
i915_pipe pipe, 512 u32 enable_mask, u32 status_mask) 513 { 514 i915_reg_t reg = PIPESTAT(pipe); 515 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 516 517 assert_spin_locked(&dev_priv->irq_lock); 518 WARN_ON(!intel_irqs_enabled(dev_priv)); 519 520 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 521 status_mask & ~PIPESTAT_INT_STATUS_MASK, 522 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 523 pipe_name(pipe), enable_mask, status_mask)) 524 return; 525 526 if ((pipestat & enable_mask) == 0) 527 return; 528 529 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 530 531 pipestat &= ~enable_mask; 532 I915_WRITE(reg, pipestat); 533 POSTING_READ(reg); 534 } 535 536 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 537 { 538 u32 enable_mask = status_mask << 16; 539 540 /* 541 * On pipe A we don't support the PSR interrupt yet, 542 * on pipe B and C the same bit MBZ. 543 */ 544 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 545 return 0; 546 /* 547 * On pipe B and C we don't support the PSR interrupt yet, on pipe 548 * A the same bit is for perf counters which we don't use either. 549 */ 550 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 551 return 0; 552 553 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 554 SPRITE0_FLIP_DONE_INT_EN_VLV | 555 SPRITE1_FLIP_DONE_INT_EN_VLV); 556 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 557 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 558 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 559 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 560 561 return enable_mask; 562 } 563 564 void 565 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 566 u32 status_mask) 567 { 568 u32 enable_mask; 569 570 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 571 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 572 status_mask); 573 else 574 enable_mask = status_mask << 16; 575 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 576 } 577 578 void 579 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 580 u32 status_mask) 581 { 582 u32 enable_mask; 583 584 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 585 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 586 status_mask); 587 else 588 enable_mask = status_mask << 16; 589 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 590 } 591 592 /** 593 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 594 * @dev_priv: i915 device private 595 */ 596 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 597 { 598 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 599 return; 600 601 spin_lock_irq(&dev_priv->irq_lock); 602 603 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 604 if (INTEL_GEN(dev_priv) >= 4) 605 i915_enable_pipestat(dev_priv, PIPE_A, 606 PIPE_LEGACY_BLC_EVENT_STATUS); 607 608 spin_unlock_irq(&dev_priv->irq_lock); 609 } 610 611 /* 612 * This timing diagram depicts the video signal in and 613 * around the vertical blanking period. 614 * 615 * Assumptions about the fictitious mode used in this example: 616 * vblank_start >= 3 617 * vsync_start = vblank_start + 1 618 * vsync_end = vblank_start + 2 619 * vtotal = vblank_start + 3 620 * 621 * start of vblank: 622 * latch double buffered registers 623 * increment frame counter (ctg+) 624 * generate start of vblank interrupt (gen4+) 625 * | 626 * | frame start: 627 * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) 628 * | may be shifted forward 1-3 extra lines via PIPECONF 629 * | | 630 * | | start of vsync: 631 * | | generate vsync interrupt 632 * | | | 633 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 634 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 635 * ----va---> <-----------------vb--------------------> <--------va------------- 636 * | | <----vs-----> | 637 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 638 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 639 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 640 * | | | 641 * last visible pixel first visible pixel 642 * | increment frame counter (gen3/4) 643 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 644 * 645 * x = horizontal active 646 * _ = horizontal blanking 647 * hs = horizontal sync 648 * va = vertical active 649 * vb = vertical blanking 650 * vs = vertical sync 651 * vbs = vblank_start (number) 652 * 653 * Summary: 654 * - most events happen at the start of horizontal sync 655 * - frame start happens at the start of horizontal blank, 1-4 lines 656 * (depending on PIPECONF settings) after the start of vblank 657 * - gen3/4 pixel and frame counter are synchronized with the start 658 * of horizontal active on the first line of vertical active 659 */ 660 661 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 662 { 663 /* Gen2 doesn't have a hardware frame counter */ 664 return 0; 665 } 666 667 /* Called from drm generic code, passed a 'crtc', which 668 * we use as a pipe index 669 */ 670 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 671 { 672 struct drm_i915_private *dev_priv = to_i915(dev); 673 i915_reg_t high_frame, low_frame; 674 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 675 struct intel_crtc *intel_crtc = 676 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 677 const struct drm_display_mode *mode = &intel_crtc->base.hwmode; 678 679 htotal = mode->crtc_htotal; 680 hsync_start = mode->crtc_hsync_start; 681 vbl_start = mode->crtc_vblank_start; 682 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 683 vbl_start = DIV_ROUND_UP(vbl_start, 2); 684 685 /* Convert to pixel count */ 686 vbl_start *= htotal; 687 688 /* Start of vblank event occurs at start of hsync */ 689 vbl_start -= htotal - hsync_start; 690 691 high_frame = PIPEFRAME(pipe); 692 low_frame = PIPEFRAMEPIXEL(pipe); 693 694 /* 695 * High & low register fields aren't synchronized, so make sure 696 * we get a low value that's stable across two reads of the high 697 * register. 698 */ 699 do { 700 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 701 low = I915_READ(low_frame); 702 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 703 } while (high1 != high2); 704 705 high1 >>= PIPE_FRAME_HIGH_SHIFT; 706 pixel = low & PIPE_PIXEL_MASK; 707 low >>= PIPE_FRAME_LOW_SHIFT; 708 709 /* 710 * The frame counter increments at beginning of active. 711 * Cook up a vblank counter by also checking the pixel 712 * counter against vblank start. 
713 */ 714 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 715 } 716 717 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 718 { 719 struct drm_i915_private *dev_priv = to_i915(dev); 720 721 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 722 } 723 724 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ 725 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 726 { 727 struct drm_device *dev = crtc->base.dev; 728 struct drm_i915_private *dev_priv = to_i915(dev); 729 const struct drm_display_mode *mode = &crtc->base.hwmode; 730 enum i915_pipe pipe = crtc->pipe; 731 int position, vtotal; 732 733 vtotal = mode->crtc_vtotal; 734 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 735 vtotal /= 2; 736 737 if (IS_GEN2(dev_priv)) 738 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 739 else 740 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 741 742 /* 743 * On HSW, the DSL reg (0x70000) appears to return 0 if we 744 * read it just before the start of vblank. So try it again 745 * so we don't accidentally end up spanning a vblank frame 746 * increment, causing the pipe_update_end() code to squak at us. 747 * 748 * The nature of this problem means we can't simply check the ISR 749 * bit and return the vblank start value; nor can we use the scanline 750 * debug register in the transcoder as it appears to have the same 751 * problem. We may need to extend this to include other platforms, 752 * but so far testing only shows the problem on HSW. 753 */ 754 if (HAS_DDI(dev_priv) && !position) { 755 int i, temp; 756 757 for (i = 0; i < 100; i++) { 758 udelay(1); 759 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & 760 DSL_LINEMASK_GEN3; 761 if (temp != position) { 762 position = temp; 763 break; 764 } 765 } 766 } 767 768 /* 769 * See update_scanline_offset() for the details on the 770 * scanline_offset adjustment. 771 */ 772 return (position + crtc->scanline_offset) % vtotal; 773 } 774 775 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 776 unsigned int flags, int *vpos, int *hpos, 777 ktime_t *stime, ktime_t *etime, 778 const struct drm_display_mode *mode) 779 { 780 struct drm_i915_private *dev_priv = to_i915(dev); 781 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 782 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 783 int position; 784 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 785 bool in_vbl = true; 786 int ret = 0; 787 unsigned long irqflags; 788 789 if (WARN_ON(!mode->crtc_clock)) { 790 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 791 "pipe %c\n", pipe_name(pipe)); 792 return 0; 793 } 794 795 htotal = mode->crtc_htotal; 796 hsync_start = mode->crtc_hsync_start; 797 vtotal = mode->crtc_vtotal; 798 vbl_start = mode->crtc_vblank_start; 799 vbl_end = mode->crtc_vblank_end; 800 801 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 802 vbl_start = DIV_ROUND_UP(vbl_start, 2); 803 vbl_end /= 2; 804 vtotal /= 2; 805 } 806 807 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 808 809 /* 810 * Lock uncore.lock, as we will do multiple timing critical raw 811 * register reads, potentially with preemption disabled, so the 812 * following code must not block on uncore.lock. 813 */ 814 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 815 816 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 817 818 /* Get optional system timestamp before query. 
*/ 819 if (stime) 820 *stime = ktime_get(); 821 822 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 823 /* No obvious pixelcount register. Only query vertical 824 * scanout position from Display scan line register. 825 */ 826 position = __intel_get_crtc_scanline(intel_crtc); 827 } else { 828 /* Have access to pixelcount since start of frame. 829 * We can split this into vertical and horizontal 830 * scanout position. 831 */ 832 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 833 834 /* convert to pixel counts */ 835 vbl_start *= htotal; 836 vbl_end *= htotal; 837 vtotal *= htotal; 838 839 /* 840 * In interlaced modes, the pixel counter counts all pixels, 841 * so one field will have htotal more pixels. In order to avoid 842 * the reported position from jumping backwards when the pixel 843 * counter is beyond the length of the shorter field, just 844 * clamp the position the length of the shorter field. This 845 * matches how the scanline counter based position works since 846 * the scanline counter doesn't count the two half lines. 847 */ 848 if (position >= vtotal) 849 position = vtotal - 1; 850 851 /* 852 * Start of vblank interrupt is triggered at start of hsync, 853 * just prior to the first active line of vblank. However we 854 * consider lines to start at the leading edge of horizontal 855 * active. So, should we get here before we've crossed into 856 * the horizontal active of the first line in vblank, we would 857 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 858 * always add htotal-hsync_start to the current pixel position. 859 */ 860 position = (position + htotal - hsync_start) % vtotal; 861 } 862 863 /* Get optional system timestamp after query. */ 864 if (etime) 865 *etime = ktime_get(); 866 867 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 868 869 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 870 871 in_vbl = position >= vbl_start && position < vbl_end; 872 873 /* 874 * While in vblank, position will be negative 875 * counting up towards 0 at vbl_end. And outside 876 * vblank, position will be positive counting 877 * up since vbl_end. 878 */ 879 if (position >= vbl_start) 880 position -= vbl_end; 881 else 882 position += vtotal - vbl_end; 883 884 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 885 *vpos = position; 886 *hpos = 0; 887 } else { 888 *vpos = position / htotal; 889 *hpos = position - (*vpos * htotal); 890 } 891 892 /* In vblank? 
*/ 893 if (in_vbl) 894 ret |= DRM_SCANOUTPOS_IN_VBLANK; 895 896 return ret; 897 } 898 899 int intel_get_crtc_scanline(struct intel_crtc *crtc) 900 { 901 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 902 unsigned long irqflags; 903 int position; 904 905 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 906 position = __intel_get_crtc_scanline(crtc); 907 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 908 909 return position; 910 } 911 912 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, 913 int *max_error, 914 struct timeval *vblank_time, 915 unsigned flags) 916 { 917 struct drm_crtc *crtc; 918 919 if (pipe >= INTEL_INFO(dev)->num_pipes) { 920 DRM_ERROR("Invalid crtc %u\n", pipe); 921 return -EINVAL; 922 } 923 924 /* Get drm_crtc to timestamp: */ 925 crtc = intel_get_crtc_for_pipe(dev, pipe); 926 if (crtc == NULL) { 927 DRM_ERROR("Invalid crtc %u\n", pipe); 928 return -EINVAL; 929 } 930 931 if (!crtc->hwmode.crtc_clock) { 932 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); 933 return -EBUSY; 934 } 935 936 /* Helper routine in DRM core does all the work: */ 937 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 938 vblank_time, flags, 939 &crtc->hwmode); 940 } 941 942 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 943 { 944 u32 busy_up, busy_down, max_avg, min_avg; 945 u8 new_delay; 946 947 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 948 949 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 950 951 new_delay = dev_priv->ips.cur_delay; 952 953 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 954 busy_up = I915_READ(RCPREVBSYTUPAVG); 955 busy_down = I915_READ(RCPREVBSYTDNAVG); 956 max_avg = I915_READ(RCBMAXAVG); 957 min_avg = I915_READ(RCBMINAVG); 958 959 /* Handle RCS change request from hw */ 960 if (busy_up > max_avg) { 961 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 962 new_delay = dev_priv->ips.cur_delay - 1; 963 if (new_delay < dev_priv->ips.max_delay) 964 new_delay = dev_priv->ips.max_delay; 965 } else if (busy_down < min_avg) { 966 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 967 new_delay = dev_priv->ips.cur_delay + 1; 968 if (new_delay > dev_priv->ips.min_delay) 969 new_delay = dev_priv->ips.min_delay; 970 } 971 972 if (ironlake_set_drps(dev_priv, new_delay)) 973 dev_priv->ips.cur_delay = new_delay; 974 975 lockmgr(&mchdev_lock, LK_RELEASE); 976 977 return; 978 } 979 980 static void notify_ring(struct intel_engine_cs *engine) 981 { 982 smp_store_mb(engine->breadcrumbs.irq_posted, true); 983 if (intel_engine_wakeup(engine)) { 984 trace_i915_gem_request_notify(engine); 985 engine->breadcrumbs.irq_wakeups++; 986 } 987 } 988 989 static void vlv_c0_read(struct drm_i915_private *dev_priv, 990 struct intel_rps_ei *ei) 991 { 992 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 993 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 994 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 995 } 996 997 static bool vlv_c0_above(struct drm_i915_private *dev_priv, 998 const struct intel_rps_ei *old, 999 const struct intel_rps_ei *now, 1000 int threshold) 1001 { 1002 u64 time, c0; 1003 unsigned int mul = 100; 1004 1005 if (old->cz_clock == 0) 1006 return false; 1007 1008 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 1009 mul <<= 8; 1010 1011 time = now->cz_clock - old->cz_clock; 1012 time *= threshold * dev_priv->czclk_freq; 1013 1014 /* Workload can be split between render + media, e.g. SwapBuffers 1015 * being blitted in X after being rendered in mesa. 
To account for 1016 * this we need to combine both engines into our activity counter. 1017 */ 1018 c0 = now->render_c0 - old->render_c0; 1019 c0 += now->media_c0 - old->media_c0; 1020 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; 1021 1022 return c0 >= time; 1023 } 1024 1025 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1026 { 1027 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1028 dev_priv->rps.up_ei = dev_priv->rps.down_ei; 1029 } 1030 1031 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1032 { 1033 struct intel_rps_ei now; 1034 u32 events = 0; 1035 1036 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1037 return 0; 1038 1039 vlv_c0_read(dev_priv, &now); 1040 if (now.cz_clock == 0) 1041 return 0; 1042 1043 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1044 if (!vlv_c0_above(dev_priv, 1045 &dev_priv->rps.down_ei, &now, 1046 dev_priv->rps.down_threshold)) 1047 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1048 dev_priv->rps.down_ei = now; 1049 } 1050 1051 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1052 if (vlv_c0_above(dev_priv, 1053 &dev_priv->rps.up_ei, &now, 1054 dev_priv->rps.up_threshold)) 1055 events |= GEN6_PM_RP_UP_THRESHOLD; 1056 dev_priv->rps.up_ei = now; 1057 } 1058 1059 return events; 1060 } 1061 1062 static bool any_waiters(struct drm_i915_private *dev_priv) 1063 { 1064 struct intel_engine_cs *engine; 1065 1066 for_each_engine(engine, dev_priv) 1067 if (intel_engine_has_waiter(engine)) 1068 return true; 1069 1070 return false; 1071 } 1072 1073 static void gen6_pm_rps_work(struct work_struct *work) 1074 { 1075 struct drm_i915_private *dev_priv = 1076 container_of(work, struct drm_i915_private, rps.work); 1077 bool client_boost; 1078 int new_delay, adj, min, max; 1079 u32 pm_iir; 1080 1081 spin_lock_irq(&dev_priv->irq_lock); 1082 /* Speed up work cancelation during disabling rps interrupts. */ 1083 if (!dev_priv->rps.interrupts_enabled) { 1084 spin_unlock_irq(&dev_priv->irq_lock); 1085 return; 1086 } 1087 1088 pm_iir = dev_priv->rps.pm_iir; 1089 dev_priv->rps.pm_iir = 0; 1090 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1091 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1092 client_boost = dev_priv->rps.client_boost; 1093 dev_priv->rps.client_boost = false; 1094 spin_unlock_irq(&dev_priv->irq_lock); 1095 1096 /* Make sure we didn't queue anything we're not going to process. */ 1097 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1098 1099 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1100 return; 1101 1102 mutex_lock(&dev_priv->rps.hw_lock); 1103 1104 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1105 1106 adj = dev_priv->rps.last_adj; 1107 new_delay = dev_priv->rps.cur_freq; 1108 min = dev_priv->rps.min_freq_softlimit; 1109 max = dev_priv->rps.max_freq_softlimit; 1110 1111 if (client_boost) { 1112 new_delay = dev_priv->rps.max_freq_softlimit; 1113 adj = 0; 1114 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1115 if (adj > 0) 1116 adj *= 2; 1117 else /* CHV needs even encode values */ 1118 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1119 /* 1120 * For better performance, jump directly 1121 * to RPe if we're below it. 
1122 */ 1123 if (new_delay < dev_priv->rps.efficient_freq - adj) { 1124 new_delay = dev_priv->rps.efficient_freq; 1125 adj = 0; 1126 } 1127 } else if (any_waiters(dev_priv)) { 1128 adj = 0; 1129 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1130 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1131 new_delay = dev_priv->rps.efficient_freq; 1132 else 1133 new_delay = dev_priv->rps.min_freq_softlimit; 1134 adj = 0; 1135 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1136 if (adj < 0) 1137 adj *= 2; 1138 else /* CHV needs even encode values */ 1139 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1140 } else { /* unknown event */ 1141 adj = 0; 1142 } 1143 1144 dev_priv->rps.last_adj = adj; 1145 1146 /* sysfs frequency interfaces may have snuck in while servicing the 1147 * interrupt 1148 */ 1149 new_delay += adj; 1150 new_delay = clamp_t(int, new_delay, min, max); 1151 1152 intel_set_rps(dev_priv, new_delay); 1153 1154 mutex_unlock(&dev_priv->rps.hw_lock); 1155 } 1156 1157 1158 /** 1159 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1160 * occurred. 1161 * @work: workqueue struct 1162 * 1163 * Doesn't actually do anything except notify userspace. As a consequence of 1164 * this event, userspace should try to remap the bad rows since statistically 1165 * it is likely the same row is more likely to go bad again. 1166 */ 1167 static void ivybridge_parity_work(struct work_struct *work) 1168 { 1169 struct drm_i915_private *dev_priv = 1170 container_of(work, struct drm_i915_private, l3_parity.error_work); 1171 u32 error_status, row, bank, subbank; 1172 char *parity_event[6]; 1173 uint32_t misccpctl; 1174 uint8_t slice = 0; 1175 1176 /* We must turn off DOP level clock gating to access the L3 registers. 1177 * In order to prevent a get/put style interface, acquire struct mutex 1178 * any time we access those registers. 
1179 */ 1180 mutex_lock(&dev_priv->drm.struct_mutex); 1181 1182 /* If we've screwed up tracking, just let the interrupt fire again */ 1183 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1184 goto out; 1185 1186 misccpctl = I915_READ(GEN7_MISCCPCTL); 1187 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1188 POSTING_READ(GEN7_MISCCPCTL); 1189 1190 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1191 i915_reg_t reg; 1192 1193 slice--; 1194 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1195 break; 1196 1197 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1198 1199 reg = GEN7_L3CDERRST1(slice); 1200 1201 error_status = I915_READ(reg); 1202 row = GEN7_PARITY_ERROR_ROW(error_status); 1203 bank = GEN7_PARITY_ERROR_BANK(error_status); 1204 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1205 1206 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1207 POSTING_READ(reg); 1208 1209 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1210 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1211 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1212 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1213 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1214 parity_event[5] = NULL; 1215 1216 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1217 KOBJ_CHANGE, parity_event); 1218 1219 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1220 slice, row, bank, subbank); 1221 1222 kfree(parity_event[4]); 1223 kfree(parity_event[3]); 1224 kfree(parity_event[2]); 1225 kfree(parity_event[1]); 1226 } 1227 1228 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1229 1230 out: 1231 WARN_ON(dev_priv->l3_parity.which_slice); 1232 spin_lock_irq(&dev_priv->irq_lock); 1233 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1234 spin_unlock_irq(&dev_priv->irq_lock); 1235 1236 mutex_unlock(&dev_priv->drm.struct_mutex); 1237 } 1238 1239 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1240 u32 iir) 1241 { 1242 if (!HAS_L3_DPF(dev_priv)) 1243 return; 1244 1245 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1246 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1247 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1248 1249 iir &= GT_PARITY_ERROR(dev_priv); 1250 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1251 dev_priv->l3_parity.which_slice |= 1 << 1; 1252 1253 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1254 dev_priv->l3_parity.which_slice |= 1 << 0; 1255 1256 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1257 } 1258 1259 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1260 u32 gt_iir) 1261 { 1262 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1263 notify_ring(&dev_priv->engine[RCS]); 1264 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1265 notify_ring(&dev_priv->engine[VCS]); 1266 } 1267 1268 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1269 u32 gt_iir) 1270 { 1271 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1272 notify_ring(&dev_priv->engine[RCS]); 1273 if (gt_iir & GT_BSD_USER_INTERRUPT) 1274 notify_ring(&dev_priv->engine[VCS]); 1275 if (gt_iir & GT_BLT_USER_INTERRUPT) 1276 notify_ring(&dev_priv->engine[BCS]); 1277 1278 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1279 GT_BSD_CS_ERROR_INTERRUPT | 1280 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1281 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1282 1283 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1284 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1285 } 1286 1287 static 
__always_inline void 1288 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1289 { 1290 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) 1291 notify_ring(engine); 1292 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) 1293 tasklet_schedule(&engine->irq_tasklet); 1294 } 1295 1296 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1297 u32 master_ctl, 1298 u32 gt_iir[4]) 1299 { 1300 irqreturn_t ret = IRQ_NONE; 1301 1302 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1303 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1304 if (gt_iir[0]) { 1305 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1306 ret = IRQ_HANDLED; 1307 } else 1308 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1309 } 1310 1311 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1312 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1313 if (gt_iir[1]) { 1314 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1315 ret = IRQ_HANDLED; 1316 } else 1317 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1318 } 1319 1320 if (master_ctl & GEN8_GT_VECS_IRQ) { 1321 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1322 if (gt_iir[3]) { 1323 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1324 ret = IRQ_HANDLED; 1325 } else 1326 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1327 } 1328 1329 if (master_ctl & GEN8_GT_PM_IRQ) { 1330 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1331 if (gt_iir[2] & dev_priv->pm_rps_events) { 1332 I915_WRITE_FW(GEN8_GT_IIR(2), 1333 gt_iir[2] & dev_priv->pm_rps_events); 1334 ret = IRQ_HANDLED; 1335 } else 1336 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1337 } 1338 1339 return ret; 1340 } 1341 1342 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1343 u32 gt_iir[4]) 1344 { 1345 if (gt_iir[0]) { 1346 gen8_cs_irq_handler(&dev_priv->engine[RCS], 1347 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1348 gen8_cs_irq_handler(&dev_priv->engine[BCS], 1349 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1350 } 1351 1352 if (gt_iir[1]) { 1353 gen8_cs_irq_handler(&dev_priv->engine[VCS], 1354 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1355 gen8_cs_irq_handler(&dev_priv->engine[VCS2], 1356 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1357 } 1358 1359 if (gt_iir[3]) 1360 gen8_cs_irq_handler(&dev_priv->engine[VECS], 1361 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1362 1363 if (gt_iir[2] & dev_priv->pm_rps_events) 1364 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1365 } 1366 1367 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1368 { 1369 switch (port) { 1370 case PORT_A: 1371 return val & PORTA_HOTPLUG_LONG_DETECT; 1372 case PORT_B: 1373 return val & PORTB_HOTPLUG_LONG_DETECT; 1374 case PORT_C: 1375 return val & PORTC_HOTPLUG_LONG_DETECT; 1376 default: 1377 return false; 1378 } 1379 } 1380 1381 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1382 { 1383 switch (port) { 1384 case PORT_E: 1385 return val & PORTE_HOTPLUG_LONG_DETECT; 1386 default: 1387 return false; 1388 } 1389 } 1390 1391 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1392 { 1393 switch (port) { 1394 case PORT_A: 1395 return val & PORTA_HOTPLUG_LONG_DETECT; 1396 case PORT_B: 1397 return val & PORTB_HOTPLUG_LONG_DETECT; 1398 case PORT_C: 1399 return val & PORTC_HOTPLUG_LONG_DETECT; 1400 case PORT_D: 1401 return val & PORTD_HOTPLUG_LONG_DETECT; 1402 default: 1403 return false; 1404 } 1405 } 1406 1407 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1408 { 1409 switch (port) { 1410 case PORT_A: 1411 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1412 default: 1413 return 
false; 1414 } 1415 } 1416 1417 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1418 { 1419 switch (port) { 1420 case PORT_B: 1421 return val & PORTB_HOTPLUG_LONG_DETECT; 1422 case PORT_C: 1423 return val & PORTC_HOTPLUG_LONG_DETECT; 1424 case PORT_D: 1425 return val & PORTD_HOTPLUG_LONG_DETECT; 1426 default: 1427 return false; 1428 } 1429 } 1430 1431 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1432 { 1433 switch (port) { 1434 case PORT_B: 1435 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1436 case PORT_C: 1437 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1438 case PORT_D: 1439 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1440 default: 1441 return false; 1442 } 1443 } 1444 1445 /* 1446 * Get a bit mask of pins that have triggered, and which ones may be long. 1447 * This can be called multiple times with the same masks to accumulate 1448 * hotplug detection results from several registers. 1449 * 1450 * Note that the caller is expected to zero out the masks initially. 1451 */ 1452 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1453 u32 hotplug_trigger, u32 dig_hotplug_reg, 1454 const u32 hpd[HPD_NUM_PINS], 1455 bool long_pulse_detect(enum port port, u32 val)) 1456 { 1457 enum port port; 1458 int i; 1459 1460 for_each_hpd_pin(i) { 1461 if ((hpd[i] & hotplug_trigger) == 0) 1462 continue; 1463 1464 *pin_mask |= BIT(i); 1465 1466 if (!intel_hpd_pin_to_port(i, &port)) 1467 continue; 1468 1469 if (long_pulse_detect(port, dig_hotplug_reg)) 1470 *long_mask |= BIT(i); 1471 } 1472 1473 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1474 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1475 1476 } 1477 1478 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1479 { 1480 wake_up_all(&dev_priv->gmbus_wait_queue); 1481 } 1482 1483 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1484 { 1485 wake_up_all(&dev_priv->gmbus_wait_queue); 1486 } 1487 1488 #if defined(CONFIG_DEBUG_FS) 1489 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1490 enum i915_pipe pipe, 1491 uint32_t crc0, uint32_t crc1, 1492 uint32_t crc2, uint32_t crc3, 1493 uint32_t crc4) 1494 { 1495 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1496 struct intel_pipe_crc_entry *entry; 1497 int head, tail; 1498 1499 spin_lock(&pipe_crc->lock); 1500 1501 if (!pipe_crc->entries) { 1502 spin_unlock(&pipe_crc->lock); 1503 DRM_DEBUG_KMS("spurious interrupt\n"); 1504 return; 1505 } 1506 1507 head = pipe_crc->head; 1508 tail = pipe_crc->tail; 1509 1510 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1511 spin_unlock(&pipe_crc->lock); 1512 DRM_ERROR("CRC buffer overflowing\n"); 1513 return; 1514 } 1515 1516 entry = &pipe_crc->entries[head]; 1517 1518 entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, 1519 pipe); 1520 entry->crc[0] = crc0; 1521 entry->crc[1] = crc1; 1522 entry->crc[2] = crc2; 1523 entry->crc[3] = crc3; 1524 entry->crc[4] = crc4; 1525 1526 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1527 pipe_crc->head = head; 1528 1529 spin_unlock(&pipe_crc->lock); 1530 1531 wake_up_interruptible(&pipe_crc->wq); 1532 } 1533 #else 1534 static inline void 1535 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1536 enum i915_pipe pipe, 1537 uint32_t crc0, uint32_t crc1, 1538 uint32_t crc2, uint32_t crc3, 1539 uint32_t crc4) {} 1540 #endif 1541 1542 1543 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1544 enum i915_pipe pipe) 1545 
{ 1546 display_pipe_crc_irq_handler(dev_priv, pipe, 1547 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1548 0, 0, 0, 0); 1549 } 1550 1551 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1552 enum i915_pipe pipe) 1553 { 1554 display_pipe_crc_irq_handler(dev_priv, pipe, 1555 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1556 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1557 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1558 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1559 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1560 } 1561 1562 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1563 enum i915_pipe pipe) 1564 { 1565 uint32_t res1, res2; 1566 1567 if (INTEL_GEN(dev_priv) >= 3) 1568 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1569 else 1570 res1 = 0; 1571 1572 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1573 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1574 else 1575 res2 = 0; 1576 1577 display_pipe_crc_irq_handler(dev_priv, pipe, 1578 I915_READ(PIPE_CRC_RES_RED(pipe)), 1579 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1580 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1581 res1, res2); 1582 } 1583 1584 /* The RPS events need forcewake, so we add them to a work queue and mask their 1585 * IMR bits until the work is done. Other interrupts can be processed without 1586 * the work queue. */ 1587 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1588 { 1589 if (pm_iir & dev_priv->pm_rps_events) { 1590 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1591 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1592 if (dev_priv->rps.interrupts_enabled) { 1593 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1594 schedule_work(&dev_priv->rps.work); 1595 } 1596 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1597 } 1598 1599 if (INTEL_INFO(dev_priv)->gen >= 8) 1600 return; 1601 1602 if (HAS_VEBOX(dev_priv)) { 1603 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1604 notify_ring(&dev_priv->engine[VECS]); 1605 1606 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1607 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1608 } 1609 } 1610 1611 static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv, 1612 enum i915_pipe pipe) 1613 { 1614 bool ret; 1615 1616 ret = drm_handle_vblank(&dev_priv->drm, pipe); 1617 if (ret) 1618 intel_finish_page_flip_mmio(dev_priv, pipe); 1619 1620 return ret; 1621 } 1622 1623 static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1624 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1625 { 1626 int pipe; 1627 1628 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1629 1630 if (!dev_priv->display_irqs_enabled) { 1631 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1632 return; 1633 } 1634 1635 for_each_pipe(dev_priv, pipe) { 1636 i915_reg_t reg; 1637 u32 mask, iir_bit = 0; 1638 1639 /* 1640 * PIPESTAT bits get signalled even when the interrupt is 1641 * disabled with the mask bits, and some of the status bits do 1642 * not generate interrupts at all (like the underrun bit). Hence 1643 * we need to be careful that we only handle what we want to 1644 * handle. 1645 */ 1646 1647 /* fifo underruns are filterered in the underrun handler. 
*/ 1648 mask = PIPE_FIFO_UNDERRUN_STATUS; 1649 1650 switch (pipe) { 1651 case PIPE_A: 1652 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1653 break; 1654 case PIPE_B: 1655 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1656 break; 1657 case PIPE_C: 1658 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1659 break; 1660 } 1661 if (iir & iir_bit) 1662 mask |= dev_priv->pipestat_irq_mask[pipe]; 1663 1664 if (!mask) 1665 continue; 1666 1667 reg = PIPESTAT(pipe); 1668 mask |= PIPESTAT_INT_ENABLE_MASK; 1669 pipe_stats[pipe] = I915_READ(reg) & mask; 1670 1671 /* 1672 * Clear the PIPE*STAT regs before the IIR 1673 */ 1674 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1675 PIPESTAT_INT_STATUS_MASK)) 1676 I915_WRITE(reg, pipe_stats[pipe]); 1677 } 1678 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1679 } 1680 1681 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1682 u32 pipe_stats[I915_MAX_PIPES]) 1683 { 1684 enum i915_pipe pipe; 1685 1686 for_each_pipe(dev_priv, pipe) { 1687 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1688 intel_pipe_handle_vblank(dev_priv, pipe)) 1689 intel_check_page_flip(dev_priv, pipe); 1690 1691 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 1692 intel_finish_page_flip_cs(dev_priv, pipe); 1693 1694 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1695 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1696 1697 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1698 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1699 } 1700 1701 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1702 gmbus_irq_handler(dev_priv); 1703 } 1704 1705 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1706 { 1707 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1708 1709 if (hotplug_status) 1710 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1711 1712 return hotplug_status; 1713 } 1714 1715 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1716 u32 hotplug_status) 1717 { 1718 u32 pin_mask = 0, long_mask = 0; 1719 1720 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1721 IS_CHERRYVIEW(dev_priv)) { 1722 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1723 1724 if (hotplug_trigger) { 1725 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1726 hotplug_trigger, hpd_status_g4x, 1727 i9xx_port_hotplug_long_detect); 1728 1729 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1730 } 1731 1732 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1733 dp_aux_irq_handler(dev_priv); 1734 } else { 1735 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1736 1737 if (hotplug_trigger) { 1738 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1739 hotplug_trigger, hpd_status_i915, 1740 i9xx_port_hotplug_long_detect); 1741 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1742 } 1743 } 1744 } 1745 1746 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1747 { 1748 struct drm_device *dev = arg; 1749 struct drm_i915_private *dev_priv = to_i915(dev); 1750 irqreturn_t ret = IRQ_NONE; 1751 1752 if (!intel_irqs_enabled(dev_priv)) 1753 return IRQ_NONE; 1754 1755 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1756 disable_rpm_wakeref_asserts(dev_priv); 1757 1758 do { 1759 u32 iir, gt_iir, pm_iir; 1760 u32 pipe_stats[I915_MAX_PIPES] = {}; 1761 u32 hotplug_status = 0; 1762 u32 ier = 0; 1763 1764 gt_iir = I915_READ(GTIIR); 1765 pm_iir = I915_READ(GEN6_PMIIR); 1766 iir = I915_READ(VLV_IIR); 1767 1768 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 
1769 break; 1770 1771 ret = IRQ_HANDLED; 1772 1773 /* 1774 * Theory on interrupt generation, based on empirical evidence: 1775 * 1776 * x = ((VLV_IIR & VLV_IER) || 1777 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1778 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1779 * 1780 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1781 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1782 * guarantee the CPU interrupt will be raised again even if we 1783 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1784 * bits this time around. 1785 */ 1786 I915_WRITE(VLV_MASTER_IER, 0); 1787 ier = I915_READ(VLV_IER); 1788 I915_WRITE(VLV_IER, 0); 1789 1790 if (gt_iir) 1791 I915_WRITE(GTIIR, gt_iir); 1792 if (pm_iir) 1793 I915_WRITE(GEN6_PMIIR, pm_iir); 1794 1795 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1796 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1797 1798 /* Call regardless, as some status bits might not be 1799 * signalled in iir */ 1800 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1801 1802 /* 1803 * VLV_IIR is single buffered, and reflects the level 1804 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1805 */ 1806 if (iir) 1807 I915_WRITE(VLV_IIR, iir); 1808 1809 I915_WRITE(VLV_IER, ier); 1810 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1811 POSTING_READ(VLV_MASTER_IER); 1812 1813 if (gt_iir) 1814 snb_gt_irq_handler(dev_priv, gt_iir); 1815 if (pm_iir) 1816 gen6_rps_irq_handler(dev_priv, pm_iir); 1817 1818 if (hotplug_status) 1819 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1820 1821 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1822 } while (0); 1823 1824 enable_rpm_wakeref_asserts(dev_priv); 1825 1826 return ret; 1827 } 1828 1829 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1830 { 1831 struct drm_device *dev = arg; 1832 struct drm_i915_private *dev_priv = to_i915(dev); 1833 irqreturn_t ret = IRQ_NONE; 1834 1835 if (!intel_irqs_enabled(dev_priv)) 1836 return IRQ_NONE; 1837 1838 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1839 disable_rpm_wakeref_asserts(dev_priv); 1840 1841 do { 1842 u32 master_ctl, iir; 1843 u32 gt_iir[4] = {}; 1844 u32 pipe_stats[I915_MAX_PIPES] = {}; 1845 u32 hotplug_status = 0; 1846 u32 ier = 0; 1847 1848 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1849 iir = I915_READ(VLV_IIR); 1850 1851 if (master_ctl == 0 && iir == 0) 1852 break; 1853 1854 ret = IRQ_HANDLED; 1855 1856 /* 1857 * Theory on interrupt generation, based on empirical evidence: 1858 * 1859 * x = ((VLV_IIR & VLV_IER) || 1860 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1861 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1862 * 1863 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1864 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1865 * guarantee the CPU interrupt will be raised again even if we 1866 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1867 * bits this time around. 1868 */ 1869 I915_WRITE(GEN8_MASTER_IRQ, 0); 1870 ier = I915_READ(VLV_IER); 1871 I915_WRITE(VLV_IER, 0); 1872 1873 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 1874 1875 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1876 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1877 1878 /* Call regardless, as some status bits might not be 1879 * signalled in iir */ 1880 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1881 1882 /* 1883 * VLV_IIR is single buffered, and reflects the level 1884 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
1885 */ 1886 if (iir) 1887 I915_WRITE(VLV_IIR, iir); 1888 1889 I915_WRITE(VLV_IER, ier); 1890 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1891 POSTING_READ(GEN8_MASTER_IRQ); 1892 1893 gen8_gt_irq_handler(dev_priv, gt_iir); 1894 1895 if (hotplug_status) 1896 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1897 1898 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1899 } while (0); 1900 1901 enable_rpm_wakeref_asserts(dev_priv); 1902 1903 return ret; 1904 } 1905 1906 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1907 u32 hotplug_trigger, 1908 const u32 hpd[HPD_NUM_PINS]) 1909 { 1910 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1911 1912 /* 1913 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1914 * unless we touch the hotplug register, even if hotplug_trigger is 1915 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1916 * errors. 1917 */ 1918 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1919 if (!hotplug_trigger) { 1920 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1921 PORTD_HOTPLUG_STATUS_MASK | 1922 PORTC_HOTPLUG_STATUS_MASK | 1923 PORTB_HOTPLUG_STATUS_MASK; 1924 dig_hotplug_reg &= ~mask; 1925 } 1926 1927 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1928 if (!hotplug_trigger) 1929 return; 1930 1931 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1932 dig_hotplug_reg, hpd, 1933 pch_port_hotplug_long_detect); 1934 1935 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1936 } 1937 1938 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1939 { 1940 int pipe; 1941 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1942 1943 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 1944 1945 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1946 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1947 SDE_AUDIO_POWER_SHIFT); 1948 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1949 port_name(port)); 1950 } 1951 1952 if (pch_iir & SDE_AUX_MASK) 1953 dp_aux_irq_handler(dev_priv); 1954 1955 if (pch_iir & SDE_GMBUS) 1956 gmbus_irq_handler(dev_priv); 1957 1958 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1959 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1960 1961 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1962 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1963 1964 if (pch_iir & SDE_POISON) 1965 DRM_ERROR("PCH poison interrupt\n"); 1966 1967 if (pch_iir & SDE_FDI_MASK) 1968 for_each_pipe(dev_priv, pipe) 1969 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1970 pipe_name(pipe), 1971 I915_READ(FDI_RX_IIR(pipe))); 1972 1973 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1974 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1975 1976 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1977 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1978 1979 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1980 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1981 1982 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1983 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1984 } 1985 1986 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1987 { 1988 u32 err_int = I915_READ(GEN7_ERR_INT); 1989 enum i915_pipe pipe; 1990 1991 if (err_int & ERR_INT_POISON) 1992 DRM_ERROR("Poison interrupt\n"); 1993 1994 for_each_pipe(dev_priv, pipe) { 1995 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1996 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1997 1998 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1999 if (IS_IVYBRIDGE(dev_priv)) 2000 
ivb_pipe_crc_irq_handler(dev_priv, pipe); 2001 else 2002 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2003 } 2004 } 2005 2006 I915_WRITE(GEN7_ERR_INT, err_int); 2007 } 2008 2009 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2010 { 2011 u32 serr_int = I915_READ(SERR_INT); 2012 2013 if (serr_int & SERR_INT_POISON) 2014 DRM_ERROR("PCH poison interrupt\n"); 2015 2016 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2017 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2018 2019 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2020 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2021 2022 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2023 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2024 2025 I915_WRITE(SERR_INT, serr_int); 2026 } 2027 2028 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2029 { 2030 int pipe; 2031 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2032 2033 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2034 2035 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2036 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2037 SDE_AUDIO_POWER_SHIFT_CPT); 2038 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2039 port_name(port)); 2040 } 2041 2042 if (pch_iir & SDE_AUX_MASK_CPT) 2043 dp_aux_irq_handler(dev_priv); 2044 2045 if (pch_iir & SDE_GMBUS_CPT) 2046 gmbus_irq_handler(dev_priv); 2047 2048 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2049 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2050 2051 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2052 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2053 2054 if (pch_iir & SDE_FDI_MASK_CPT) 2055 for_each_pipe(dev_priv, pipe) 2056 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2057 pipe_name(pipe), 2058 I915_READ(FDI_RX_IIR(pipe))); 2059 2060 if (pch_iir & SDE_ERROR_CPT) 2061 cpt_serr_int_handler(dev_priv); 2062 } 2063 2064 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2065 { 2066 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2067 ~SDE_PORTE_HOTPLUG_SPT; 2068 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2069 u32 pin_mask = 0, long_mask = 0; 2070 2071 if (hotplug_trigger) { 2072 u32 dig_hotplug_reg; 2073 2074 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2075 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2076 2077 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2078 dig_hotplug_reg, hpd_spt, 2079 spt_port_hotplug_long_detect); 2080 } 2081 2082 if (hotplug2_trigger) { 2083 u32 dig_hotplug_reg; 2084 2085 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2086 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2087 2088 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2089 dig_hotplug_reg, hpd_spt, 2090 spt_port_hotplug2_long_detect); 2091 } 2092 2093 if (pin_mask) 2094 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2095 2096 if (pch_iir & SDE_GMBUS_CPT) 2097 gmbus_irq_handler(dev_priv); 2098 } 2099 2100 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2101 u32 hotplug_trigger, 2102 const u32 hpd[HPD_NUM_PINS]) 2103 { 2104 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2105 2106 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2107 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2108 2109 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2110 dig_hotplug_reg, hpd, 2111 ilk_port_hotplug_long_detect); 2112 2113 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2114 } 2115 2116 static void ilk_display_irq_handler(struct drm_i915_private 
*dev_priv, 2117 u32 de_iir) 2118 { 2119 enum i915_pipe pipe; 2120 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2121 2122 if (hotplug_trigger) 2123 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2124 2125 if (de_iir & DE_AUX_CHANNEL_A) 2126 dp_aux_irq_handler(dev_priv); 2127 2128 if (de_iir & DE_GSE) 2129 intel_opregion_asle_intr(dev_priv); 2130 2131 if (de_iir & DE_POISON) 2132 DRM_ERROR("Poison interrupt\n"); 2133 2134 for_each_pipe(dev_priv, pipe) { 2135 if (de_iir & DE_PIPE_VBLANK(pipe) && 2136 intel_pipe_handle_vblank(dev_priv, pipe)) 2137 intel_check_page_flip(dev_priv, pipe); 2138 2139 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2140 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2141 2142 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2143 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2144 2145 /* plane/pipes map 1:1 on ilk+ */ 2146 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2147 intel_finish_page_flip_cs(dev_priv, pipe); 2148 } 2149 2150 /* check event from PCH */ 2151 if (de_iir & DE_PCH_EVENT) { 2152 u32 pch_iir = I915_READ(SDEIIR); 2153 2154 if (HAS_PCH_CPT(dev_priv)) 2155 cpt_irq_handler(dev_priv, pch_iir); 2156 else 2157 ibx_irq_handler(dev_priv, pch_iir); 2158 2159 /* should clear PCH hotplug event before clear CPU irq */ 2160 I915_WRITE(SDEIIR, pch_iir); 2161 } 2162 2163 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2164 ironlake_rps_change_irq_handler(dev_priv); 2165 } 2166 2167 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2168 u32 de_iir) 2169 { 2170 enum i915_pipe pipe; 2171 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2172 2173 if (hotplug_trigger) 2174 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2175 2176 if (de_iir & DE_ERR_INT_IVB) 2177 ivb_err_int_handler(dev_priv); 2178 2179 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2180 dp_aux_irq_handler(dev_priv); 2181 2182 if (de_iir & DE_GSE_IVB) 2183 intel_opregion_asle_intr(dev_priv); 2184 2185 for_each_pipe(dev_priv, pipe) { 2186 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2187 intel_pipe_handle_vblank(dev_priv, pipe)) 2188 intel_check_page_flip(dev_priv, pipe); 2189 2190 /* plane/pipes map 1:1 on ilk+ */ 2191 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2192 intel_finish_page_flip_cs(dev_priv, pipe); 2193 } 2194 2195 /* check event from PCH */ 2196 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2197 u32 pch_iir = I915_READ(SDEIIR); 2198 2199 cpt_irq_handler(dev_priv, pch_iir); 2200 2201 /* clear PCH hotplug event before clear CPU irq */ 2202 I915_WRITE(SDEIIR, pch_iir); 2203 } 2204 } 2205 2206 /* 2207 * To handle irqs with the minimum potential races with fresh interrupts, we: 2208 * 1 - Disable Master Interrupt Control. 2209 * 2 - Find the source(s) of the interrupt. 2210 * 3 - Clear the Interrupt Identity bits (IIR). 2211 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2212 * 5 - Re-enable Master Interrupt Control. 
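 *
 * On pch-split platforms ironlake_irq_handler() below additionally masks
 * SDEIER for the duration of the handler to work around missed PCH
 * interrupts; see the comment inside that function.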
2213 */ 2214 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2215 { 2216 struct drm_device *dev = arg; 2217 struct drm_i915_private *dev_priv = to_i915(dev); 2218 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2219 irqreturn_t ret = IRQ_NONE; 2220 2221 if (!intel_irqs_enabled(dev_priv)) 2222 return IRQ_NONE; 2223 2224 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2225 disable_rpm_wakeref_asserts(dev_priv); 2226 2227 /* disable master interrupt before clearing iir */ 2228 de_ier = I915_READ(DEIER); 2229 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2230 POSTING_READ(DEIER); 2231 2232 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2233 * interrupts will will be stored on its back queue, and then we'll be 2234 * able to process them after we restore SDEIER (as soon as we restore 2235 * it, we'll get an interrupt if SDEIIR still has something to process 2236 * due to its back queue). */ 2237 if (!HAS_PCH_NOP(dev_priv)) { 2238 sde_ier = I915_READ(SDEIER); 2239 I915_WRITE(SDEIER, 0); 2240 POSTING_READ(SDEIER); 2241 } 2242 2243 /* Find, clear, then process each source of interrupt */ 2244 2245 gt_iir = I915_READ(GTIIR); 2246 if (gt_iir) { 2247 I915_WRITE(GTIIR, gt_iir); 2248 ret = IRQ_HANDLED; 2249 if (INTEL_GEN(dev_priv) >= 6) 2250 snb_gt_irq_handler(dev_priv, gt_iir); 2251 else 2252 ilk_gt_irq_handler(dev_priv, gt_iir); 2253 } 2254 2255 de_iir = I915_READ(DEIIR); 2256 if (de_iir) { 2257 I915_WRITE(DEIIR, de_iir); 2258 ret = IRQ_HANDLED; 2259 if (INTEL_GEN(dev_priv) >= 7) 2260 ivb_display_irq_handler(dev_priv, de_iir); 2261 else 2262 ilk_display_irq_handler(dev_priv, de_iir); 2263 } 2264 2265 if (INTEL_GEN(dev_priv) >= 6) { 2266 u32 pm_iir = I915_READ(GEN6_PMIIR); 2267 if (pm_iir) { 2268 I915_WRITE(GEN6_PMIIR, pm_iir); 2269 ret = IRQ_HANDLED; 2270 gen6_rps_irq_handler(dev_priv, pm_iir); 2271 } 2272 } 2273 2274 I915_WRITE(DEIER, de_ier); 2275 POSTING_READ(DEIER); 2276 if (!HAS_PCH_NOP(dev_priv)) { 2277 I915_WRITE(SDEIER, sde_ier); 2278 POSTING_READ(SDEIER); 2279 } 2280 2281 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2282 enable_rpm_wakeref_asserts(dev_priv); 2283 2284 return ret; 2285 } 2286 2287 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2288 u32 hotplug_trigger, 2289 const u32 hpd[HPD_NUM_PINS]) 2290 { 2291 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2292 2293 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2294 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2295 2296 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2297 dig_hotplug_reg, hpd, 2298 bxt_port_hotplug_long_detect); 2299 2300 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2301 } 2302 2303 static irqreturn_t 2304 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2305 { 2306 irqreturn_t ret = IRQ_NONE; 2307 u32 iir; 2308 enum i915_pipe pipe; 2309 2310 if (master_ctl & GEN8_DE_MISC_IRQ) { 2311 iir = I915_READ(GEN8_DE_MISC_IIR); 2312 if (iir) { 2313 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2314 ret = IRQ_HANDLED; 2315 if (iir & GEN8_DE_MISC_GSE) 2316 intel_opregion_asle_intr(dev_priv); 2317 else 2318 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2319 } 2320 else 2321 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2322 } 2323 2324 if (master_ctl & GEN8_DE_PORT_IRQ) { 2325 iir = I915_READ(GEN8_DE_PORT_IIR); 2326 if (iir) { 2327 u32 tmp_mask; 2328 bool found = false; 2329 2330 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2331 ret = IRQ_HANDLED; 2332 2333 tmp_mask = 
GEN8_AUX_CHANNEL_A;
			if (INTEL_INFO(dev_priv)->gen >= 9)
				tmp_mask |= GEN9_AUX_CHANNEL_B |
					    GEN9_AUX_CHANNEL_C |
					    GEN9_AUX_CHANNEL_D;

			if (iir & tmp_mask) {
				dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_BROXTON(dev_priv)) {
				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
				if (tmp_mask) {
					bxt_hpd_irq_handler(dev_priv, tmp_mask,
							    hpd_bxt);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
				if (tmp_mask) {
					ilk_hpd_irq_handler(dev_priv,
							    tmp_mask, hpd_bdw);
					found = true;
				}
			}

			if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		u32 flip_done, fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		ret = IRQ_HANDLED;
		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		flip_done = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
		else
			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;

		if (flip_done)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir;
		if (INTEL_INFO(dev_priv)->gen >= 9)
			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		else
			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

		if (fault_errors)
			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
				  pipe_name(pipe),
				  fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		iir = I915_READ(SDEIIR);
		if (iir) {
			I915_WRITE(SDEIIR, iir);
			ret = IRQ_HANDLED;

			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
2438 */ 2439 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2440 } 2441 } 2442 2443 return ret; 2444 } 2445 2446 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2447 { 2448 struct drm_device *dev = arg; 2449 struct drm_i915_private *dev_priv = to_i915(dev); 2450 u32 master_ctl; 2451 u32 gt_iir[4] = {}; 2452 irqreturn_t ret; 2453 2454 if (!intel_irqs_enabled(dev_priv)) 2455 return IRQ_NONE; 2456 2457 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2458 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2459 if (!master_ctl) 2460 return IRQ_NONE; 2461 2462 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2463 2464 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2465 disable_rpm_wakeref_asserts(dev_priv); 2466 2467 /* Find, clear, then process each source of interrupt */ 2468 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2469 gen8_gt_irq_handler(dev_priv, gt_iir); 2470 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2471 2472 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2473 POSTING_READ_FW(GEN8_MASTER_IRQ); 2474 2475 enable_rpm_wakeref_asserts(dev_priv); 2476 2477 return ret; 2478 } 2479 2480 static void i915_error_wake_up(struct drm_i915_private *dev_priv) 2481 { 2482 /* 2483 * Notify all waiters for GPU completion events that reset state has 2484 * been changed, and that they need to restart their wait after 2485 * checking for potential errors (and bail out to drop locks if there is 2486 * a gpu reset pending so that i915_error_work_func can acquire them). 2487 */ 2488 2489 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2490 wake_up_all(&dev_priv->gpu_error.wait_queue); 2491 2492 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2493 wake_up_all(&dev_priv->pending_flip_queue); 2494 } 2495 2496 /** 2497 * i915_reset_and_wakeup - do process context error handling work 2498 * @dev_priv: i915 device private 2499 * 2500 * Fire an error uevent so userspace can see that a hang or error 2501 * was detected. 2502 */ 2503 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2504 { 2505 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2506 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2507 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2508 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2509 int ret; 2510 2511 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2512 2513 /* 2514 * Note that there's only one work item which does gpu resets, so we 2515 * need not worry about concurrent gpu resets potentially incrementing 2516 * error->reset_counter twice. We only need to take care of another 2517 * racing irq/hangcheck declaring the gpu dead for a second time. A 2518 * quick check for that is good enough: schedule_work ensures the 2519 * correct ordering between hang detection and this work item, and since 2520 * the reset in-progress bit is only ever set by code outside of this 2521 * work we don't need to worry about any other races. 2522 */ 2523 if (i915_reset_in_progress(&dev_priv->gpu_error)) { 2524 DRM_DEBUG_DRIVER("resetting chip\n"); 2525 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2526 2527 /* 2528 * In most cases it's guaranteed that we get here with an RPM 2529 * reference held, for example because there is a pending GPU 2530 * request that won't finish until the reset is done. This 2531 * isn't the case at least when we get here by doing a 2532 * simulated reset via debugs, so get an RPM reference. 
2533 */ 2534 intel_runtime_pm_get(dev_priv); 2535 2536 intel_prepare_reset(dev_priv); 2537 2538 /* 2539 * All state reset _must_ be completed before we update the 2540 * reset counter, for otherwise waiters might miss the reset 2541 * pending state and not properly drop locks, resulting in 2542 * deadlocks with the reset work. 2543 */ 2544 ret = i915_reset(dev_priv); 2545 2546 intel_finish_reset(dev_priv); 2547 2548 intel_runtime_pm_put(dev_priv); 2549 2550 if (ret == 0) 2551 kobject_uevent_env(kobj, 2552 KOBJ_CHANGE, reset_done_event); 2553 2554 /* 2555 * Note: The wake_up also serves as a memory barrier so that 2556 * waiters see the update value of the reset counter atomic_t. 2557 */ 2558 wake_up_all(&dev_priv->gpu_error.reset_queue); 2559 } 2560 } 2561 2562 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) 2563 { 2564 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2565 u32 eir = I915_READ(EIR); 2566 int pipe, i; 2567 2568 if (!eir) 2569 return; 2570 2571 pr_err("render error detected, EIR: 0x%08x\n", eir); 2572 2573 i915_get_extra_instdone(dev_priv, instdone); 2574 2575 if (IS_G4X(dev_priv)) { 2576 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2577 u32 ipeir = I915_READ(IPEIR_I965); 2578 2579 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2580 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2581 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2582 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2583 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2584 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2585 I915_WRITE(IPEIR_I965, ipeir); 2586 POSTING_READ(IPEIR_I965); 2587 } 2588 if (eir & GM45_ERROR_PAGE_TABLE) { 2589 u32 pgtbl_err = I915_READ(PGTBL_ER); 2590 pr_err("page table error\n"); 2591 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2592 I915_WRITE(PGTBL_ER, pgtbl_err); 2593 POSTING_READ(PGTBL_ER); 2594 } 2595 } 2596 2597 if (!IS_GEN2(dev_priv)) { 2598 if (eir & I915_ERROR_PAGE_TABLE) { 2599 u32 pgtbl_err = I915_READ(PGTBL_ER); 2600 pr_err("page table error\n"); 2601 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2602 I915_WRITE(PGTBL_ER, pgtbl_err); 2603 POSTING_READ(PGTBL_ER); 2604 } 2605 } 2606 2607 if (eir & I915_ERROR_MEMORY_REFRESH) { 2608 pr_err("memory refresh error:\n"); 2609 for_each_pipe(dev_priv, pipe) 2610 pr_err("pipe %c stat: 0x%08x\n", 2611 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2612 /* pipestat has already been acked */ 2613 } 2614 if (eir & I915_ERROR_INSTRUCTION) { 2615 pr_err("instruction error\n"); 2616 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2617 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2618 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2619 if (INTEL_GEN(dev_priv) < 4) { 2620 u32 ipeir = I915_READ(IPEIR); 2621 2622 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2623 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2624 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2625 I915_WRITE(IPEIR, ipeir); 2626 POSTING_READ(IPEIR); 2627 } else { 2628 u32 ipeir = I915_READ(IPEIR_I965); 2629 2630 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2631 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2632 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2633 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2634 I915_WRITE(IPEIR_I965, ipeir); 2635 POSTING_READ(IPEIR_I965); 2636 } 2637 } 2638 2639 I915_WRITE(EIR, eir); 2640 POSTING_READ(EIR); 2641 eir = I915_READ(EIR); 2642 if (eir) { 2643 /* 2644 * some errors might have become stuck, 2645 * mask them. 
2646 */ 2647 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2648 I915_WRITE(EMR, I915_READ(EMR) | eir); 2649 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2650 } 2651 } 2652 2653 /** 2654 * i915_handle_error - handle a gpu error 2655 * @dev_priv: i915 device private 2656 * @engine_mask: mask representing engines that are hung 2657 * Do some basic checking of register state at error time and 2658 * dump it to the syslog. Also call i915_capture_error_state() to make 2659 * sure we get a record and make it available in debugfs. Fire a uevent 2660 * so userspace knows something bad happened (should trigger collection 2661 * of a ring dump etc.). 2662 * @fmt: Error message format string 2663 */ 2664 void i915_handle_error(struct drm_i915_private *dev_priv, 2665 u32 engine_mask, 2666 const char *fmt, ...) 2667 { 2668 va_list args; 2669 char error_msg[80]; 2670 2671 va_start(args, fmt); 2672 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2673 va_end(args); 2674 2675 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2676 i915_report_and_clear_eir(dev_priv); 2677 2678 if (engine_mask) { 2679 atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2680 &dev_priv->gpu_error.reset_counter); 2681 2682 /* 2683 * Wakeup waiting processes so that the reset function 2684 * i915_reset_and_wakeup doesn't deadlock trying to grab 2685 * various locks. By bumping the reset counter first, the woken 2686 * processes will see a reset in progress and back off, 2687 * releasing their locks and then wait for the reset completion. 2688 * We must do this for _all_ gpu waiters that might hold locks 2689 * that the reset work needs to acquire. 2690 * 2691 * Note: The wake_up serves as the required memory barrier to 2692 * ensure that the waiters see the updated value of the reset 2693 * counter atomic_t. 2694 */ 2695 i915_error_wake_up(dev_priv); 2696 } 2697 2698 i915_reset_and_wakeup(dev_priv); 2699 } 2700 2701 /* Called from drm generic code, passed 'crtc' which 2702 * we use as a pipe index 2703 */ 2704 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) 2705 { 2706 struct drm_i915_private *dev_priv = to_i915(dev); 2707 unsigned long irqflags; 2708 2709 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2710 if (INTEL_INFO(dev)->gen >= 4) 2711 i915_enable_pipestat(dev_priv, pipe, 2712 PIPE_START_VBLANK_INTERRUPT_STATUS); 2713 else 2714 i915_enable_pipestat(dev_priv, pipe, 2715 PIPE_VBLANK_INTERRUPT_STATUS); 2716 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2717 2718 return 0; 2719 } 2720 2721 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2722 { 2723 struct drm_i915_private *dev_priv = to_i915(dev); 2724 unsigned long irqflags; 2725 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2726 DE_PIPE_VBLANK(pipe); 2727 2728 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2729 ilk_enable_display_irq(dev_priv, bit); 2730 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2731 2732 return 0; 2733 } 2734 2735 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) 2736 { 2737 struct drm_i915_private *dev_priv = to_i915(dev); 2738 unsigned long irqflags; 2739 2740 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2741 i915_enable_pipestat(dev_priv, pipe, 2742 PIPE_START_VBLANK_INTERRUPT_STATUS); 2743 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2744 2745 return 0; 2746 } 2747 2748 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2749 { 2750 struct drm_i915_private *dev_priv = to_i915(dev); 2751 unsigned long irqflags; 2752 2753 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2754 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2755 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2756 2757 return 0; 2758 } 2759 2760 /* Called from drm generic code, passed 'crtc' which 2761 * we use as a pipe index 2762 */ 2763 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) 2764 { 2765 struct drm_i915_private *dev_priv = to_i915(dev); 2766 unsigned long irqflags; 2767 2768 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2769 i915_disable_pipestat(dev_priv, pipe, 2770 PIPE_VBLANK_INTERRUPT_STATUS | 2771 PIPE_START_VBLANK_INTERRUPT_STATUS); 2772 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2773 } 2774 2775 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2776 { 2777 struct drm_i915_private *dev_priv = to_i915(dev); 2778 unsigned long irqflags; 2779 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static bool
ring_idle(struct intel_engine_cs *engine, u32 seqno)
{
	return i915_seqno_passed(seqno,
				 READ_ONCE(engine->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
	if (INTEL_GEN(engine->i915) >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
				 u64 offset)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;

	if (INTEL_GEN(dev_priv) >= 8) {
		for_each_engine(signaller, dev_priv) {
			if (engine == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[engine->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_engine(signaller, dev_priv) {
			if (engine == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  engine->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (engine->buffer == NULL)
		return NULL;

	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
	if (!ipehr_is_semaphore_wait(engine, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at the batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(engine) & HEAD_ADDR;
	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= engine->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(engine->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
	if (INTEL_GEN(dev_priv) >= 8) {
		/* The 64b signal offset is split across two dwords. */
		offset = ioread32(engine->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(engine->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_cs *signaller;
	u32 seqno;

	engine->hangcheck.deadlock++;

	signaller = semaphore_waits_for(engine, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
		return -1;

	if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		engine->hangcheck.deadlock = 0;
}

static bool subunits_stuck(struct intel_engine_cs *engine)
{
	u32 instdone[I915_NUM_INSTDONE_REG];
	bool stuck;
	int i;

	if (engine->id != RCS)
		return true;

	i915_get_extra_instdone(engine->i915, instdone);

	/* There might be unstable subunit states even when
	 * actual head is not moving. Filter out the unstable ones by
	 * accumulating the undone -> done transitions and only
	 * consider those as progress. Only the first such transition of
	 * each bit counts; bits that merely keep toggling afterwards no
	 * longer register as progress.
2981 */ 2982 stuck = true; 2983 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { 2984 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i]; 2985 2986 if (tmp != engine->hangcheck.instdone[i]) 2987 stuck = false; 2988 2989 engine->hangcheck.instdone[i] |= tmp; 2990 } 2991 2992 return stuck; 2993 } 2994 2995 static enum intel_ring_hangcheck_action 2996 head_stuck(struct intel_engine_cs *engine, u64 acthd) 2997 { 2998 if (acthd != engine->hangcheck.acthd) { 2999 3000 /* Clear subunit states on head movement */ 3001 memset(engine->hangcheck.instdone, 0, 3002 sizeof(engine->hangcheck.instdone)); 3003 3004 return HANGCHECK_ACTIVE; 3005 } 3006 3007 if (!subunits_stuck(engine)) 3008 return HANGCHECK_ACTIVE; 3009 3010 return HANGCHECK_HUNG; 3011 } 3012 3013 static enum intel_ring_hangcheck_action 3014 ring_stuck(struct intel_engine_cs *engine, u64 acthd) 3015 { 3016 struct drm_i915_private *dev_priv = engine->i915; 3017 enum intel_ring_hangcheck_action ha; 3018 u32 tmp; 3019 3020 ha = head_stuck(engine, acthd); 3021 if (ha != HANGCHECK_HUNG) 3022 return ha; 3023 3024 if (IS_GEN2(dev_priv)) 3025 return HANGCHECK_HUNG; 3026 3027 /* Is the chip hanging on a WAIT_FOR_EVENT? 3028 * If so we can simply poke the RB_WAIT bit 3029 * and break the hang. This should work on 3030 * all but the second generation chipsets. 3031 */ 3032 tmp = I915_READ_CTL(engine); 3033 if (tmp & RING_WAIT) { 3034 i915_handle_error(dev_priv, 0, 3035 "Kicking stuck wait on %s", 3036 engine->name); 3037 I915_WRITE_CTL(engine, tmp); 3038 return HANGCHECK_KICK; 3039 } 3040 3041 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3042 switch (semaphore_passed(engine)) { 3043 default: 3044 return HANGCHECK_HUNG; 3045 case 1: 3046 i915_handle_error(dev_priv, 0, 3047 "Kicking stuck semaphore on %s", 3048 engine->name); 3049 I915_WRITE_CTL(engine, tmp); 3050 return HANGCHECK_KICK; 3051 case 0: 3052 return HANGCHECK_WAIT; 3053 } 3054 } 3055 3056 return HANGCHECK_HUNG; 3057 } 3058 3059 static unsigned long kick_waiters(struct intel_engine_cs *engine) 3060 { 3061 struct drm_i915_private *i915 = engine->i915; 3062 unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups); 3063 3064 if (engine->hangcheck.user_interrupts == irq_count && 3065 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { 3066 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) 3067 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3068 engine->name); 3069 3070 intel_engine_enable_fake_irq(engine); 3071 } 3072 3073 return irq_count; 3074 } 3075 /* 3076 * This is called when the chip hasn't reported back with completed 3077 * batchbuffers in a long time. We keep track per ring seqno progress and 3078 * if there are no progress, hangcheck score for that ring is increased. 3079 * Further, acthd is inspected to see if the ring is stuck. On stuck case 3080 * we kick the ring. If we see no progress on three subsequent calls 3081 * we assume chip is wedged and try to fix it by resetting the chip. 
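 * The per-engine hangcheck score is raised by the BUSY/KICK/HUNG
 * increments below and a reset is requested via i915_handle_error()
 * once it reaches HANGCHECK_SCORE_RING_HUNG.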
3082 */ 3083 static void i915_hangcheck_elapsed(struct work_struct *work) 3084 { 3085 struct drm_i915_private *dev_priv = 3086 container_of(work, typeof(*dev_priv), 3087 gpu_error.hangcheck_work.work); 3088 struct intel_engine_cs *engine; 3089 unsigned int hung = 0, stuck = 0; 3090 int busy_count = 0; 3091 #define BUSY 1 3092 #define KICK 5 3093 #define HUNG 20 3094 #define ACTIVE_DECAY 15 3095 3096 if (!i915.enable_hangcheck) 3097 return; 3098 3099 if (!READ_ONCE(dev_priv->gt.awake)) 3100 return; 3101 3102 /* As enabling the GPU requires fairly extensive mmio access, 3103 * periodically arm the mmio checker to see if we are triggering 3104 * any invalid access. 3105 */ 3106 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 3107 3108 for_each_engine(engine, dev_priv) { 3109 bool busy = intel_engine_has_waiter(engine); 3110 u64 acthd; 3111 u32 seqno; 3112 unsigned user_interrupts; 3113 3114 semaphore_clear_deadlocks(dev_priv); 3115 3116 /* We don't strictly need an irq-barrier here, as we are not 3117 * serving an interrupt request, be paranoid in case the 3118 * barrier has side-effects (such as preventing a broken 3119 * cacheline snoop) and so be sure that we can see the seqno 3120 * advance. If the seqno should stick, due to a stale 3121 * cacheline, we would erroneously declare the GPU hung. 3122 */ 3123 if (engine->irq_seqno_barrier) 3124 engine->irq_seqno_barrier(engine); 3125 3126 acthd = intel_ring_get_active_head(engine); 3127 seqno = intel_engine_get_seqno(engine); 3128 3129 /* Reset stuck interrupts between batch advances */ 3130 user_interrupts = 0; 3131 3132 if (engine->hangcheck.seqno == seqno) { 3133 if (ring_idle(engine, seqno)) { 3134 engine->hangcheck.action = HANGCHECK_IDLE; 3135 if (busy) { 3136 /* Safeguard against driver failure */ 3137 user_interrupts = kick_waiters(engine); 3138 engine->hangcheck.score += BUSY; 3139 } 3140 } else { 3141 /* We always increment the hangcheck score 3142 * if the ring is busy and still processing 3143 * the same request, so that no single request 3144 * can run indefinitely (such as a chain of 3145 * batches). The only time we do not increment 3146 * the hangcheck score on this ring, if this 3147 * ring is in a legitimate wait for another 3148 * ring. In that case the waiting ring is a 3149 * victim and we want to be sure we catch the 3150 * right culprit. Then every time we do kick 3151 * the ring, add a small increment to the 3152 * score so that we can catch a batch that is 3153 * being repeatedly kicked and so responsible 3154 * for stalling the machine. 3155 */ 3156 engine->hangcheck.action = ring_stuck(engine, 3157 acthd); 3158 3159 switch (engine->hangcheck.action) { 3160 case HANGCHECK_IDLE: 3161 case HANGCHECK_WAIT: 3162 break; 3163 case HANGCHECK_ACTIVE: 3164 engine->hangcheck.score += BUSY; 3165 break; 3166 case HANGCHECK_KICK: 3167 engine->hangcheck.score += KICK; 3168 break; 3169 case HANGCHECK_HUNG: 3170 engine->hangcheck.score += HUNG; 3171 break; 3172 } 3173 } 3174 3175 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3176 hung |= intel_engine_flag(engine); 3177 if (engine->hangcheck.action != HANGCHECK_HUNG) 3178 stuck |= intel_engine_flag(engine); 3179 } 3180 } else { 3181 engine->hangcheck.action = HANGCHECK_ACTIVE; 3182 3183 /* Gradually reduce the count so that we catch DoS 3184 * attempts across multiple batches. 
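			 * (ACTIVE_DECAY points are removed per sample and
			 * the score is clamped at zero below.)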
3185 */ 3186 if (engine->hangcheck.score > 0) 3187 engine->hangcheck.score -= ACTIVE_DECAY; 3188 if (engine->hangcheck.score < 0) 3189 engine->hangcheck.score = 0; 3190 3191 /* Clear head and subunit states on seqno movement */ 3192 acthd = 0; 3193 3194 memset(engine->hangcheck.instdone, 0, 3195 sizeof(engine->hangcheck.instdone)); 3196 } 3197 3198 engine->hangcheck.seqno = seqno; 3199 engine->hangcheck.acthd = acthd; 3200 engine->hangcheck.user_interrupts = user_interrupts; 3201 busy_count += busy; 3202 } 3203 3204 if (hung) { 3205 char msg[80]; 3206 int len; 3207 3208 /* If some rings hung but others were still busy, only 3209 * blame the hanging rings in the synopsis. 3210 */ 3211 if (stuck != hung) 3212 hung &= ~stuck; 3213 len = scnprintf(msg, sizeof(msg), 3214 "%s on ", stuck == hung ? "No progress" : "Hang"); 3215 for_each_engine_masked(engine, dev_priv, hung) 3216 len += scnprintf(msg + len, sizeof(msg) - len, 3217 "%s, ", engine->name); 3218 msg[len-2] = '\0'; 3219 3220 return i915_handle_error(dev_priv, hung, msg); 3221 } 3222 3223 /* Reset timer in case GPU hangs without another request being added */ 3224 if (busy_count) 3225 i915_queue_hangcheck(dev_priv); 3226 } 3227 3228 static void ibx_irq_reset(struct drm_device *dev) 3229 { 3230 struct drm_i915_private *dev_priv = to_i915(dev); 3231 3232 if (HAS_PCH_NOP(dev)) 3233 return; 3234 3235 GEN5_IRQ_RESET(SDE); 3236 3237 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3238 I915_WRITE(SERR_INT, 0xffffffff); 3239 } 3240 3241 /* 3242 * SDEIER is also touched by the interrupt handler to work around missed PCH 3243 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3244 * instead we unconditionally enable all PCH interrupt sources here, but then 3245 * only unmask them as needed with SDEIMR. 3246 * 3247 * This function needs to be called before interrupts are enabled. 
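 *
 * Both ironlake_irq_postinstall() and gen8_irq_postinstall() call this
 * before programming their IER registers and before ibx_irq_postinstall()
 * unmasks anything in SDEIMR.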
3248 */ 3249 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3250 { 3251 struct drm_i915_private *dev_priv = to_i915(dev); 3252 3253 if (HAS_PCH_NOP(dev)) 3254 return; 3255 3256 WARN_ON(I915_READ(SDEIER) != 0); 3257 I915_WRITE(SDEIER, 0xffffffff); 3258 POSTING_READ(SDEIER); 3259 } 3260 3261 static void gen5_gt_irq_reset(struct drm_device *dev) 3262 { 3263 struct drm_i915_private *dev_priv = to_i915(dev); 3264 3265 GEN5_IRQ_RESET(GT); 3266 if (INTEL_INFO(dev)->gen >= 6) 3267 GEN5_IRQ_RESET(GEN6_PM); 3268 } 3269 3270 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3271 { 3272 enum i915_pipe pipe; 3273 3274 if (IS_CHERRYVIEW(dev_priv)) 3275 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3276 else 3277 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3278 3279 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3280 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3281 3282 for_each_pipe(dev_priv, pipe) { 3283 I915_WRITE(PIPESTAT(pipe), 3284 PIPE_FIFO_UNDERRUN_STATUS | 3285 PIPESTAT_INT_STATUS_MASK); 3286 dev_priv->pipestat_irq_mask[pipe] = 0; 3287 } 3288 3289 GEN5_IRQ_RESET(VLV_); 3290 dev_priv->irq_mask = ~0; 3291 } 3292 3293 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3294 { 3295 u32 pipestat_mask; 3296 u32 enable_mask; 3297 enum i915_pipe pipe; 3298 3299 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3300 PIPE_CRC_DONE_INTERRUPT_STATUS; 3301 3302 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3303 for_each_pipe(dev_priv, pipe) 3304 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3305 3306 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3307 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3308 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3309 if (IS_CHERRYVIEW(dev_priv)) 3310 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3311 3312 WARN_ON(dev_priv->irq_mask != ~0); 3313 3314 dev_priv->irq_mask = ~enable_mask; 3315 3316 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3317 } 3318 3319 /* drm_dma.h hooks 3320 */ 3321 static void ironlake_irq_reset(struct drm_device *dev) 3322 { 3323 struct drm_i915_private *dev_priv = to_i915(dev); 3324 3325 I915_WRITE(HWSTAM, 0xffffffff); 3326 3327 GEN5_IRQ_RESET(DE); 3328 if (IS_GEN7(dev)) 3329 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3330 3331 gen5_gt_irq_reset(dev); 3332 3333 ibx_irq_reset(dev); 3334 } 3335 3336 static void valleyview_irq_preinstall(struct drm_device *dev) 3337 { 3338 struct drm_i915_private *dev_priv = to_i915(dev); 3339 3340 I915_WRITE(VLV_MASTER_IER, 0); 3341 POSTING_READ(VLV_MASTER_IER); 3342 3343 gen5_gt_irq_reset(dev); 3344 3345 spin_lock_irq(&dev_priv->irq_lock); 3346 if (dev_priv->display_irqs_enabled) 3347 vlv_display_irq_reset(dev_priv); 3348 spin_unlock_irq(&dev_priv->irq_lock); 3349 } 3350 3351 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3352 { 3353 GEN8_IRQ_RESET_NDX(GT, 0); 3354 GEN8_IRQ_RESET_NDX(GT, 1); 3355 GEN8_IRQ_RESET_NDX(GT, 2); 3356 GEN8_IRQ_RESET_NDX(GT, 3); 3357 } 3358 3359 static void gen8_irq_reset(struct drm_device *dev) 3360 { 3361 struct drm_i915_private *dev_priv = to_i915(dev); 3362 int pipe; 3363 3364 I915_WRITE(GEN8_MASTER_IRQ, 0); 3365 POSTING_READ(GEN8_MASTER_IRQ); 3366 3367 gen8_gt_irq_reset(dev_priv); 3368 3369 for_each_pipe(dev_priv, pipe) 3370 if (intel_display_power_is_enabled(dev_priv, 3371 POWER_DOMAIN_PIPE(pipe))) 3372 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3373 3374 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3375 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3376 GEN5_IRQ_RESET(GEN8_PCU_); 3377 3378 if 
(HAS_PCH_SPLIT(dev)) 3379 ibx_irq_reset(dev); 3380 } 3381 3382 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3383 unsigned int pipe_mask) 3384 { 3385 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3386 enum i915_pipe pipe; 3387 3388 spin_lock_irq(&dev_priv->irq_lock); 3389 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3390 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3391 dev_priv->de_irq_mask[pipe], 3392 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3393 spin_unlock_irq(&dev_priv->irq_lock); 3394 } 3395 3396 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3397 unsigned int pipe_mask) 3398 { 3399 enum i915_pipe pipe; 3400 3401 spin_lock_irq(&dev_priv->irq_lock); 3402 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3403 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3404 spin_unlock_irq(&dev_priv->irq_lock); 3405 3406 /* make sure we're done processing display irqs */ 3407 synchronize_irq(dev_priv->drm.irq); 3408 } 3409 3410 static void cherryview_irq_preinstall(struct drm_device *dev) 3411 { 3412 struct drm_i915_private *dev_priv = to_i915(dev); 3413 3414 I915_WRITE(GEN8_MASTER_IRQ, 0); 3415 POSTING_READ(GEN8_MASTER_IRQ); 3416 3417 gen8_gt_irq_reset(dev_priv); 3418 3419 GEN5_IRQ_RESET(GEN8_PCU_); 3420 3421 spin_lock_irq(&dev_priv->irq_lock); 3422 if (dev_priv->display_irqs_enabled) 3423 vlv_display_irq_reset(dev_priv); 3424 spin_unlock_irq(&dev_priv->irq_lock); 3425 } 3426 3427 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3428 const u32 hpd[HPD_NUM_PINS]) 3429 { 3430 struct intel_encoder *encoder; 3431 u32 enabled_irqs = 0; 3432 3433 for_each_intel_encoder(&dev_priv->drm, encoder) 3434 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3435 enabled_irqs |= hpd[encoder->hpd_pin]; 3436 3437 return enabled_irqs; 3438 } 3439 3440 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3441 { 3442 u32 hotplug_irqs, hotplug, enabled_irqs; 3443 3444 if (HAS_PCH_IBX(dev_priv)) { 3445 hotplug_irqs = SDE_HOTPLUG_MASK; 3446 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3447 } else { 3448 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3449 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3450 } 3451 3452 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3453 3454 /* 3455 * Enable digital hotplug on the PCH, and configure the DP short pulse 3456 * duration to 2ms (which is the minimum in the Display Port spec). 3457 * The pulse duration bits are reserved on LPT+. 3458 */ 3459 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3460 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3461 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3462 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3463 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3464 /* 3465 * When CPU and PCH are on the same package, port A 3466 * HPD must be enabled in both north and south. 
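	 * The LPT-LP check below covers that on-package case on the
	 * south side; ilk_hpd_irq_setup() takes care of the north side.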
3467 */ 3468 if (HAS_PCH_LPT_LP(dev_priv)) 3469 hotplug |= PORTA_HOTPLUG_ENABLE; 3470 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3471 } 3472 3473 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3474 { 3475 u32 hotplug_irqs, hotplug, enabled_irqs; 3476 3477 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3478 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3479 3480 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3481 3482 /* Enable digital hotplug on the PCH */ 3483 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3484 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | 3485 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; 3486 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3487 3488 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3489 hotplug |= PORTE_HOTPLUG_ENABLE; 3490 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3491 } 3492 3493 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3494 { 3495 u32 hotplug_irqs, hotplug, enabled_irqs; 3496 3497 if (INTEL_GEN(dev_priv) >= 8) { 3498 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3499 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3500 3501 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3502 } else if (INTEL_GEN(dev_priv) >= 7) { 3503 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3504 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3505 3506 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3507 } else { 3508 hotplug_irqs = DE_DP_A_HOTPLUG; 3509 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3510 3511 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3512 } 3513 3514 /* 3515 * Enable digital hotplug on the CPU, and configure the DP short pulse 3516 * duration to 2ms (which is the minimum in the Display Port spec) 3517 * The pulse duration bits are reserved on HSW+. 3518 */ 3519 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3520 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3521 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3522 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3523 3524 ibx_hpd_irq_setup(dev_priv); 3525 } 3526 3527 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3528 { 3529 u32 hotplug_irqs, hotplug, enabled_irqs; 3530 3531 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3532 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3533 3534 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3535 3536 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3537 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | 3538 PORTA_HOTPLUG_ENABLE; 3539 3540 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3541 hotplug, enabled_irqs); 3542 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3543 3544 /* 3545 * For BXT invert bit has to be set based on AOB design 3546 * for HPD detection logic, update it based on VBT fields. 
3547 */ 3548 3549 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3550 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3551 hotplug |= BXT_DDIA_HPD_INVERT; 3552 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3553 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3554 hotplug |= BXT_DDIB_HPD_INVERT; 3555 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3556 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3557 hotplug |= BXT_DDIC_HPD_INVERT; 3558 3559 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3560 } 3561 3562 static void ibx_irq_postinstall(struct drm_device *dev) 3563 { 3564 struct drm_i915_private *dev_priv = to_i915(dev); 3565 u32 mask; 3566 3567 if (HAS_PCH_NOP(dev)) 3568 return; 3569 3570 if (HAS_PCH_IBX(dev)) 3571 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3572 else 3573 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3574 3575 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3576 I915_WRITE(SDEIMR, ~mask); 3577 } 3578 3579 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3580 { 3581 struct drm_i915_private *dev_priv = to_i915(dev); 3582 u32 pm_irqs, gt_irqs; 3583 3584 pm_irqs = gt_irqs = 0; 3585 3586 dev_priv->gt_irq_mask = ~0; 3587 if (HAS_L3_DPF(dev)) { 3588 /* L3 parity interrupt is always unmasked. */ 3589 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3590 gt_irqs |= GT_PARITY_ERROR(dev); 3591 } 3592 3593 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3594 if (IS_GEN5(dev)) { 3595 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3596 } else { 3597 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3598 } 3599 3600 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3601 3602 if (INTEL_INFO(dev)->gen >= 6) { 3603 /* 3604 * RPS interrupts will get enabled/disabled on demand when RPS 3605 * itself is enabled/disabled. 3606 */ 3607 if (HAS_VEBOX(dev)) 3608 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3609 3610 dev_priv->pm_irq_mask = 0xffffffff; 3611 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3612 } 3613 } 3614 3615 static int ironlake_irq_postinstall(struct drm_device *dev) 3616 { 3617 struct drm_i915_private *dev_priv = to_i915(dev); 3618 u32 display_mask, extra_mask; 3619 3620 if (INTEL_INFO(dev)->gen >= 7) { 3621 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3622 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3623 DE_PLANEB_FLIP_DONE_IVB | 3624 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3625 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3626 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3627 DE_DP_A_HOTPLUG_IVB); 3628 } else { 3629 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3630 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3631 DE_AUX_CHANNEL_A | 3632 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3633 DE_POISON); 3634 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3635 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3636 DE_DP_A_HOTPLUG); 3637 } 3638 3639 dev_priv->irq_mask = ~display_mask; 3640 3641 I915_WRITE(HWSTAM, 0xeffe); 3642 3643 ibx_irq_pre_postinstall(dev); 3644 3645 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3646 3647 gen5_gt_irq_postinstall(dev); 3648 3649 ibx_irq_postinstall(dev); 3650 3651 if (IS_IRONLAKE_M(dev)) { 3652 /* Enable PCU event interrupts 3653 * 3654 * spinlocking not required here for correctness since interrupt 3655 * setup is guaranteed to run in single-threaded context. But we 3656 * need it to make the assert_spin_locked happy. 
*/ 3657 spin_lock_irq(&dev_priv->irq_lock); 3658 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3659 spin_unlock_irq(&dev_priv->irq_lock); 3660 } 3661 3662 return 0; 3663 } 3664 3665 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3666 { 3667 assert_spin_locked(&dev_priv->irq_lock); 3668 3669 if (dev_priv->display_irqs_enabled) 3670 return; 3671 3672 dev_priv->display_irqs_enabled = true; 3673 3674 if (intel_irqs_enabled(dev_priv)) { 3675 vlv_display_irq_reset(dev_priv); 3676 vlv_display_irq_postinstall(dev_priv); 3677 } 3678 } 3679 3680 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3681 { 3682 assert_spin_locked(&dev_priv->irq_lock); 3683 3684 if (!dev_priv->display_irqs_enabled) 3685 return; 3686 3687 dev_priv->display_irqs_enabled = false; 3688 3689 if (intel_irqs_enabled(dev_priv)) 3690 vlv_display_irq_reset(dev_priv); 3691 } 3692 3693 3694 static int valleyview_irq_postinstall(struct drm_device *dev) 3695 { 3696 struct drm_i915_private *dev_priv = to_i915(dev); 3697 3698 gen5_gt_irq_postinstall(dev); 3699 3700 spin_lock_irq(&dev_priv->irq_lock); 3701 if (dev_priv->display_irqs_enabled) 3702 vlv_display_irq_postinstall(dev_priv); 3703 spin_unlock_irq(&dev_priv->irq_lock); 3704 3705 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3706 POSTING_READ(VLV_MASTER_IER); 3707 3708 return 0; 3709 } 3710 3711 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3712 { 3713 /* These are interrupts we'll toggle with the ring mask register */ 3714 uint32_t gt_interrupts[] = { 3715 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3716 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3717 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3718 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3719 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3720 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3721 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3722 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3723 0, 3724 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3725 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3726 }; 3727 3728 if (HAS_L3_DPF(dev_priv)) 3729 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3730 3731 dev_priv->pm_irq_mask = 0xffffffff; 3732 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3733 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3734 /* 3735 * RPS interrupts will get enabled/disabled on demand when RPS itself 3736 * is enabled/disabled. 
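	 * Only the IMR value is programmed here; the matching IER bits
	 * stay zero until the RPS code enables them at runtime.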
3737 */ 3738 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3739 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3740 } 3741 3742 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3743 { 3744 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3745 uint32_t de_pipe_enables; 3746 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3747 u32 de_port_enables; 3748 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3749 enum i915_pipe pipe; 3750 3751 if (INTEL_INFO(dev_priv)->gen >= 9) { 3752 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3753 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3754 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3755 GEN9_AUX_CHANNEL_D; 3756 if (IS_BROXTON(dev_priv)) 3757 de_port_masked |= BXT_DE_PORT_GMBUS; 3758 } else { 3759 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3760 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3761 } 3762 3763 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3764 GEN8_PIPE_FIFO_UNDERRUN; 3765 3766 de_port_enables = de_port_masked; 3767 if (IS_BROXTON(dev_priv)) 3768 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3769 else if (IS_BROADWELL(dev_priv)) 3770 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3771 3772 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3773 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3774 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3775 3776 for_each_pipe(dev_priv, pipe) 3777 if (intel_display_power_is_enabled(dev_priv, 3778 POWER_DOMAIN_PIPE(pipe))) 3779 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3780 dev_priv->de_irq_mask[pipe], 3781 de_pipe_enables); 3782 3783 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3784 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3785 } 3786 3787 static int gen8_irq_postinstall(struct drm_device *dev) 3788 { 3789 struct drm_i915_private *dev_priv = to_i915(dev); 3790 3791 if (HAS_PCH_SPLIT(dev)) 3792 ibx_irq_pre_postinstall(dev); 3793 3794 gen8_gt_irq_postinstall(dev_priv); 3795 gen8_de_irq_postinstall(dev_priv); 3796 3797 if (HAS_PCH_SPLIT(dev)) 3798 ibx_irq_postinstall(dev); 3799 3800 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3801 POSTING_READ(GEN8_MASTER_IRQ); 3802 3803 return 0; 3804 } 3805 3806 static int cherryview_irq_postinstall(struct drm_device *dev) 3807 { 3808 struct drm_i915_private *dev_priv = to_i915(dev); 3809 3810 gen8_gt_irq_postinstall(dev_priv); 3811 3812 spin_lock_irq(&dev_priv->irq_lock); 3813 if (dev_priv->display_irqs_enabled) 3814 vlv_display_irq_postinstall(dev_priv); 3815 spin_unlock_irq(&dev_priv->irq_lock); 3816 3817 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3818 POSTING_READ(GEN8_MASTER_IRQ); 3819 3820 return 0; 3821 } 3822 3823 static void gen8_irq_uninstall(struct drm_device *dev) 3824 { 3825 struct drm_i915_private *dev_priv = to_i915(dev); 3826 3827 if (!dev_priv) 3828 return; 3829 3830 gen8_irq_reset(dev); 3831 } 3832 3833 static void valleyview_irq_uninstall(struct drm_device *dev) 3834 { 3835 struct drm_i915_private *dev_priv = to_i915(dev); 3836 3837 if (!dev_priv) 3838 return; 3839 3840 I915_WRITE(VLV_MASTER_IER, 0); 3841 POSTING_READ(VLV_MASTER_IER); 3842 3843 gen5_gt_irq_reset(dev); 3844 3845 I915_WRITE(HWSTAM, 0xffffffff); 3846 3847 spin_lock_irq(&dev_priv->irq_lock); 3848 if (dev_priv->display_irqs_enabled) 3849 vlv_display_irq_reset(dev_priv); 3850 spin_unlock_irq(&dev_priv->irq_lock); 3851 } 3852 3853 static void cherryview_irq_uninstall(struct drm_device *dev) 3854 { 3855 struct drm_i915_private *dev_priv = to_i915(dev); 3856 3857 if (!dev_priv) 3858 return; 3859 3860 
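	/* Disable the master interrupt first so that nothing new is delivered
	 * while the GT, PCU and display interrupt registers are reset below. */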
I915_WRITE(GEN8_MASTER_IRQ, 0); 3861 POSTING_READ(GEN8_MASTER_IRQ); 3862 3863 gen8_gt_irq_reset(dev_priv); 3864 3865 GEN5_IRQ_RESET(GEN8_PCU_); 3866 3867 spin_lock_irq(&dev_priv->irq_lock); 3868 if (dev_priv->display_irqs_enabled) 3869 vlv_display_irq_reset(dev_priv); 3870 spin_unlock_irq(&dev_priv->irq_lock); 3871 } 3872 3873 static void ironlake_irq_uninstall(struct drm_device *dev) 3874 { 3875 struct drm_i915_private *dev_priv = to_i915(dev); 3876 3877 if (!dev_priv) 3878 return; 3879 3880 ironlake_irq_reset(dev); 3881 } 3882 3883 static void i8xx_irq_preinstall(struct drm_device * dev) 3884 { 3885 struct drm_i915_private *dev_priv = to_i915(dev); 3886 int pipe; 3887 3888 for_each_pipe(dev_priv, pipe) 3889 I915_WRITE(PIPESTAT(pipe), 0); 3890 I915_WRITE16(IMR, 0xffff); 3891 I915_WRITE16(IER, 0x0); 3892 POSTING_READ16(IER); 3893 } 3894 3895 static int i8xx_irq_postinstall(struct drm_device *dev) 3896 { 3897 struct drm_i915_private *dev_priv = to_i915(dev); 3898 3899 I915_WRITE16(EMR, 3900 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3901 3902 /* Unmask the interrupts that we always want on. */ 3903 dev_priv->irq_mask = 3904 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3905 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3906 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3907 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3908 I915_WRITE16(IMR, dev_priv->irq_mask); 3909 3910 I915_WRITE16(IER, 3911 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3912 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3913 I915_USER_INTERRUPT); 3914 POSTING_READ16(IER); 3915 3916 /* Interrupt setup is already guaranteed to be single-threaded, this is 3917 * just to make the assert_spin_locked check happy. */ 3918 spin_lock_irq(&dev_priv->irq_lock); 3919 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3920 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3921 spin_unlock_irq(&dev_priv->irq_lock); 3922 3923 return 0; 3924 } 3925 3926 /* 3927 * Returns true when a page flip has completed. 3928 */ 3929 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3930 int plane, int pipe, u32 iir) 3931 { 3932 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3933 3934 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3935 return false; 3936 3937 if ((iir & flip_pending) == 0) 3938 goto check_page_flip; 3939 3940 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3941 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3942 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3943 * the flip is completed (no longer pending). Since this doesn't raise 3944 * an interrupt per se, we watch for the change at vblank. 
3945 */ 3946 if (I915_READ16(ISR) & flip_pending) 3947 goto check_page_flip; 3948 3949 intel_finish_page_flip_cs(dev_priv, pipe); 3950 return true; 3951 3952 check_page_flip: 3953 intel_check_page_flip(dev_priv, pipe); 3954 return false; 3955 } 3956 3957 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3958 { 3959 struct drm_device *dev = arg; 3960 struct drm_i915_private *dev_priv = to_i915(dev); 3961 u16 iir, new_iir; 3962 u32 pipe_stats[2]; 3963 int pipe; 3964 u16 flip_mask = 3965 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3966 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3967 irqreturn_t ret; 3968 3969 if (!intel_irqs_enabled(dev_priv)) 3970 return IRQ_NONE; 3971 3972 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3973 disable_rpm_wakeref_asserts(dev_priv); 3974 3975 ret = IRQ_NONE; 3976 iir = I915_READ16(IIR); 3977 if (iir == 0) 3978 goto out; 3979 3980 while (iir & ~flip_mask) { 3981 /* Can't rely on pipestat interrupt bit in iir as it might 3982 * have been cleared after the pipestat interrupt was received. 3983 * It doesn't set the bit in iir again, but it still produces 3984 * interrupts (for non-MSI). 3985 */ 3986 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3987 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3988 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3989 3990 for_each_pipe(dev_priv, pipe) { 3991 i915_reg_t reg = PIPESTAT(pipe); 3992 pipe_stats[pipe] = I915_READ(reg); 3993 3994 /* 3995 * Clear the PIPE*STAT regs before the IIR 3996 */ 3997 if (pipe_stats[pipe] & 0x8000ffff) 3998 I915_WRITE(reg, pipe_stats[pipe]); 3999 } 4000 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4001 4002 I915_WRITE16(IIR, iir & ~flip_mask); 4003 new_iir = I915_READ16(IIR); /* Flush posted writes */ 4004 4005 if (iir & I915_USER_INTERRUPT) 4006 notify_ring(&dev_priv->engine[RCS]); 4007 4008 for_each_pipe(dev_priv, pipe) { 4009 int plane = pipe; 4010 if (HAS_FBC(dev_priv)) 4011 plane = !plane; 4012 4013 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4014 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 4015 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4016 4017 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4018 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4019 4020 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4021 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4022 pipe); 4023 } 4024 4025 iir = new_iir; 4026 } 4027 ret = IRQ_HANDLED; 4028 4029 out: 4030 enable_rpm_wakeref_asserts(dev_priv); 4031 4032 return ret; 4033 } 4034 4035 static void i8xx_irq_uninstall(struct drm_device * dev) 4036 { 4037 struct drm_i915_private *dev_priv = to_i915(dev); 4038 int pipe; 4039 4040 for_each_pipe(dev_priv, pipe) { 4041 /* Clear enable bits; then clear status bits */ 4042 I915_WRITE(PIPESTAT(pipe), 0); 4043 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4044 } 4045 I915_WRITE16(IMR, 0xffff); 4046 I915_WRITE16(IER, 0x0); 4047 I915_WRITE16(IIR, I915_READ16(IIR)); 4048 } 4049 4050 static void i915_irq_preinstall(struct drm_device * dev) 4051 { 4052 struct drm_i915_private *dev_priv = to_i915(dev); 4053 int pipe; 4054 4055 if (I915_HAS_HOTPLUG(dev)) { 4056 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4057 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4058 } 4059 4060 I915_WRITE16(HWSTAM, 0xeffe); 4061 for_each_pipe(dev_priv, pipe) 4062 I915_WRITE(PIPESTAT(pipe), 0); 4063 I915_WRITE(IMR, 0xffffffff); 4064 I915_WRITE(IER, 0x0); 4065 POSTING_READ(IER); 4066 } 4067 4068 static int i915_irq_postinstall(struct drm_device 
*dev) 4069 { 4070 struct drm_i915_private *dev_priv = to_i915(dev); 4071 u32 enable_mask; 4072 4073 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4074 4075 /* Unmask the interrupts that we always want on. */ 4076 dev_priv->irq_mask = 4077 ~(I915_ASLE_INTERRUPT | 4078 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4079 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4080 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4081 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4082 4083 enable_mask = 4084 I915_ASLE_INTERRUPT | 4085 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4086 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4087 I915_USER_INTERRUPT; 4088 4089 if (I915_HAS_HOTPLUG(dev)) { 4090 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4091 POSTING_READ(PORT_HOTPLUG_EN); 4092 4093 /* Enable in IER... */ 4094 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4095 /* and unmask in IMR */ 4096 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4097 } 4098 4099 I915_WRITE(IMR, dev_priv->irq_mask); 4100 I915_WRITE(IER, enable_mask); 4101 POSTING_READ(IER); 4102 4103 i915_enable_asle_pipestat(dev_priv); 4104 4105 /* Interrupt setup is already guaranteed to be single-threaded, this is 4106 * just to make the assert_spin_locked check happy. */ 4107 spin_lock_irq(&dev_priv->irq_lock); 4108 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4109 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4110 spin_unlock_irq(&dev_priv->irq_lock); 4111 4112 return 0; 4113 } 4114 4115 /* 4116 * Returns true when a page flip has completed. 4117 */ 4118 static bool i915_handle_vblank(struct drm_i915_private *dev_priv, 4119 int plane, int pipe, u32 iir) 4120 { 4121 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4122 4123 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 4124 return false; 4125 4126 if ((iir & flip_pending) == 0) 4127 goto check_page_flip; 4128 4129 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4130 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4131 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4132 * the flip is completed (no longer pending). Since this doesn't raise 4133 * an interrupt per se, we watch for the change at vblank. 4134 */ 4135 if (I915_READ(ISR) & flip_pending) 4136 goto check_page_flip; 4137 4138 intel_finish_page_flip_cs(dev_priv, pipe); 4139 return true; 4140 4141 check_page_flip: 4142 intel_check_page_flip(dev_priv, pipe); 4143 return false; 4144 } 4145 4146 static irqreturn_t i915_irq_handler(int irq, void *arg) 4147 { 4148 struct drm_device *dev = arg; 4149 struct drm_i915_private *dev_priv = to_i915(dev); 4150 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4151 u32 flip_mask = 4152 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4153 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4154 int pipe, ret = IRQ_NONE; 4155 4156 if (!intel_irqs_enabled(dev_priv)) 4157 return IRQ_NONE; 4158 4159 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4160 disable_rpm_wakeref_asserts(dev_priv); 4161 4162 iir = I915_READ(IIR); 4163 do { 4164 bool irq_received = (iir & ~flip_mask) != 0; 4165 bool blc_event = false; 4166 4167 /* Can't rely on pipestat interrupt bit in iir as it might 4168 * have been cleared after the pipestat interrupt was received. 4169 * It doesn't set the bit in iir again, but it still produces 4170 * interrupts (for non-MSI). 
4171 */ 4172 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4173 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4174 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4175 4176 for_each_pipe(dev_priv, pipe) { 4177 i915_reg_t reg = PIPESTAT(pipe); 4178 pipe_stats[pipe] = I915_READ(reg); 4179 4180 /* Clear the PIPE*STAT regs before the IIR */ 4181 if (pipe_stats[pipe] & 0x8000ffff) { 4182 I915_WRITE(reg, pipe_stats[pipe]); 4183 irq_received = true; 4184 } 4185 } 4186 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4187 4188 if (!irq_received) 4189 break; 4190 4191 /* Consume port. Then clear IIR or we'll miss events */ 4192 if (I915_HAS_HOTPLUG(dev_priv) && 4193 iir & I915_DISPLAY_PORT_INTERRUPT) { 4194 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4195 if (hotplug_status) 4196 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4197 } 4198 4199 I915_WRITE(IIR, iir & ~flip_mask); 4200 new_iir = I915_READ(IIR); /* Flush posted writes */ 4201 4202 if (iir & I915_USER_INTERRUPT) 4203 notify_ring(&dev_priv->engine[RCS]); 4204 4205 for_each_pipe(dev_priv, pipe) { 4206 int plane = pipe; 4207 if (HAS_FBC(dev_priv)) 4208 plane = !plane; 4209 4210 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4211 i915_handle_vblank(dev_priv, plane, pipe, iir)) 4212 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4213 4214 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4215 blc_event = true; 4216 4217 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4218 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4219 4220 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4221 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4222 pipe); 4223 } 4224 4225 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4226 intel_opregion_asle_intr(dev_priv); 4227 4228 /* With MSI, interrupts are only generated when iir 4229 * transitions from zero to nonzero. If another bit got 4230 * set while we were handling the existing iir bits, then 4231 * we would never get another interrupt. 4232 * 4233 * This is fine on non-MSI as well, as if we hit this path 4234 * we avoid exiting the interrupt handler only to generate 4235 * another one. 4236 * 4237 * Note that for MSI this could cause a stray interrupt report 4238 * if an interrupt landed in the time between writing IIR and 4239 * the posting read. This should be rare enough to never 4240 * trigger the 99% of 100,000 interrupts test for disabling 4241 * stray interrupts. 
4242 */ 4243 ret = IRQ_HANDLED; 4244 iir = new_iir; 4245 } while (iir & ~flip_mask); 4246 4247 enable_rpm_wakeref_asserts(dev_priv); 4248 4249 return ret; 4250 } 4251 4252 static void i915_irq_uninstall(struct drm_device * dev) 4253 { 4254 struct drm_i915_private *dev_priv = to_i915(dev); 4255 int pipe; 4256 4257 if (I915_HAS_HOTPLUG(dev)) { 4258 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4259 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4260 } 4261 4262 I915_WRITE16(HWSTAM, 0xffff); 4263 for_each_pipe(dev_priv, pipe) { 4264 /* Clear enable bits; then clear status bits */ 4265 I915_WRITE(PIPESTAT(pipe), 0); 4266 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4267 } 4268 I915_WRITE(IMR, 0xffffffff); 4269 I915_WRITE(IER, 0x0); 4270 4271 I915_WRITE(IIR, I915_READ(IIR)); 4272 } 4273 4274 static void i965_irq_preinstall(struct drm_device * dev) 4275 { 4276 struct drm_i915_private *dev_priv = to_i915(dev); 4277 int pipe; 4278 4279 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4280 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4281 4282 I915_WRITE(HWSTAM, 0xeffe); 4283 for_each_pipe(dev_priv, pipe) 4284 I915_WRITE(PIPESTAT(pipe), 0); 4285 I915_WRITE(IMR, 0xffffffff); 4286 I915_WRITE(IER, 0x0); 4287 POSTING_READ(IER); 4288 } 4289 4290 static int i965_irq_postinstall(struct drm_device *dev) 4291 { 4292 struct drm_i915_private *dev_priv = to_i915(dev); 4293 u32 enable_mask; 4294 u32 error_mask; 4295 4296 /* Unmask the interrupts that we always want on. */ 4297 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4298 I915_DISPLAY_PORT_INTERRUPT | 4299 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4300 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4301 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4302 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4303 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4304 4305 enable_mask = ~dev_priv->irq_mask; 4306 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4307 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4308 enable_mask |= I915_USER_INTERRUPT; 4309 4310 if (IS_G4X(dev_priv)) 4311 enable_mask |= I915_BSD_USER_INTERRUPT; 4312 4313 /* Interrupt setup is already guaranteed to be single-threaded, this is 4314 * just to make the assert_spin_locked check happy. */ 4315 spin_lock_irq(&dev_priv->irq_lock); 4316 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4317 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4318 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4319 spin_unlock_irq(&dev_priv->irq_lock); 4320 4321 /* 4322 * Enable some error detection, note the instruction error mask 4323 * bit is reserved, so we leave it masked. 
4324 */ 4325 if (IS_G4X(dev_priv)) { 4326 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4327 GM45_ERROR_MEM_PRIV | 4328 GM45_ERROR_CP_PRIV | 4329 I915_ERROR_MEMORY_REFRESH); 4330 } else { 4331 error_mask = ~(I915_ERROR_PAGE_TABLE | 4332 I915_ERROR_MEMORY_REFRESH); 4333 } 4334 I915_WRITE(EMR, error_mask); 4335 4336 I915_WRITE(IMR, dev_priv->irq_mask); 4337 I915_WRITE(IER, enable_mask); 4338 POSTING_READ(IER); 4339 4340 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4341 POSTING_READ(PORT_HOTPLUG_EN); 4342 4343 i915_enable_asle_pipestat(dev_priv); 4344 4345 return 0; 4346 } 4347 4348 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4349 { 4350 u32 hotplug_en; 4351 4352 assert_spin_locked(&dev_priv->irq_lock); 4353 4354 /* Note HDMI and DP share hotplug bits */ 4355 /* enable bits are the same for all generations */ 4356 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4357 /* Programming the CRT detection parameters tends 4358 to generate a spurious hotplug event about three 4359 seconds later. So just do it once. 4360 */ 4361 if (IS_G4X(dev_priv)) 4362 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4363 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4364 4365 /* Ignore TV since it's buggy */ 4366 i915_hotplug_interrupt_update_locked(dev_priv, 4367 HOTPLUG_INT_EN_MASK | 4368 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4369 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4370 hotplug_en); 4371 } 4372 4373 static irqreturn_t i965_irq_handler(int irq, void *arg) 4374 { 4375 struct drm_device *dev = arg; 4376 struct drm_i915_private *dev_priv = to_i915(dev); 4377 u32 iir, new_iir; 4378 u32 pipe_stats[I915_MAX_PIPES]; 4379 int ret = IRQ_NONE, pipe; 4380 u32 flip_mask = 4381 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4382 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4383 4384 if (!intel_irqs_enabled(dev_priv)) 4385 return IRQ_NONE; 4386 4387 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4388 disable_rpm_wakeref_asserts(dev_priv); 4389 4390 iir = I915_READ(IIR); 4391 4392 for (;;) { 4393 bool irq_received = (iir & ~flip_mask) != 0; 4394 bool blc_event = false; 4395 4396 /* Can't rely on pipestat interrupt bit in iir as it might 4397 * have been cleared after the pipestat interrupt was received. 4398 * It doesn't set the bit in iir again, but it still produces 4399 * interrupts (for non-MSI). 4400 */ 4401 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4402 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4403 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4404 4405 for_each_pipe(dev_priv, pipe) { 4406 i915_reg_t reg = PIPESTAT(pipe); 4407 pipe_stats[pipe] = I915_READ(reg); 4408 4409 /* 4410 * Clear the PIPE*STAT regs before the IIR 4411 */ 4412 if (pipe_stats[pipe] & 0x8000ffff) { 4413 I915_WRITE(reg, pipe_stats[pipe]); 4414 irq_received = true; 4415 } 4416 } 4417 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4418 4419 if (!irq_received) 4420 break; 4421 4422 ret = IRQ_HANDLED; 4423 4424 /* Consume port. 
Then clear IIR or we'll miss events */ 4425 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4426 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4427 if (hotplug_status) 4428 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4429 } 4430 4431 I915_WRITE(IIR, iir & ~flip_mask); 4432 new_iir = I915_READ(IIR); /* Flush posted writes */ 4433 4434 if (iir & I915_USER_INTERRUPT) 4435 notify_ring(&dev_priv->engine[RCS]); 4436 if (iir & I915_BSD_USER_INTERRUPT) 4437 notify_ring(&dev_priv->engine[VCS]); 4438 4439 for_each_pipe(dev_priv, pipe) { 4440 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4441 i915_handle_vblank(dev_priv, pipe, pipe, iir)) 4442 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4443 4444 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4445 blc_event = true; 4446 4447 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4448 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4449 4450 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4451 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4452 } 4453 4454 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4455 intel_opregion_asle_intr(dev_priv); 4456 4457 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4458 gmbus_irq_handler(dev_priv); 4459 4460 /* With MSI, interrupts are only generated when iir 4461 * transitions from zero to nonzero. If another bit got 4462 * set while we were handling the existing iir bits, then 4463 * we would never get another interrupt. 4464 * 4465 * This is fine on non-MSI as well, as if we hit this path 4466 * we avoid exiting the interrupt handler only to generate 4467 * another one. 4468 * 4469 * Note that for MSI this could cause a stray interrupt report 4470 * if an interrupt landed in the time between writing IIR and 4471 * the posting read. This should be rare enough to never 4472 * trigger the 99% of 100,000 interrupts test for disabling 4473 * stray interrupts. 4474 */ 4475 iir = new_iir; 4476 } 4477 4478 enable_rpm_wakeref_asserts(dev_priv); 4479 4480 return ret; 4481 } 4482 4483 static void i965_irq_uninstall(struct drm_device * dev) 4484 { 4485 struct drm_i915_private *dev_priv = to_i915(dev); 4486 int pipe; 4487 4488 if (!dev_priv) 4489 return; 4490 4491 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4492 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4493 4494 I915_WRITE(HWSTAM, 0xffffffff); 4495 for_each_pipe(dev_priv, pipe) 4496 I915_WRITE(PIPESTAT(pipe), 0); 4497 I915_WRITE(IMR, 0xffffffff); 4498 I915_WRITE(IER, 0x0); 4499 4500 for_each_pipe(dev_priv, pipe) 4501 I915_WRITE(PIPESTAT(pipe), 4502 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4503 I915_WRITE(IIR, I915_READ(IIR)); 4504 } 4505 4506 /** 4507 * intel_irq_init - initializes irq support 4508 * @dev_priv: i915 device instance 4509 * 4510 * This function initializes all the irq support including work items, timers 4511 * and all the vtables. It does not setup the interrupt itself though. 
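 * The interrupt itself is only enabled later, via intel_irq_install().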
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intr_keep = 0;

	/*
	 * SNB and IVB can hard hang (and VLV and CHV may) on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
4561 */ 4562 if (!IS_GEN2(dev_priv)) 4563 dev->vblank_disable_immediate = true; 4564 4565 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4566 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4567 4568 if (IS_CHERRYVIEW(dev_priv)) { 4569 dev->driver->irq_handler = cherryview_irq_handler; 4570 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4571 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4572 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4573 dev->driver->enable_vblank = valleyview_enable_vblank; 4574 dev->driver->disable_vblank = valleyview_disable_vblank; 4575 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4576 } else if (IS_VALLEYVIEW(dev_priv)) { 4577 dev->driver->irq_handler = valleyview_irq_handler; 4578 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4579 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4580 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4581 dev->driver->enable_vblank = valleyview_enable_vblank; 4582 dev->driver->disable_vblank = valleyview_disable_vblank; 4583 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4584 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4585 dev->driver->irq_handler = gen8_irq_handler; 4586 dev->driver->irq_preinstall = gen8_irq_reset; 4587 dev->driver->irq_postinstall = gen8_irq_postinstall; 4588 dev->driver->irq_uninstall = gen8_irq_uninstall; 4589 dev->driver->enable_vblank = gen8_enable_vblank; 4590 dev->driver->disable_vblank = gen8_disable_vblank; 4591 if (IS_BROXTON(dev)) 4592 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4593 else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev)) 4594 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4595 else 4596 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4597 } else if (HAS_PCH_SPLIT(dev)) { 4598 dev->driver->irq_handler = ironlake_irq_handler; 4599 dev->driver->irq_preinstall = ironlake_irq_reset; 4600 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4601 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4602 dev->driver->enable_vblank = ironlake_enable_vblank; 4603 dev->driver->disable_vblank = ironlake_disable_vblank; 4604 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4605 } else { 4606 if (IS_GEN2(dev_priv)) { 4607 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4608 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4609 dev->driver->irq_handler = i8xx_irq_handler; 4610 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4611 } else if (IS_GEN3(dev_priv)) { 4612 dev->driver->irq_preinstall = i915_irq_preinstall; 4613 dev->driver->irq_postinstall = i915_irq_postinstall; 4614 dev->driver->irq_uninstall = i915_irq_uninstall; 4615 dev->driver->irq_handler = i915_irq_handler; 4616 } else { 4617 dev->driver->irq_preinstall = i965_irq_preinstall; 4618 dev->driver->irq_postinstall = i965_irq_postinstall; 4619 dev->driver->irq_uninstall = i965_irq_uninstall; 4620 dev->driver->irq_handler = i965_irq_handler; 4621 } 4622 if (I915_HAS_HOTPLUG(dev_priv)) 4623 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4624 dev->driver->enable_vblank = i915_enable_vblank; 4625 dev->driver->disable_vblank = i915_disable_vblank; 4626 } 4627 } 4628 4629 /** 4630 * intel_irq_install - enables the hardware interrupt 4631 * @dev_priv: i915 device instance 4632 * 4633 * This function enables the hardware interrupt handling, but leaves the hotplug 4634 * handling still disabled. It is called after intel_irq_init(). 
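 * The irq_preinstall and irq_postinstall hooks registered in intel_irq_init()
 * are invoked from drm_irq_install() as part of this call.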
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}