/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
	[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
	[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
	[HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP,
	[HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP,
	[HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP,
	[HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP,
	[HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP,
	[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};

/* IIR can theoretically queue up two events. Be paranoid.
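 * The reset macros below therefore clear IIR twice, with a posting read
 * after each write, so that a second queued event is flushed as well.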
 */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN3_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

#define GEN2_IRQ_RESET(type) do { \
	I915_WRITE16(type##IMR, 0xffff); \
	POSTING_READ16(type##IMR); \
	I915_WRITE16(type##IER, 0); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
	I915_WRITE16(type##IIR, 0xffff); \
	POSTING_READ16(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. So that concurrent read-modify-write cycles
 * don't interfere, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
				const unsigned int bank,
				const unsigned int bit)
{
	void __iomem * const regs = i915->regs;
	u32 dw;

	lockdep_assert_held(&i915->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(i915, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);

	return INTEL_GEN(dev_priv) >= 8 ?
		GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_MASK;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IMR(2);
	else
		return GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) >= 11)
		return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
	else if (INTEL_GEN(dev_priv) >= 8)
		return GEN8_GT_IER(2);
	else
		return GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* a barrier is missing here, but we don't really need one */
}

void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);

	while (gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM))
		;

	dev_priv->gt_pm.rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);

	if (INTEL_GEN(dev_priv) >= 11)
		WARN_ON_ONCE(gen11_reset_one_iir(dev_priv, 0, GEN11_GTPM));
	else
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);

	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&rps->work);
	if (INTEL_GEN(dev_priv) >= 11)
		gen11_reset_rps_interrupts(dev_priv);
	else
		gen6_reset_rps_interrupts(dev_priv);
}

void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (!dev_priv->guc.interrupts_enabled) {
		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) &
			     dev_priv->pm_guc_events);
		dev_priv->guc.interrupts_enabled = true;
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
{
	assert_rpm_wakelock_held(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->guc.interrupts_enabled = false;

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	gen9_reset_guc_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (INTEL_GEN(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		  status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		  pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
		  "pipe %c: status_mask=0x%x\n",
		  pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	I915_WRITE(reg, enable_mask | status_mask);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 * start of vblank:
 * latch double buffered registers
 * increment frame counter (ctg+)
 * generate start of vblank interrupt (gen4+)
 * |
 * |          frame start:
 * |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 * |          may be shifted forward 1-3 extra lines via PIPECONF
 * |          |
 * |          |  start of vsync:
 * |          |  generate vsync interrupt
 * |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .         \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
	unsigned long irqflags;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not report the scanline correctly, either because the timings
 * are driven from the PORT or because of issues with scanline register
 * updates.
 * This function instead uses the framestamp and current timestamp
 * registers to calculate the scanline.
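 * The scanline is derived from the time elapsed since the last vblank:
 * (TIMESTAMP_CTR - PIPE_FRMTMSTMP) * crtc_clock / (1000 * htotal),
 * then offset by vblank_start and wrapped modulo vtotal (see below).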
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end.
	 * And outside vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct intel_engine_cs *engine)
{
	const u32 seqno = intel_engine_get_seqno(engine);
	struct i915_request *rq = NULL;
#ifdef __linux__
	struct task_struct *tsk = NULL;
#else
	struct proc *tsk = NULL;
#endif

	struct intel_wait *wait;

	if (unlikely(!engine->breadcrumbs.irq_armed))
		return;

	rcu_read_lock();

	spin_lock(&engine->breadcrumbs.irq_lock);
	wait = engine->breadcrumbs.irq_wait;
	if (wait) {
		/*
		 * We use a callback from the dma-fence to submit
		 * requests after waiting on our own requests. To
		 * ensure minimum delay in queuing the next request to
		 * hardware, signal the fence now rather than wait for
		 * the signaler to be woken up. We still wake up the
		 * waiter in order to handle the irq-seqno coherency
		 * issues (we may receive the interrupt before the
		 * seqno is written, see __i915_request_irq_complete())
		 * and to handle coalescing of multiple seqno updates
		 * and many waiters.
		 */
		if (i915_seqno_passed(seqno, wait->seqno)) {
			struct i915_request *waiter = wait->request;

			if (waiter &&
			    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &waiter->fence.flags) &&
			    intel_wait_check_request(wait, waiter))
				rq = i915_request_get(waiter);

			tsk = wait->tsk;
		} else {
			if (engine->irq_seqno_barrier &&
			    i915_seqno_passed(seqno, wait->seqno - 1)) {
				set_bit(ENGINE_IRQ_BREADCRUMB,
					&engine->irq_posted);
				tsk = wait->tsk;
			}
		}

		engine->breadcrumbs.irq_count++;
	} else {
		if (engine->breadcrumbs.irq_armed)
			__intel_engine_disarm_breadcrumbs(engine);
	}
	spin_unlock(&engine->breadcrumbs.irq_lock);

	if (rq) {
		spin_lock(&rq->lock);
		dma_fence_signal_locked(&rq->fence);
		GEM_BUG_ON(!i915_request_completed(rq));
		spin_unlock(&rq->lock);

		i915_request_put(rq);
	}

#ifdef __linux__
	if (tsk && tsk->state & TASK_NORMAL)
#else
	if (tsk && tsk->p_stat == SSLEEP)
#endif
		wake_up_process(tsk);

	rcu_read_unlock();

	trace_intel_engine_notify(engine, wait);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->ktime = ktime_get_raw();
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	const struct intel_rps_ei *prev = &rps->ei;
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);

#ifdef __linux__
	if (prev->ktime) {
#else
	if (ktime_to_ns(prev->ktime)) {
#endif
		u64 time, c0;
		u32 render, media;

		time = ktime_us_delta(now.ktime, prev->ktime);

		time *= dev_priv->czclk_freq;

		/* Workload can be split between render + media,
		 * e.g. SwapBuffers being blitted in X after being rendered in
		 * mesa. To account for this we need to combine both engines
		 * into our activity counter.
		 */
		render = now.render_c0 - prev->render_c0;
		media = now.media_c0 - prev->media_c0;
		c0 = max(render, media);
		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */

		if (c0 > time * rps->power.up_threshold)
			events = GEN6_PM_RP_UP_THRESHOLD;
		else if (c0 < time * rps->power.down_threshold)
			events = GEN6_PM_RP_DOWN_THRESHOLD;
	}

	rps->ei = now;
	return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, gt_pm.rps.work);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	bool client_boost = false;
	int new_delay, adj, min, max;
	u32 pm_iir = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled) {
		pm_iir = fetch_and_zero(&rps->pm_iir);
		client_boost = atomic_read(&rps->num_waiters);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process.
	 */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		goto out;

	mutex_lock(&dev_priv->pcu_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = rps->last_adj;
	new_delay = rps->cur_freq;
	min = rps->min_freq_softlimit;
	max = rps->max_freq_softlimit;
	if (client_boost)
		max = rps->max_freq;
	if (client_boost && new_delay < rps->boost_freq) {
		new_delay = rps->boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
	bool tasklet = false;

	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
		tasklet = true;

	if (iir & GT_RENDER_USER_INTERRUPT) {
		notify_ring(engine);
		tasklet |= USES_GUC_SUBMISSION(engine->i915);
	}

	if (tasklet)
		tasklet_hi_schedule(&engine->execlists.tasklet);
}

static void gen8_gt_irq_ack(struct drm_i915_private *i915,
			    u32 master_ctl, u32 gt_iir[4])
{
	void __iomem * const regs = i915->regs;

#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
		      GEN8_GT_BCS_IRQ | \
		      GEN8_GT_VCS1_IRQ | \
		      GEN8_GT_VCS2_IRQ | \
		      GEN8_GT_VECS_IRQ | \
		      GEN8_GT_PM_IRQ | \
		      GEN8_GT_GUC_IRQ)

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(gt_iir[0]))
			raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(gt_iir[1]))
			raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(gt_iir[2] & (i915->pm_rps_events |
					i915->pm_guc_events)))
			raw_reg_write(regs, GEN8_GT_IIR(2),
				      gt_iir[2] & (i915->pm_rps_events |
						   i915->pm_guc_events));
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(gt_iir[3]))
			raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
	}
}

static void gen8_gt_irq_handler(struct drm_i915_private *i915,
				u32 master_ctl, u32 gt_iir[4])
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gen8_cs_irq_handler(i915->engine[RCS],
				    gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[BCS],
				    gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gen8_cs_irq_handler(i915->engine[VCS],
				    gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(i915->engine[VCS2],
				    gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gen8_cs_irq_handler(i915->engine[VECS],
				    gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		gen6_rps_irq_handler(i915, gt_iir[2]);
		gen9_guc_irq_handler(i915, gt_iir[2]);
	}
}

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & ICP_DDIA_HPD_LONG_DETECT;
	case HPD_PORT_B:
		return val & ICP_DDIB_HPD_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_C:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
	case HPD_PORT_D:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
	case HPD_PORT_E:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
	case HPD_PORT_F:
		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
	uint32_t crcs[5];

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	crcs[0] = crc0;
	crcs[1] = crc1;
	crcs[2] = crc2;
	crcs[3] = crc3;
	crcs[4] = crc4;
	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/*
The RPS events need forcewake, so we add them to a work queue and mask their 1843 * IMR bits until the work is done. Other interrupts can be processed without 1844 * the work queue. */ 1845 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1846 { 1847 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1848 1849 if (pm_iir & dev_priv->pm_rps_events) { 1850 spin_lock(&dev_priv->irq_lock); 1851 gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1852 if (rps->interrupts_enabled) { 1853 rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; 1854 schedule_work(&rps->work); 1855 } 1856 spin_unlock(&dev_priv->irq_lock); 1857 } 1858 1859 if (INTEL_GEN(dev_priv) >= 8) 1860 return; 1861 1862 if (HAS_VEBOX(dev_priv)) { 1863 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1864 notify_ring(dev_priv->engine[VECS]); 1865 1866 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1867 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1868 } 1869 } 1870 1871 static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir) 1872 { 1873 if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) 1874 intel_guc_to_host_event_handler(&dev_priv->guc); 1875 } 1876 1877 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 1878 { 1879 enum pipe pipe; 1880 1881 for_each_pipe(dev_priv, pipe) { 1882 I915_WRITE(PIPESTAT(pipe), 1883 PIPESTAT_INT_STATUS_MASK | 1884 PIPE_FIFO_UNDERRUN_STATUS); 1885 1886 dev_priv->pipestat_irq_mask[pipe] = 0; 1887 } 1888 } 1889 1890 static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1891 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1892 { 1893 int pipe; 1894 1895 spin_lock(&dev_priv->irq_lock); 1896 1897 if (!dev_priv->display_irqs_enabled) { 1898 spin_unlock(&dev_priv->irq_lock); 1899 return; 1900 } 1901 1902 for_each_pipe(dev_priv, pipe) { 1903 i915_reg_t reg; 1904 u32 status_mask, enable_mask, iir_bit = 0; 1905 1906 /* 1907 * PIPESTAT bits get signalled even when the interrupt is 1908 * disabled with the mask bits, and some of the status bits do 1909 * not generate interrupts at all (like the underrun bit). Hence 1910 * we need to be careful that we only handle what we want to 1911 * handle. 1912 */ 1913 1914 /* FIFO underruns are filtered in the underrun handler. */ 1915 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1916 1917 switch (pipe) { 1918 case PIPE_A: 1919 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1920 break; 1921 case PIPE_B: 1922 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1923 break; 1924 case PIPE_C: 1925 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1926 break; 1927 } 1928 if (iir & iir_bit) 1929 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1930 1931 if (!status_mask) 1932 continue; 1933 1934 reg = PIPESTAT(pipe); 1935 pipe_stats[pipe] = I915_READ(reg) & status_mask; 1936 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1937 1938 /* 1939 * Clear the PIPE*STAT regs before the IIR 1940 * 1941 * Toggle the enable bits to make sure we get an 1942 * edge in the ISR pipe event bit if we don't clear 1943 * all the enabled status bits. Otherwise the edge 1944 * triggered IIR on i965/g4x wouldn't notice that 1945 * an interrupt is still pending.
1946 */ 1947 if (pipe_stats[pipe]) { 1948 I915_WRITE(reg, pipe_stats[pipe]); 1949 I915_WRITE(reg, enable_mask); 1950 } 1951 } 1952 spin_unlock(&dev_priv->irq_lock); 1953 } 1954 1955 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1956 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1957 { 1958 enum pipe pipe; 1959 1960 for_each_pipe(dev_priv, pipe) { 1961 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1962 drm_handle_vblank(&dev_priv->drm, pipe); 1963 1964 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1965 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1966 1967 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1968 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1969 } 1970 } 1971 1972 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1973 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1974 { 1975 bool blc_event = false; 1976 enum pipe pipe; 1977 1978 for_each_pipe(dev_priv, pipe) { 1979 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1980 drm_handle_vblank(&dev_priv->drm, pipe); 1981 1982 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1983 blc_event = true; 1984 1985 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1986 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1987 1988 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1989 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1990 } 1991 1992 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1993 intel_opregion_asle_intr(dev_priv); 1994 } 1995 1996 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1997 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1998 { 1999 bool blc_event = false; 2000 enum pipe pipe; 2001 2002 for_each_pipe(dev_priv, pipe) { 2003 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2004 drm_handle_vblank(&dev_priv->drm, pipe); 2005 2006 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2007 blc_event = true; 2008 2009 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2010 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2011 2012 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2013 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2014 } 2015 2016 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2017 intel_opregion_asle_intr(dev_priv); 2018 2019 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2020 gmbus_irq_handler(dev_priv); 2021 } 2022 2023 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 2024 u32 pipe_stats[I915_MAX_PIPES]) 2025 { 2026 enum pipe pipe; 2027 2028 for_each_pipe(dev_priv, pipe) { 2029 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 2030 drm_handle_vblank(&dev_priv->drm, pipe); 2031 2032 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 2033 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2034 2035 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2036 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2037 } 2038 2039 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2040 gmbus_irq_handler(dev_priv); 2041 } 2042 2043 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 2044 { 2045 u32 hotplug_status = 0, hotplug_status_mask; 2046 int i; 2047 2048 if (IS_G4X(dev_priv) || 2049 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2050 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | 2051 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; 2052 else 2053 hotplug_status_mask = HOTPLUG_INT_STATUS_I915; 2054 2055 /* 2056 * We absolutely have to clear all the pending interrupt 2057 * bits in PORT_HOTPLUG_STAT. 
Otherwise the ISR port 2058 * interrupt bit won't have an edge, and the i965/g4x 2059 * edge triggered IIR will not notice that an interrupt 2060 * is still pending. We can't use PORT_HOTPLUG_EN to 2061 * guarantee the edge as the act of toggling the enable 2062 * bits can itself generate a new hotplug interrupt :( 2063 */ 2064 for (i = 0; i < 10; i++) { 2065 u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; 2066 2067 if (tmp == 0) 2068 return hotplug_status; 2069 2070 hotplug_status |= tmp; 2071 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2072 } 2073 2074 WARN_ONCE(1, 2075 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", 2076 I915_READ(PORT_HOTPLUG_STAT)); 2077 2078 return hotplug_status; 2079 } 2080 2081 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2082 u32 hotplug_status) 2083 { 2084 u32 pin_mask = 0, long_mask = 0; 2085 2086 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 2087 IS_CHERRYVIEW(dev_priv)) { 2088 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 2089 2090 if (hotplug_trigger) { 2091 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2092 hotplug_trigger, hotplug_trigger, 2093 hpd_status_g4x, 2094 i9xx_port_hotplug_long_detect); 2095 2096 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2097 } 2098 2099 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 2100 dp_aux_irq_handler(dev_priv); 2101 } else { 2102 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 2103 2104 if (hotplug_trigger) { 2105 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2106 hotplug_trigger, hotplug_trigger, 2107 hpd_status_i915, 2108 i9xx_port_hotplug_long_detect); 2109 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2110 } 2111 } 2112 } 2113 2114 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 2115 { 2116 struct drm_device *dev = arg; 2117 struct drm_i915_private *dev_priv = to_i915(dev); 2118 irqreturn_t ret = IRQ_NONE; 2119 2120 if (!intel_irqs_enabled(dev_priv)) 2121 return IRQ_NONE; 2122 2123 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2124 disable_rpm_wakeref_asserts(dev_priv); 2125 2126 do { 2127 u32 iir, gt_iir, pm_iir; 2128 u32 pipe_stats[I915_MAX_PIPES] = {}; 2129 u32 hotplug_status = 0; 2130 u32 ier = 0; 2131 2132 gt_iir = I915_READ(GTIIR); 2133 pm_iir = I915_READ(GEN6_PMIIR); 2134 iir = I915_READ(VLV_IIR); 2135 2136 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2137 break; 2138 2139 ret = IRQ_HANDLED; 2140 2141 /* 2142 * Theory on interrupt generation, based on empirical evidence: 2143 * 2144 * x = ((VLV_IIR & VLV_IER) || 2145 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2146 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2147 * 2148 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2149 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2150 * guarantee the CPU interrupt will be raised again even if we 2151 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2152 * bits this time around. 
2153 */ 2154 I915_WRITE(VLV_MASTER_IER, 0); 2155 ier = I915_READ(VLV_IER); 2156 I915_WRITE(VLV_IER, 0); 2157 2158 if (gt_iir) 2159 I915_WRITE(GTIIR, gt_iir); 2160 if (pm_iir) 2161 I915_WRITE(GEN6_PMIIR, pm_iir); 2162 2163 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2164 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2165 2166 /* Call regardless, as some status bits might not be 2167 * signalled in iir */ 2168 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2169 2170 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2171 I915_LPE_PIPE_B_INTERRUPT)) 2172 intel_lpe_audio_irq_handler(dev_priv); 2173 2174 /* 2175 * VLV_IIR is single buffered, and reflects the level 2176 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2177 */ 2178 if (iir) 2179 I915_WRITE(VLV_IIR, iir); 2180 2181 I915_WRITE(VLV_IER, ier); 2182 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2183 2184 if (gt_iir) 2185 snb_gt_irq_handler(dev_priv, gt_iir); 2186 if (pm_iir) 2187 gen6_rps_irq_handler(dev_priv, pm_iir); 2188 2189 if (hotplug_status) 2190 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2191 2192 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2193 } while (0); 2194 2195 enable_rpm_wakeref_asserts(dev_priv); 2196 2197 return ret; 2198 } 2199 2200 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2201 { 2202 struct drm_device *dev = arg; 2203 struct drm_i915_private *dev_priv = to_i915(dev); 2204 irqreturn_t ret = IRQ_NONE; 2205 2206 if (!intel_irqs_enabled(dev_priv)) 2207 return IRQ_NONE; 2208 2209 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2210 disable_rpm_wakeref_asserts(dev_priv); 2211 2212 do { 2213 u32 master_ctl, iir; 2214 u32 pipe_stats[I915_MAX_PIPES] = {}; 2215 u32 hotplug_status = 0; 2216 u32 gt_iir[4]; 2217 u32 ier = 0; 2218 2219 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2220 iir = I915_READ(VLV_IIR); 2221 2222 if (master_ctl == 0 && iir == 0) 2223 break; 2224 2225 ret = IRQ_HANDLED; 2226 2227 /* 2228 * Theory on interrupt generation, based on empirical evidence: 2229 * 2230 * x = ((VLV_IIR & VLV_IER) || 2231 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2232 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2233 * 2234 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2235 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2236 * guarantee the CPU interrupt will be raised again even if we 2237 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2238 * bits this time around. 2239 */ 2240 I915_WRITE(GEN8_MASTER_IRQ, 0); 2241 ier = I915_READ(VLV_IER); 2242 I915_WRITE(VLV_IER, 0); 2243 2244 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2245 2246 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2247 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2248 2249 /* Call regardless, as some status bits might not be 2250 * signalled in iir */ 2251 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2252 2253 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2254 I915_LPE_PIPE_B_INTERRUPT | 2255 I915_LPE_PIPE_C_INTERRUPT)) 2256 intel_lpe_audio_irq_handler(dev_priv); 2257 2258 /* 2259 * VLV_IIR is single buffered, and reflects the level 2260 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
2261 */ 2262 if (iir) 2263 I915_WRITE(VLV_IIR, iir); 2264 2265 I915_WRITE(VLV_IER, ier); 2266 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2267 2268 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2269 2270 if (hotplug_status) 2271 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2272 2273 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2274 } while (0); 2275 2276 enable_rpm_wakeref_asserts(dev_priv); 2277 2278 return ret; 2279 } 2280 2281 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2282 u32 hotplug_trigger, 2283 const u32 hpd[HPD_NUM_PINS]) 2284 { 2285 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2286 2287 /* 2288 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2289 * unless we touch the hotplug register, even if hotplug_trigger is 2290 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2291 * errors. 2292 */ 2293 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2294 if (!hotplug_trigger) { 2295 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2296 PORTD_HOTPLUG_STATUS_MASK | 2297 PORTC_HOTPLUG_STATUS_MASK | 2298 PORTB_HOTPLUG_STATUS_MASK; 2299 dig_hotplug_reg &= ~mask; 2300 } 2301 2302 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2303 if (!hotplug_trigger) 2304 return; 2305 2306 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2307 dig_hotplug_reg, hpd, 2308 pch_port_hotplug_long_detect); 2309 2310 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2311 } 2312 2313 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2314 { 2315 int pipe; 2316 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2317 2318 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2319 2320 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2321 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2322 SDE_AUDIO_POWER_SHIFT); 2323 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2324 port_name(port)); 2325 } 2326 2327 if (pch_iir & SDE_AUX_MASK) 2328 dp_aux_irq_handler(dev_priv); 2329 2330 if (pch_iir & SDE_GMBUS) 2331 gmbus_irq_handler(dev_priv); 2332 2333 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2334 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2335 2336 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2337 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2338 2339 if (pch_iir & SDE_POISON) 2340 DRM_ERROR("PCH poison interrupt\n"); 2341 2342 if (pch_iir & SDE_FDI_MASK) 2343 for_each_pipe(dev_priv, pipe) 2344 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2345 pipe_name(pipe), 2346 I915_READ(FDI_RX_IIR(pipe))); 2347 2348 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2349 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2350 2351 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2352 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2353 2354 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2355 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2356 2357 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2358 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2359 } 2360 2361 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2362 { 2363 u32 err_int = I915_READ(GEN7_ERR_INT); 2364 enum pipe pipe; 2365 2366 if (err_int & ERR_INT_POISON) 2367 DRM_ERROR("Poison interrupt\n"); 2368 2369 for_each_pipe(dev_priv, pipe) { 2370 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2371 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2372 2373 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2374 if (IS_IVYBRIDGE(dev_priv)) 2375 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2376 else 2377 
hsw_pipe_crc_irq_handler(dev_priv, pipe); 2378 } 2379 } 2380 2381 I915_WRITE(GEN7_ERR_INT, err_int); 2382 } 2383 2384 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2385 { 2386 u32 serr_int = I915_READ(SERR_INT); 2387 enum pipe pipe; 2388 2389 if (serr_int & SERR_INT_POISON) 2390 DRM_ERROR("PCH poison interrupt\n"); 2391 2392 for_each_pipe(dev_priv, pipe) 2393 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2394 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2395 2396 I915_WRITE(SERR_INT, serr_int); 2397 } 2398 2399 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2400 { 2401 int pipe; 2402 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2403 2404 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2405 2406 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2407 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2408 SDE_AUDIO_POWER_SHIFT_CPT); 2409 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2410 port_name(port)); 2411 } 2412 2413 if (pch_iir & SDE_AUX_MASK_CPT) 2414 dp_aux_irq_handler(dev_priv); 2415 2416 if (pch_iir & SDE_GMBUS_CPT) 2417 gmbus_irq_handler(dev_priv); 2418 2419 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2420 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2421 2422 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2423 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2424 2425 if (pch_iir & SDE_FDI_MASK_CPT) 2426 for_each_pipe(dev_priv, pipe) 2427 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2428 pipe_name(pipe), 2429 I915_READ(FDI_RX_IIR(pipe))); 2430 2431 if (pch_iir & SDE_ERROR_CPT) 2432 cpt_serr_int_handler(dev_priv); 2433 } 2434 2435 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2436 { 2437 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; 2438 u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; 2439 u32 pin_mask = 0, long_mask = 0; 2440 2441 if (ddi_hotplug_trigger) { 2442 u32 dig_hotplug_reg; 2443 2444 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); 2445 I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); 2446 2447 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2448 ddi_hotplug_trigger, 2449 dig_hotplug_reg, hpd_icp, 2450 icp_ddi_port_hotplug_long_detect); 2451 } 2452 2453 if (tc_hotplug_trigger) { 2454 u32 dig_hotplug_reg; 2455 2456 dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); 2457 I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); 2458 2459 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2460 tc_hotplug_trigger, 2461 dig_hotplug_reg, hpd_icp, 2462 icp_tc_port_hotplug_long_detect); 2463 } 2464 2465 if (pin_mask) 2466 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2467 2468 if (pch_iir & SDE_GMBUS_ICP) 2469 gmbus_irq_handler(dev_priv); 2470 } 2471 2472 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2473 { 2474 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2475 ~SDE_PORTE_HOTPLUG_SPT; 2476 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2477 u32 pin_mask = 0, long_mask = 0; 2478 2479 if (hotplug_trigger) { 2480 u32 dig_hotplug_reg; 2481 2482 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2483 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2484 2485 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2486 hotplug_trigger, dig_hotplug_reg, hpd_spt, 2487 spt_port_hotplug_long_detect); 2488 } 2489 2490 if (hotplug2_trigger) { 2491 u32 dig_hotplug_reg; 2492 2493 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2494 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2495 2496 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, 2497 
hotplug2_trigger, dig_hotplug_reg, hpd_spt, 2498 spt_port_hotplug2_long_detect); 2499 } 2500 2501 if (pin_mask) 2502 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2503 2504 if (pch_iir & SDE_GMBUS_CPT) 2505 gmbus_irq_handler(dev_priv); 2506 } 2507 2508 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2509 u32 hotplug_trigger, 2510 const u32 hpd[HPD_NUM_PINS]) 2511 { 2512 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2513 2514 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2515 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2516 2517 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2518 dig_hotplug_reg, hpd, 2519 ilk_port_hotplug_long_detect); 2520 2521 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2522 } 2523 2524 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2525 u32 de_iir) 2526 { 2527 enum pipe pipe; 2528 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2529 2530 if (hotplug_trigger) 2531 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2532 2533 if (de_iir & DE_AUX_CHANNEL_A) 2534 dp_aux_irq_handler(dev_priv); 2535 2536 if (de_iir & DE_GSE) 2537 intel_opregion_asle_intr(dev_priv); 2538 2539 if (de_iir & DE_POISON) 2540 DRM_ERROR("Poison interrupt\n"); 2541 2542 for_each_pipe(dev_priv, pipe) { 2543 if (de_iir & DE_PIPE_VBLANK(pipe)) 2544 drm_handle_vblank(&dev_priv->drm, pipe); 2545 2546 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2547 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2548 2549 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2550 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2551 } 2552 2553 /* check event from PCH */ 2554 if (de_iir & DE_PCH_EVENT) { 2555 u32 pch_iir = I915_READ(SDEIIR); 2556 2557 if (HAS_PCH_CPT(dev_priv)) 2558 cpt_irq_handler(dev_priv, pch_iir); 2559 else 2560 ibx_irq_handler(dev_priv, pch_iir); 2561 2562 /* should clear PCH hotplug event before clear CPU irq */ 2563 I915_WRITE(SDEIIR, pch_iir); 2564 } 2565 2566 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2567 ironlake_rps_change_irq_handler(dev_priv); 2568 } 2569 2570 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2571 u32 de_iir) 2572 { 2573 enum pipe pipe; 2574 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2575 2576 if (hotplug_trigger) 2577 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2578 2579 if (de_iir & DE_ERR_INT_IVB) 2580 ivb_err_int_handler(dev_priv); 2581 2582 if (de_iir & DE_EDP_PSR_INT_HSW) { 2583 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2584 2585 intel_psr_irq_handler(dev_priv, psr_iir); 2586 I915_WRITE(EDP_PSR_IIR, psr_iir); 2587 } 2588 2589 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2590 dp_aux_irq_handler(dev_priv); 2591 2592 if (de_iir & DE_GSE_IVB) 2593 intel_opregion_asle_intr(dev_priv); 2594 2595 for_each_pipe(dev_priv, pipe) { 2596 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2597 drm_handle_vblank(&dev_priv->drm, pipe); 2598 } 2599 2600 /* check event from PCH */ 2601 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2602 u32 pch_iir = I915_READ(SDEIIR); 2603 2604 cpt_irq_handler(dev_priv, pch_iir); 2605 2606 /* clear PCH hotplug event before clear CPU irq */ 2607 I915_WRITE(SDEIIR, pch_iir); 2608 } 2609 } 2610 2611 /* 2612 * To handle irqs with the minimum potential races with fresh interrupts, we: 2613 * 1 - Disable Master Interrupt Control. 2614 * 2 - Find the source(s) of the interrupt. 2615 * 3 - Clear the Interrupt Identity bits (IIR). 2616 * 4 - Process the interrupt(s) that had bits set in the IIRs. 
2617 * 5 - Re-enable Master Interrupt Control. 2618 */ 2619 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2620 { 2621 struct drm_device *dev = arg; 2622 struct drm_i915_private *dev_priv = to_i915(dev); 2623 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2624 irqreturn_t ret = IRQ_NONE; 2625 2626 if (!intel_irqs_enabled(dev_priv)) 2627 return IRQ_NONE; 2628 2629 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2630 disable_rpm_wakeref_asserts(dev_priv); 2631 2632 /* disable master interrupt before clearing iir */ 2633 de_ier = I915_READ(DEIER); 2634 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2635 2636 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2637 * interrupts will be stored on its back queue, and then we'll be 2638 * able to process them after we restore SDEIER (as soon as we restore 2639 * it, we'll get an interrupt if SDEIIR still has something to process 2640 * due to its back queue). */ 2641 if (!HAS_PCH_NOP(dev_priv)) { 2642 sde_ier = I915_READ(SDEIER); 2643 I915_WRITE(SDEIER, 0); 2644 } 2645 2646 /* Find, clear, then process each source of interrupt */ 2647 2648 gt_iir = I915_READ(GTIIR); 2649 if (gt_iir) { 2650 I915_WRITE(GTIIR, gt_iir); 2651 ret = IRQ_HANDLED; 2652 if (INTEL_GEN(dev_priv) >= 6) 2653 snb_gt_irq_handler(dev_priv, gt_iir); 2654 else 2655 ilk_gt_irq_handler(dev_priv, gt_iir); 2656 } 2657 2658 de_iir = I915_READ(DEIIR); 2659 if (de_iir) { 2660 I915_WRITE(DEIIR, de_iir); 2661 ret = IRQ_HANDLED; 2662 if (INTEL_GEN(dev_priv) >= 7) 2663 ivb_display_irq_handler(dev_priv, de_iir); 2664 else 2665 ilk_display_irq_handler(dev_priv, de_iir); 2666 } 2667 2668 if (INTEL_GEN(dev_priv) >= 6) { 2669 u32 pm_iir = I915_READ(GEN6_PMIIR); 2670 if (pm_iir) { 2671 I915_WRITE(GEN6_PMIIR, pm_iir); 2672 ret = IRQ_HANDLED; 2673 gen6_rps_irq_handler(dev_priv, pm_iir); 2674 } 2675 } 2676 2677 I915_WRITE(DEIER, de_ier); 2678 if (!HAS_PCH_NOP(dev_priv)) 2679 I915_WRITE(SDEIER, sde_ier); 2680 2681 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2682 enable_rpm_wakeref_asserts(dev_priv); 2683 2684 return ret; 2685 } 2686 2687 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2688 u32 hotplug_trigger, 2689 const u32 hpd[HPD_NUM_PINS]) 2690 { 2691 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2692 2693 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2694 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2695 2696 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, 2697 dig_hotplug_reg, hpd, 2698 bxt_port_hotplug_long_detect); 2699 2700 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2701 } 2702 2703 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 2704 { 2705 u32 pin_mask = 0, long_mask = 0; 2706 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; 2707 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; 2708 2709 if (trigger_tc) { 2710 u32 dig_hotplug_reg; 2711 2712 dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); 2713 I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); 2714 2715 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, 2716 dig_hotplug_reg, hpd_gen11, 2717 gen11_port_hotplug_long_detect); 2718 } 2719 2720 if (trigger_tbt) { 2721 u32 dig_hotplug_reg; 2722 2723 dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); 2724 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); 2725 2726 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, 2727 dig_hotplug_reg, hpd_gen11, 2728 gen11_port_hotplug_long_detect);
2729 } 2730 2731 if (pin_mask) 2732 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2733 else 2734 DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir); 2735 } 2736 2737 static irqreturn_t 2738 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2739 { 2740 irqreturn_t ret = IRQ_NONE; 2741 u32 iir; 2742 enum pipe pipe; 2743 2744 if (master_ctl & GEN8_DE_MISC_IRQ) { 2745 iir = I915_READ(GEN8_DE_MISC_IIR); 2746 if (iir) { 2747 bool found = false; 2748 2749 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2750 ret = IRQ_HANDLED; 2751 2752 if (iir & GEN8_DE_MISC_GSE) { 2753 intel_opregion_asle_intr(dev_priv); 2754 found = true; 2755 } 2756 2757 if (iir & GEN8_DE_EDP_PSR) { 2758 u32 psr_iir = I915_READ(EDP_PSR_IIR); 2759 2760 intel_psr_irq_handler(dev_priv, psr_iir); 2761 I915_WRITE(EDP_PSR_IIR, psr_iir); 2762 found = true; 2763 } 2764 2765 if (!found) 2766 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2767 } 2768 else 2769 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2770 } 2771 2772 if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { 2773 iir = I915_READ(GEN11_DE_HPD_IIR); 2774 if (iir) { 2775 I915_WRITE(GEN11_DE_HPD_IIR, iir); 2776 ret = IRQ_HANDLED; 2777 gen11_hpd_irq_handler(dev_priv, iir); 2778 } else { 2779 DRM_ERROR("The master control interrupt lied, (DE HPD)!\n"); 2780 } 2781 } 2782 2783 if (master_ctl & GEN8_DE_PORT_IRQ) { 2784 iir = I915_READ(GEN8_DE_PORT_IIR); 2785 if (iir) { 2786 u32 tmp_mask; 2787 bool found = false; 2788 2789 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2790 ret = IRQ_HANDLED; 2791 2792 tmp_mask = GEN8_AUX_CHANNEL_A; 2793 if (INTEL_GEN(dev_priv) >= 9) 2794 tmp_mask |= GEN9_AUX_CHANNEL_B | 2795 GEN9_AUX_CHANNEL_C | 2796 GEN9_AUX_CHANNEL_D; 2797 2798 if (INTEL_GEN(dev_priv) >= 11) 2799 tmp_mask |= ICL_AUX_CHANNEL_E; 2800 2801 if (IS_CNL_WITH_PORT_F(dev_priv) || 2802 INTEL_GEN(dev_priv) >= 11) 2803 tmp_mask |= CNL_AUX_CHANNEL_F; 2804 2805 if (iir & tmp_mask) { 2806 dp_aux_irq_handler(dev_priv); 2807 found = true; 2808 } 2809 2810 if (IS_GEN9_LP(dev_priv)) { 2811 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2812 if (tmp_mask) { 2813 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2814 hpd_bxt); 2815 found = true; 2816 } 2817 } else if (IS_BROADWELL(dev_priv)) { 2818 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2819 if (tmp_mask) { 2820 ilk_hpd_irq_handler(dev_priv, 2821 tmp_mask, hpd_bdw); 2822 found = true; 2823 } 2824 } 2825 2826 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2827 gmbus_irq_handler(dev_priv); 2828 found = true; 2829 } 2830 2831 if (!found) 2832 DRM_ERROR("Unexpected DE Port interrupt\n"); 2833 } 2834 else 2835 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2836 } 2837 2838 for_each_pipe(dev_priv, pipe) { 2839 u32 fault_errors; 2840 2841 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2842 continue; 2843 2844 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2845 if (!iir) { 2846 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2847 continue; 2848 } 2849 2850 ret = IRQ_HANDLED; 2851 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2852 2853 if (iir & GEN8_PIPE_VBLANK) 2854 drm_handle_vblank(&dev_priv->drm, pipe); 2855 2856 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2857 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2858 2859 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2860 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2861 2862 fault_errors = iir; 2863 if (INTEL_GEN(dev_priv) >= 9) 2864 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2865 else 2866 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2867 2868 if (fault_errors) 2869 
DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2870 pipe_name(pipe), 2871 fault_errors); 2872 } 2873 2874 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2875 master_ctl & GEN8_DE_PCH_IRQ) { 2876 /* 2877 * FIXME(BDW): Assume for now that the new interrupt handling 2878 * scheme also closed the SDE interrupt handling race we've seen 2879 * on older pch-split platforms. But this needs testing. 2880 */ 2881 iir = I915_READ(SDEIIR); 2882 if (iir) { 2883 I915_WRITE(SDEIIR, iir); 2884 ret = IRQ_HANDLED; 2885 2886 if (HAS_PCH_ICP(dev_priv)) 2887 icp_irq_handler(dev_priv, iir); 2888 else if (HAS_PCH_SPT(dev_priv) || 2889 HAS_PCH_KBP(dev_priv) || 2890 HAS_PCH_CNP(dev_priv)) 2891 spt_irq_handler(dev_priv, iir); 2892 else 2893 cpt_irq_handler(dev_priv, iir); 2894 } else { 2895 /* 2896 * Like on previous PCH there seems to be something 2897 * fishy going on with forwarding PCH interrupts. 2898 */ 2899 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2900 } 2901 } 2902 2903 return ret; 2904 } 2905 2906 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2907 { 2908 struct drm_i915_private *dev_priv = to_i915(arg); 2909 u32 master_ctl; 2910 u32 gt_iir[4]; 2911 2912 if (!intel_irqs_enabled(dev_priv)) 2913 return IRQ_NONE; 2914 2915 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2916 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2917 if (!master_ctl) 2918 return IRQ_NONE; 2919 2920 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2921 2922 /* Find, clear, then process each source of interrupt */ 2923 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2924 2925 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2926 if (master_ctl & ~GEN8_GT_IRQS) { 2927 disable_rpm_wakeref_asserts(dev_priv); 2928 gen8_de_irq_handler(dev_priv, master_ctl); 2929 enable_rpm_wakeref_asserts(dev_priv); 2930 } 2931 2932 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2933 2934 gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2935 2936 return IRQ_HANDLED; 2937 } 2938 2939 struct wedge_me { 2940 struct delayed_work work; 2941 struct drm_i915_private *i915; 2942 const char *name; 2943 }; 2944 2945 static void wedge_me(struct work_struct *work) 2946 { 2947 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2948 2949 dev_err(w->i915->drm.dev, 2950 "%s timed out, cancelling all in-flight rendering.\n", 2951 w->name); 2952 i915_gem_set_wedged(w->i915); 2953 } 2954 2955 static void __init_wedge(struct wedge_me *w, 2956 struct drm_i915_private *i915, 2957 long timeout, 2958 const char *name) 2959 { 2960 w->i915 = i915; 2961 w->name = name; 2962 2963 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2964 schedule_delayed_work(&w->work, timeout); 2965 } 2966 2967 static void __fini_wedge(struct wedge_me *w) 2968 { 2969 cancel_delayed_work_sync(&w->work); 2970 destroy_delayed_work_on_stack(&w->work); 2971 w->i915 = NULL; 2972 } 2973 2974 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2975 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2976 (W)->i915; \ 2977 __fini_wedge((W))) 2978 2979 static u32 2980 gen11_gt_engine_identity(struct drm_i915_private * const i915, 2981 const unsigned int bank, const unsigned int bit) 2982 { 2983 void __iomem * const regs = i915->regs; 2984 u32 timeout_ts; 2985 u32 ident; 2986 2987 lockdep_assert_held(&i915->irq_lock); 2988 2989 raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit)); 2990 2991 /* 2992 * NB: Specs do not specify how long to spin wait, 2993 * so we do ~100us as an educated guess. 
2994 */ 2995 timeout_ts = (local_clock() >> 10) + 100; 2996 do { 2997 ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank)); 2998 } while (!(ident & GEN11_INTR_DATA_VALID) && 2999 !time_after32(local_clock() >> 10, timeout_ts)); 3000 3001 if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) { 3002 DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n", 3003 bank, bit, ident); 3004 return 0; 3005 } 3006 3007 raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank), 3008 GEN11_INTR_DATA_VALID); 3009 3010 return ident; 3011 } 3012 3013 static void 3014 gen11_other_irq_handler(struct drm_i915_private * const i915, 3015 const u8 instance, const u16 iir) 3016 { 3017 if (instance == OTHER_GTPM_INSTANCE) 3018 return gen6_rps_irq_handler(i915, iir); 3019 3020 WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", 3021 instance, iir); 3022 } 3023 3024 static void 3025 gen11_engine_irq_handler(struct drm_i915_private * const i915, 3026 const u8 class, const u8 instance, const u16 iir) 3027 { 3028 struct intel_engine_cs *engine; 3029 3030 if (instance <= MAX_ENGINE_INSTANCE) 3031 engine = i915->engine_class[class][instance]; 3032 else 3033 engine = NULL; 3034 3035 if (likely(engine)) 3036 return gen8_cs_irq_handler(engine, iir); 3037 3038 WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n", 3039 class, instance); 3040 } 3041 3042 static void 3043 gen11_gt_identity_handler(struct drm_i915_private * const i915, 3044 const u32 identity) 3045 { 3046 const u8 class = GEN11_INTR_ENGINE_CLASS(identity); 3047 const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity); 3048 const u16 intr = GEN11_INTR_ENGINE_INTR(identity); 3049 3050 if (unlikely(!intr)) 3051 return; 3052 3053 if (class <= COPY_ENGINE_CLASS) 3054 return gen11_engine_irq_handler(i915, class, instance, intr); 3055 3056 if (class == OTHER_CLASS) 3057 return gen11_other_irq_handler(i915, instance, intr); 3058 3059 WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n", 3060 class, instance, intr); 3061 } 3062 3063 static void 3064 gen11_gt_bank_handler(struct drm_i915_private * const i915, 3065 const unsigned int bank) 3066 { 3067 void __iomem * const regs = i915->regs; 3068 unsigned long intr_dw; 3069 unsigned int bit; 3070 3071 lockdep_assert_held(&i915->irq_lock); 3072 3073 intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank)); 3074 3075 if (unlikely(!intr_dw)) { 3076 DRM_ERROR("GT_INTR_DW%u blank!\n", bank); 3077 return; 3078 } 3079 3080 for_each_set_bit(bit, &intr_dw, 32) { 3081 const u32 ident = gen11_gt_engine_identity(i915, 3082 bank, bit); 3083 3084 gen11_gt_identity_handler(i915, ident); 3085 } 3086 3087 /* Clear must be after shared has been served for engine */ 3088 raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw); 3089 } 3090 3091 static void 3092 gen11_gt_irq_handler(struct drm_i915_private * const i915, 3093 const u32 master_ctl) 3094 { 3095 unsigned int bank; 3096 3097 spin_lock(&i915->irq_lock); 3098 3099 for (bank = 0; bank < 2; bank++) { 3100 if (master_ctl & GEN11_GT_DW_IRQ(bank)) 3101 gen11_gt_bank_handler(i915, bank); 3102 } 3103 3104 spin_unlock(&i915->irq_lock); 3105 } 3106 3107 static u32 3108 gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl) 3109 { 3110 void __iomem * const regs = dev_priv->regs; 3111 u32 iir; 3112 3113 if (!(master_ctl & GEN11_GU_MISC_IRQ)) 3114 return 0; 3115 3116 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR); 3117 if (likely(iir)) 3118 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir); 3119 3120 return iir; 3121 } 3122 3123 static void 3124 
gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir) 3125 { 3126 if (iir & GEN11_GU_MISC_GSE) 3127 intel_opregion_asle_intr(dev_priv); 3128 } 3129 3130 static irqreturn_t gen11_irq_handler(int irq, void *arg) 3131 { 3132 struct drm_i915_private * const i915 = to_i915(arg); 3133 void __iomem * const regs = i915->regs; 3134 u32 master_ctl; 3135 u32 gu_misc_iir; 3136 3137 if (!intel_irqs_enabled(i915)) 3138 return IRQ_NONE; 3139 3140 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ); 3141 master_ctl &= ~GEN11_MASTER_IRQ; 3142 if (!master_ctl) 3143 return IRQ_NONE; 3144 3145 /* Disable interrupts. */ 3146 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0); 3147 3148 /* Find, clear, then process each source of interrupt. */ 3149 gen11_gt_irq_handler(i915, master_ctl); 3150 3151 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3152 if (master_ctl & GEN11_DISPLAY_IRQ) { 3153 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL); 3154 3155 disable_rpm_wakeref_asserts(i915); 3156 /* 3157 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ 3158 * for the display related bits. 3159 */ 3160 gen8_de_irq_handler(i915, disp_ctl); 3161 enable_rpm_wakeref_asserts(i915); 3162 } 3163 3164 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); 3165 3166 /* Acknowledge and enable interrupts. */ 3167 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl); 3168 3169 gen11_gu_misc_irq_handler(i915, gu_misc_iir); 3170 3171 return IRQ_HANDLED; 3172 } 3173 3174 static void i915_reset_device(struct drm_i915_private *dev_priv, 3175 u32 engine_mask, 3176 const char *reason) 3177 { 3178 struct i915_gpu_error *error = &dev_priv->gpu_error; 3179 #ifdef notyet 3180 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 3181 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 3182 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 3183 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 3184 #endif 3185 struct wedge_me w; 3186 3187 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 3188 3189 DRM_DEBUG_DRIVER("resetting chip\n"); 3190 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 3191 3192 /* Use a watchdog to ensure that our reset completes */ 3193 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 3194 intel_prepare_reset(dev_priv); 3195 3196 error->reason = reason; 3197 error->stalled_mask = engine_mask; 3198 3199 /* Signal that locked waiters should reset the GPU */ 3200 smp_mb__before_atomic(); 3201 set_bit(I915_RESET_HANDOFF, &error->flags); 3202 wake_up_all(&error->wait_queue); 3203 3204 /* Wait for anyone holding the lock to wakeup, without 3205 * blocking indefinitely on struct_mutex. 
3206 */ 3207 do { 3208 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 3209 i915_reset(dev_priv, engine_mask, reason); 3210 mutex_unlock(&dev_priv->drm.struct_mutex); 3211 } 3212 } while (wait_on_bit_timeout(&error->flags, 3213 I915_RESET_HANDOFF, 3214 TASK_UNINTERRUPTIBLE, 3215 1)); 3216 3217 error->stalled_mask = 0; 3218 error->reason = NULL; 3219 3220 intel_finish_reset(dev_priv); 3221 } 3222 3223 if (!test_bit(I915_WEDGED, &error->flags)) 3224 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event); 3225 } 3226 3227 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 3228 { 3229 u32 eir; 3230 3231 if (!IS_GEN2(dev_priv)) 3232 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 3233 3234 if (INTEL_GEN(dev_priv) < 4) 3235 I915_WRITE(IPEIR, I915_READ(IPEIR)); 3236 else 3237 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 3238 3239 I915_WRITE(EIR, I915_READ(EIR)); 3240 eir = I915_READ(EIR); 3241 if (eir) { 3242 /* 3243 * some errors might have become stuck, 3244 * mask them. 3245 */ 3246 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 3247 I915_WRITE(EMR, I915_READ(EMR) | eir); 3248 I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT); 3249 } 3250 } 3251 3252 /** 3253 * i915_handle_error - handle a gpu error 3254 * @dev_priv: i915 device private 3255 * @engine_mask: mask representing engines that are hung 3256 * @flags: control flags 3257 * @fmt: Error message format string 3258 * 3259 * Do some basic checking of register state at error time and 3260 * dump it to the syslog. Also call i915_capture_error_state() to make 3261 * sure we get a record and make it available in debugfs. Fire a uevent 3262 * so userspace knows something bad happened (should trigger collection 3263 * of a ring dump etc.). 3264 */ 3265 void i915_handle_error(struct drm_i915_private *dev_priv, 3266 u32 engine_mask, 3267 unsigned long flags, 3268 const char *fmt, ...) 3269 { 3270 struct intel_engine_cs *engine; 3271 unsigned int tmp; 3272 char error_msg[80]; 3273 char *msg = NULL; 3274 3275 if (fmt) { 3276 va_list args; 3277 3278 va_start(args, fmt); 3279 vsnprintf(error_msg, sizeof(error_msg), fmt, args); 3280 va_end(args); 3281 3282 msg = error_msg; 3283 } 3284 3285 /* 3286 * In most cases it's guaranteed that we get here with an RPM 3287 * reference held, for example because there is a pending GPU 3288 * request that won't finish until the reset is done. This 3289 * isn't the case at least when we get here by doing a 3290 * simulated reset via debugfs, so get an RPM reference. 3291 */ 3292 intel_runtime_pm_get(dev_priv); 3293 3294 engine_mask &= INTEL_INFO(dev_priv)->ring_mask; 3295 3296 if (flags & I915_ERROR_CAPTURE) { 3297 i915_capture_error_state(dev_priv, engine_mask, msg); 3298 i915_clear_error_registers(dev_priv); 3299 } 3300 3301 /* 3302 * Try engine reset when available. We fall back to full reset if 3303 * single reset fails. 
3304 */ 3305 if (intel_has_reset_engine(dev_priv)) { 3306 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 3307 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 3308 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3309 &dev_priv->gpu_error.flags)) 3310 continue; 3311 3312 if (i915_reset_engine(engine, msg) == 0) 3313 engine_mask &= ~intel_engine_flag(engine); 3314 3315 clear_bit(I915_RESET_ENGINE + engine->id, 3316 &dev_priv->gpu_error.flags); 3317 wake_up_bit(&dev_priv->gpu_error.flags, 3318 I915_RESET_ENGINE + engine->id); 3319 } 3320 } 3321 3322 if (!engine_mask) 3323 goto out; 3324 3325 /* Full reset needs the mutex, stop any other user trying to do so. */ 3326 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 3327 wait_event(dev_priv->gpu_error.reset_queue, 3328 !test_bit(I915_RESET_BACKOFF, 3329 &dev_priv->gpu_error.flags)); 3330 goto out; 3331 } 3332 3333 /* Prevent any other reset-engine attempt. */ 3334 for_each_engine(engine, dev_priv, tmp) { 3335 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 3336 &dev_priv->gpu_error.flags)) 3337 wait_on_bit(&dev_priv->gpu_error.flags, 3338 I915_RESET_ENGINE + engine->id, 3339 TASK_UNINTERRUPTIBLE); 3340 } 3341 3342 i915_reset_device(dev_priv, engine_mask, msg); 3343 3344 for_each_engine(engine, dev_priv, tmp) { 3345 clear_bit(I915_RESET_ENGINE + engine->id, 3346 &dev_priv->gpu_error.flags); 3347 } 3348 3349 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 3350 wake_up_all(&dev_priv->gpu_error.reset_queue); 3351 3352 out: 3353 intel_runtime_pm_put(dev_priv); 3354 } 3355 3356 /* Called from drm generic code, passed 'crtc' which 3357 * we use as a pipe index 3358 */ 3359 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 3360 { 3361 struct drm_i915_private *dev_priv = to_i915(dev); 3362 unsigned long irqflags; 3363 3364 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3365 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3366 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3367 3368 return 0; 3369 } 3370 3371 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 3372 { 3373 struct drm_i915_private *dev_priv = to_i915(dev); 3374 unsigned long irqflags; 3375 3376 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3377 i915_enable_pipestat(dev_priv, pipe, 3378 PIPE_START_VBLANK_INTERRUPT_STATUS); 3379 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3380 3381 return 0; 3382 } 3383 3384 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 3385 { 3386 struct drm_i915_private *dev_priv = to_i915(dev); 3387 unsigned long irqflags; 3388 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3389 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3390 3391 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3392 ilk_enable_display_irq(dev_priv, bit); 3393 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3394 3395 /* Even though there is no DMC, frame counter can get stuck when 3396 * PSR is active as no frames are generated. 
3397 */ 3398 if (HAS_PSR(dev_priv)) 3399 drm_vblank_restore(dev, pipe); 3400 3401 return 0; 3402 } 3403 3404 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 3405 { 3406 struct drm_i915_private *dev_priv = to_i915(dev); 3407 unsigned long irqflags; 3408 3409 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3410 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3411 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3412 3413 /* Even if there is no DMC, frame counter can get stuck when 3414 * PSR is active as no frames are generated, so check only for PSR. 3415 */ 3416 if (HAS_PSR(dev_priv)) 3417 drm_vblank_restore(dev, pipe); 3418 3419 return 0; 3420 } 3421 3422 /* Called from drm generic code, passed 'crtc' which 3423 * we use as a pipe index 3424 */ 3425 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 3426 { 3427 struct drm_i915_private *dev_priv = to_i915(dev); 3428 unsigned long irqflags; 3429 3430 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3431 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 3432 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3433 } 3434 3435 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 3436 { 3437 struct drm_i915_private *dev_priv = to_i915(dev); 3438 unsigned long irqflags; 3439 3440 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3441 i915_disable_pipestat(dev_priv, pipe, 3442 PIPE_START_VBLANK_INTERRUPT_STATUS); 3443 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3444 } 3445 3446 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 3447 { 3448 struct drm_i915_private *dev_priv = to_i915(dev); 3449 unsigned long irqflags; 3450 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 3451 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3452 3453 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3454 ilk_disable_display_irq(dev_priv, bit); 3455 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3456 } 3457 3458 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3459 { 3460 struct drm_i915_private *dev_priv = to_i915(dev); 3461 unsigned long irqflags; 3462 3463 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3464 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3465 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3466 } 3467 3468 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3469 { 3470 if (HAS_PCH_NOP(dev_priv)) 3471 return; 3472 3473 GEN3_IRQ_RESET(SDE); 3474 3475 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3476 I915_WRITE(SERR_INT, 0xffffffff); 3477 } 3478 3479 /* 3480 * SDEIER is also touched by the interrupt handler to work around missed PCH 3481 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3482 * instead we unconditionally enable all PCH interrupt sources here, but then 3483 * only unmask them as needed with SDEIMR. 3484 * 3485 * This function needs to be called before interrupts are enabled. 
3486 */ 3487 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3488 { 3489 struct drm_i915_private *dev_priv = to_i915(dev); 3490 3491 if (HAS_PCH_NOP(dev_priv)) 3492 return; 3493 3494 WARN_ON(I915_READ(SDEIER) != 0); 3495 I915_WRITE(SDEIER, 0xffffffff); 3496 POSTING_READ(SDEIER); 3497 } 3498 3499 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3500 { 3501 GEN3_IRQ_RESET(GT); 3502 if (INTEL_GEN(dev_priv) >= 6) 3503 GEN3_IRQ_RESET(GEN6_PM); 3504 } 3505 3506 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3507 { 3508 if (IS_CHERRYVIEW(dev_priv)) 3509 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3510 else 3511 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3512 3513 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3514 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3515 3516 i9xx_pipestat_irq_reset(dev_priv); 3517 3518 GEN3_IRQ_RESET(VLV_); 3519 dev_priv->irq_mask = ~0u; 3520 } 3521 3522 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3523 { 3524 u32 pipestat_mask; 3525 u32 enable_mask; 3526 enum pipe pipe; 3527 3528 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3529 3530 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3531 for_each_pipe(dev_priv, pipe) 3532 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3533 3534 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3535 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3536 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3537 I915_LPE_PIPE_A_INTERRUPT | 3538 I915_LPE_PIPE_B_INTERRUPT; 3539 3540 if (IS_CHERRYVIEW(dev_priv)) 3541 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3542 I915_LPE_PIPE_C_INTERRUPT; 3543 3544 WARN_ON(dev_priv->irq_mask != ~0u); 3545 3546 dev_priv->irq_mask = ~enable_mask; 3547 3548 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3549 } 3550 3551 /* drm_dma.h hooks 3552 */ 3553 static void ironlake_irq_reset(struct drm_device *dev) 3554 { 3555 struct drm_i915_private *dev_priv = to_i915(dev); 3556 3557 if (IS_GEN5(dev_priv)) 3558 I915_WRITE(HWSTAM, 0xffffffff); 3559 3560 GEN3_IRQ_RESET(DE); 3561 if (IS_GEN7(dev_priv)) 3562 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3563 3564 if (IS_HASWELL(dev_priv)) { 3565 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3566 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3567 } 3568 3569 gen5_gt_irq_reset(dev_priv); 3570 3571 ibx_irq_reset(dev_priv); 3572 } 3573 3574 static void valleyview_irq_reset(struct drm_device *dev) 3575 { 3576 struct drm_i915_private *dev_priv = to_i915(dev); 3577 3578 I915_WRITE(VLV_MASTER_IER, 0); 3579 POSTING_READ(VLV_MASTER_IER); 3580 3581 gen5_gt_irq_reset(dev_priv); 3582 3583 spin_lock_irq(&dev_priv->irq_lock); 3584 if (dev_priv->display_irqs_enabled) 3585 vlv_display_irq_reset(dev_priv); 3586 spin_unlock_irq(&dev_priv->irq_lock); 3587 } 3588 3589 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3590 { 3591 GEN8_IRQ_RESET_NDX(GT, 0); 3592 GEN8_IRQ_RESET_NDX(GT, 1); 3593 GEN8_IRQ_RESET_NDX(GT, 2); 3594 GEN8_IRQ_RESET_NDX(GT, 3); 3595 } 3596 3597 static void gen8_irq_reset(struct drm_device *dev) 3598 { 3599 struct drm_i915_private *dev_priv = to_i915(dev); 3600 int pipe; 3601 3602 I915_WRITE(GEN8_MASTER_IRQ, 0); 3603 POSTING_READ(GEN8_MASTER_IRQ); 3604 3605 gen8_gt_irq_reset(dev_priv); 3606 3607 I915_WRITE(EDP_PSR_IMR, 0xffffffff); 3608 I915_WRITE(EDP_PSR_IIR, 0xffffffff); 3609 3610 for_each_pipe(dev_priv, pipe) 3611 if (intel_display_power_is_enabled(dev_priv, 3612 POWER_DOMAIN_PIPE(pipe))) 3613 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3614 3615 
GEN3_IRQ_RESET(GEN8_DE_PORT_); 3616 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3617 GEN3_IRQ_RESET(GEN8_PCU_); 3618 3619 if (HAS_PCH_SPLIT(dev_priv)) 3620 ibx_irq_reset(dev_priv); 3621 } 3622 3623 static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv) 3624 { 3625 /* Disable RCS, BCS, VCS and VECS class engines. */ 3626 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, 0); 3627 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, 0); 3628 3629 /* Restore masks irqs on RCS, BCS, VCS and VECS engines. */ 3630 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~0); 3631 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~0); 3632 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~0); 3633 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~0); 3634 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~0); 3635 3636 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 3637 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 3638 } 3639 3640 static void gen11_irq_reset(struct drm_device *dev) 3641 { 3642 struct drm_i915_private *dev_priv = dev->dev_private; 3643 int pipe; 3644 3645 I915_WRITE(GEN11_GFX_MSTR_IRQ, 0); 3646 POSTING_READ(GEN11_GFX_MSTR_IRQ); 3647 3648 gen11_gt_irq_reset(dev_priv); 3649 3650 I915_WRITE(GEN11_DISPLAY_INT_CTL, 0); 3651 3652 for_each_pipe(dev_priv, pipe) 3653 if (intel_display_power_is_enabled(dev_priv, 3654 POWER_DOMAIN_PIPE(pipe))) 3655 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3656 3657 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3658 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3659 GEN3_IRQ_RESET(GEN11_DE_HPD_); 3660 GEN3_IRQ_RESET(GEN11_GU_MISC_); 3661 GEN3_IRQ_RESET(GEN8_PCU_); 3662 3663 if (HAS_PCH_ICP(dev_priv)) 3664 GEN3_IRQ_RESET(SDE); 3665 } 3666 3667 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3668 u8 pipe_mask) 3669 { 3670 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3671 enum pipe pipe; 3672 3673 spin_lock_irq(&dev_priv->irq_lock); 3674 3675 if (!intel_irqs_enabled(dev_priv)) { 3676 spin_unlock_irq(&dev_priv->irq_lock); 3677 return; 3678 } 3679 3680 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3681 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3682 dev_priv->de_irq_mask[pipe], 3683 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3684 3685 spin_unlock_irq(&dev_priv->irq_lock); 3686 } 3687 3688 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3689 u8 pipe_mask) 3690 { 3691 enum pipe pipe; 3692 3693 spin_lock_irq(&dev_priv->irq_lock); 3694 3695 if (!intel_irqs_enabled(dev_priv)) { 3696 spin_unlock_irq(&dev_priv->irq_lock); 3697 return; 3698 } 3699 3700 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3701 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3702 3703 spin_unlock_irq(&dev_priv->irq_lock); 3704 3705 /* make sure we're done processing display irqs */ 3706 synchronize_irq(dev_priv->drm.irq); 3707 } 3708 3709 static void cherryview_irq_reset(struct drm_device *dev) 3710 { 3711 struct drm_i915_private *dev_priv = to_i915(dev); 3712 3713 I915_WRITE(GEN8_MASTER_IRQ, 0); 3714 POSTING_READ(GEN8_MASTER_IRQ); 3715 3716 gen8_gt_irq_reset(dev_priv); 3717 3718 GEN3_IRQ_RESET(GEN8_PCU_); 3719 3720 spin_lock_irq(&dev_priv->irq_lock); 3721 if (dev_priv->display_irqs_enabled) 3722 vlv_display_irq_reset(dev_priv); 3723 spin_unlock_irq(&dev_priv->irq_lock); 3724 } 3725 3726 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3727 const u32 hpd[HPD_NUM_PINS]) 3728 { 3729 struct intel_encoder *encoder; 3730 u32 enabled_irqs = 0; 3731 3732 for_each_intel_encoder(&dev_priv->drm, encoder) 3733 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3734 enabled_irqs |= hpd[encoder->hpd_pin]; 3735 3736 return enabled_irqs; 3737 } 
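/*
 * Rough shape of the hotplug setup helpers that follow, summarising the
 * code below for orientation: each platform's *_hpd_irq_setup() first asks
 * intel_hpd_enabled_irqs() which trigger bits to unmask, based on the
 * per-encoder pin state, then unmasks them in the relevant IMR (e.g. via
 * ibx_display_interrupt_update() or bdw_update_port_irq()), and finally
 * programs the hotplug detection control register (enable, pulse duration,
 * invert bits) in the matching *_hpd_detection_setup() helper. For the
 * IBX case this boils down to:
 *
 *	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
 *	ibx_display_interrupt_update(dev_priv, SDE_HOTPLUG_MASK, enabled_irqs);
 *	ibx_hpd_detection_setup(dev_priv);
 */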
3738 3739 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3740 { 3741 u32 hotplug; 3742 3743 /* 3744 * Enable digital hotplug on the PCH, and configure the DP short pulse 3745 * duration to 2ms (which is the minimum in the Display Port spec). 3746 * The pulse duration bits are reserved on LPT+. 3747 */ 3748 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3749 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3750 PORTC_PULSE_DURATION_MASK | 3751 PORTD_PULSE_DURATION_MASK); 3752 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3753 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3754 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3755 /* 3756 * When CPU and PCH are on the same package, port A 3757 * HPD must be enabled in both north and south. 3758 */ 3759 if (HAS_PCH_LPT_LP(dev_priv)) 3760 hotplug |= PORTA_HOTPLUG_ENABLE; 3761 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3762 } 3763 3764 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3765 { 3766 u32 hotplug_irqs, enabled_irqs; 3767 3768 if (HAS_PCH_IBX(dev_priv)) { 3769 hotplug_irqs = SDE_HOTPLUG_MASK; 3770 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3771 } else { 3772 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3773 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3774 } 3775 3776 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3777 3778 ibx_hpd_detection_setup(dev_priv); 3779 } 3780 3781 static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv) 3782 { 3783 u32 hotplug; 3784 3785 hotplug = I915_READ(SHOTPLUG_CTL_DDI); 3786 hotplug |= ICP_DDIA_HPD_ENABLE | 3787 ICP_DDIB_HPD_ENABLE; 3788 I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); 3789 3790 hotplug = I915_READ(SHOTPLUG_CTL_TC); 3791 hotplug |= ICP_TC_HPD_ENABLE(PORT_TC1) | 3792 ICP_TC_HPD_ENABLE(PORT_TC2) | 3793 ICP_TC_HPD_ENABLE(PORT_TC3) | 3794 ICP_TC_HPD_ENABLE(PORT_TC4); 3795 I915_WRITE(SHOTPLUG_CTL_TC, hotplug); 3796 } 3797 3798 static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) 3799 { 3800 u32 hotplug_irqs, enabled_irqs; 3801 3802 hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; 3803 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); 3804 3805 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3806 3807 icp_hpd_detection_setup(dev_priv); 3808 } 3809 3810 static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) 3811 { 3812 u32 hotplug; 3813 3814 hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); 3815 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3816 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3817 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3818 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3819 I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); 3820 3821 hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); 3822 hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) | 3823 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) | 3824 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) | 3825 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4); 3826 I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); 3827 } 3828 3829 static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) 3830 { 3831 u32 hotplug_irqs, enabled_irqs; 3832 u32 val; 3833 3834 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_gen11); 3835 hotplug_irqs = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; 3836 3837 val = I915_READ(GEN11_DE_HPD_IMR); 3838 val &= ~hotplug_irqs; 3839 I915_WRITE(GEN11_DE_HPD_IMR, val); 3840 POSTING_READ(GEN11_DE_HPD_IMR); 3841 3842 gen11_hpd_detection_setup(dev_priv); 3843 3844 if (HAS_PCH_ICP(dev_priv)) 3845 icp_hpd_irq_setup(dev_priv); 3846 } 
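/*
 * On SPT and later PCHs port E has its own control/status register,
 * PCH_PORT_HOTPLUG2, separate from ports A-D in PCH_PORT_HOTPLUG. That is
 * why the detection setup below enables PORTE_HOTPLUG_ENABLE with a second
 * register write, and why the interrupt path earlier decodes it with
 * spt_port_hotplug2_long_detect() rather than spt_port_hotplug_long_detect().
 */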
3847 3848 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3849 { 3850 u32 val, hotplug; 3851 3852 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3853 if (HAS_PCH_CNP(dev_priv)) { 3854 val = I915_READ(SOUTH_CHICKEN1); 3855 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3856 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3857 I915_WRITE(SOUTH_CHICKEN1, val); 3858 } 3859 3860 /* Enable digital hotplug on the PCH */ 3861 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3862 hotplug |= PORTA_HOTPLUG_ENABLE | 3863 PORTB_HOTPLUG_ENABLE | 3864 PORTC_HOTPLUG_ENABLE | 3865 PORTD_HOTPLUG_ENABLE; 3866 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3867 3868 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3869 hotplug |= PORTE_HOTPLUG_ENABLE; 3870 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3871 } 3872 3873 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3874 { 3875 u32 hotplug_irqs, enabled_irqs; 3876 3877 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3878 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3879 3880 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3881 3882 spt_hpd_detection_setup(dev_priv); 3883 } 3884 3885 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3886 { 3887 u32 hotplug; 3888 3889 /* 3890 * Enable digital hotplug on the CPU, and configure the DP short pulse 3891 * duration to 2ms (which is the minimum in the Display Port spec) 3892 * The pulse duration bits are reserved on HSW+. 3893 */ 3894 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3895 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3896 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3897 DIGITAL_PORTA_PULSE_DURATION_2ms; 3898 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3899 } 3900 3901 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3902 { 3903 u32 hotplug_irqs, enabled_irqs; 3904 3905 if (INTEL_GEN(dev_priv) >= 8) { 3906 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3907 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3908 3909 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3910 } else if (INTEL_GEN(dev_priv) >= 7) { 3911 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3912 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3913 3914 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3915 } else { 3916 hotplug_irqs = DE_DP_A_HOTPLUG; 3917 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3918 3919 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3920 } 3921 3922 ilk_hpd_detection_setup(dev_priv); 3923 3924 ibx_hpd_irq_setup(dev_priv); 3925 } 3926 3927 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3928 u32 enabled_irqs) 3929 { 3930 u32 hotplug; 3931 3932 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3933 hotplug |= PORTA_HOTPLUG_ENABLE | 3934 PORTB_HOTPLUG_ENABLE | 3935 PORTC_HOTPLUG_ENABLE; 3936 3937 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3938 hotplug, enabled_irqs); 3939 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3940 3941 /* 3942 * For BXT invert bit has to be set based on AOB design 3943 * for HPD detection logic, update it based on VBT fields. 
3944 */ 3945 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3946 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3947 hotplug |= BXT_DDIA_HPD_INVERT; 3948 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3949 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3950 hotplug |= BXT_DDIB_HPD_INVERT; 3951 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3952 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3953 hotplug |= BXT_DDIC_HPD_INVERT; 3954 3955 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3956 } 3957 3958 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3959 { 3960 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3961 } 3962 3963 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3964 { 3965 u32 hotplug_irqs, enabled_irqs; 3966 3967 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3968 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3969 3970 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3971 3972 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3973 } 3974 3975 static void ibx_irq_postinstall(struct drm_device *dev) 3976 { 3977 struct drm_i915_private *dev_priv = to_i915(dev); 3978 u32 mask; 3979 3980 if (HAS_PCH_NOP(dev_priv)) 3981 return; 3982 3983 if (HAS_PCH_IBX(dev_priv)) 3984 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3985 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3986 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3987 else 3988 mask = SDE_GMBUS_CPT; 3989 3990 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3991 I915_WRITE(SDEIMR, ~mask); 3992 3993 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3994 HAS_PCH_LPT(dev_priv)) 3995 ibx_hpd_detection_setup(dev_priv); 3996 else 3997 spt_hpd_detection_setup(dev_priv); 3998 } 3999 4000 static void gen5_gt_irq_postinstall(struct drm_device *dev) 4001 { 4002 struct drm_i915_private *dev_priv = to_i915(dev); 4003 u32 pm_irqs, gt_irqs; 4004 4005 pm_irqs = gt_irqs = 0; 4006 4007 dev_priv->gt_irq_mask = ~0; 4008 if (HAS_L3_DPF(dev_priv)) { 4009 /* L3 parity interrupt is always unmasked. */ 4010 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 4011 gt_irqs |= GT_PARITY_ERROR(dev_priv); 4012 } 4013 4014 gt_irqs |= GT_RENDER_USER_INTERRUPT; 4015 if (IS_GEN5(dev_priv)) { 4016 gt_irqs |= ILK_BSD_USER_INTERRUPT; 4017 } else { 4018 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 4019 } 4020 4021 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 4022 4023 if (INTEL_GEN(dev_priv) >= 6) { 4024 /* 4025 * RPS interrupts will get enabled/disabled on demand when RPS 4026 * itself is enabled/disabled. 
4027 */ 4028 if (HAS_VEBOX(dev_priv)) { 4029 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 4030 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 4031 } 4032 4033 dev_priv->pm_imr = 0xffffffff; 4034 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 4035 } 4036 } 4037 4038 static int ironlake_irq_postinstall(struct drm_device *dev) 4039 { 4040 struct drm_i915_private *dev_priv = to_i915(dev); 4041 u32 display_mask, extra_mask; 4042 4043 if (INTEL_GEN(dev_priv) >= 7) { 4044 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 4045 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 4046 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 4047 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 4048 DE_DP_A_HOTPLUG_IVB); 4049 } else { 4050 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 4051 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 4052 DE_PIPEA_CRC_DONE | DE_POISON); 4053 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 4054 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 4055 DE_DP_A_HOTPLUG); 4056 } 4057 4058 if (IS_HASWELL(dev_priv)) { 4059 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 4060 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4061 display_mask |= DE_EDP_PSR_INT_HSW; 4062 } 4063 4064 dev_priv->irq_mask = ~display_mask; 4065 4066 ibx_irq_pre_postinstall(dev); 4067 4068 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 4069 4070 gen5_gt_irq_postinstall(dev); 4071 4072 ilk_hpd_detection_setup(dev_priv); 4073 4074 ibx_irq_postinstall(dev); 4075 4076 if (IS_IRONLAKE_M(dev_priv)) { 4077 /* Enable PCU event interrupts 4078 * 4079 * spinlocking not required here for correctness since interrupt 4080 * setup is guaranteed to run in single-threaded context. But we 4081 * need it to make the assert_spin_locked happy. */ 4082 spin_lock_irq(&dev_priv->irq_lock); 4083 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 4084 spin_unlock_irq(&dev_priv->irq_lock); 4085 } 4086 4087 return 0; 4088 } 4089 4090 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 4091 { 4092 lockdep_assert_held(&dev_priv->irq_lock); 4093 4094 if (dev_priv->display_irqs_enabled) 4095 return; 4096 4097 dev_priv->display_irqs_enabled = true; 4098 4099 if (intel_irqs_enabled(dev_priv)) { 4100 vlv_display_irq_reset(dev_priv); 4101 vlv_display_irq_postinstall(dev_priv); 4102 } 4103 } 4104 4105 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 4106 { 4107 lockdep_assert_held(&dev_priv->irq_lock); 4108 4109 if (!dev_priv->display_irqs_enabled) 4110 return; 4111 4112 dev_priv->display_irqs_enabled = false; 4113 4114 if (intel_irqs_enabled(dev_priv)) 4115 vlv_display_irq_reset(dev_priv); 4116 } 4117 4118 4119 static int valleyview_irq_postinstall(struct drm_device *dev) 4120 { 4121 struct drm_i915_private *dev_priv = to_i915(dev); 4122 4123 gen5_gt_irq_postinstall(dev); 4124 4125 spin_lock_irq(&dev_priv->irq_lock); 4126 if (dev_priv->display_irqs_enabled) 4127 vlv_display_irq_postinstall(dev_priv); 4128 spin_unlock_irq(&dev_priv->irq_lock); 4129 4130 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 4131 POSTING_READ(VLV_MASTER_IER); 4132 4133 return 0; 4134 } 4135 4136 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4137 { 4138 /* These are interrupts we'll toggle with the ring mask register */ 4139 uint32_t gt_interrupts[] = { 4140 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 4141 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 4142 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 4143 GT_CONTEXT_SWITCH_INTERRUPT << 
GEN8_BCS_IRQ_SHIFT, 4144 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 4145 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 4146 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 4147 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 4148 0, 4149 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 4150 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 4151 }; 4152 4153 if (HAS_L3_DPF(dev_priv)) 4154 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 4155 4156 dev_priv->pm_ier = 0x0; 4157 dev_priv->pm_imr = ~dev_priv->pm_ier; 4158 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 4159 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 4160 /* 4161 * RPS interrupts will get enabled/disabled on demand when RPS itself 4162 * is enabled/disabled. Same will be the case for GuC interrupts. 4163 */ 4164 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 4165 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 4166 } 4167 4168 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 4169 { 4170 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 4171 uint32_t de_pipe_enables; 4172 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 4173 u32 de_port_enables; 4174 u32 de_misc_masked = GEN8_DE_EDP_PSR; 4175 enum pipe pipe; 4176 4177 if (INTEL_GEN(dev_priv) <= 10) 4178 de_misc_masked |= GEN8_DE_MISC_GSE; 4179 4180 if (INTEL_GEN(dev_priv) >= 9) { 4181 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 4182 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 4183 GEN9_AUX_CHANNEL_D; 4184 if (IS_GEN9_LP(dev_priv)) 4185 de_port_masked |= BXT_DE_PORT_GMBUS; 4186 } else { 4187 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 4188 } 4189 4190 if (INTEL_GEN(dev_priv) >= 11) 4191 de_port_masked |= ICL_AUX_CHANNEL_E; 4192 4193 if (IS_CNL_WITH_PORT_F(dev_priv) || INTEL_GEN(dev_priv) >= 11) 4194 de_port_masked |= CNL_AUX_CHANNEL_F; 4195 4196 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 4197 GEN8_PIPE_FIFO_UNDERRUN; 4198 4199 de_port_enables = de_port_masked; 4200 if (IS_GEN9_LP(dev_priv)) 4201 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 4202 else if (IS_BROADWELL(dev_priv)) 4203 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 4204 4205 gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR); 4206 intel_psr_irq_control(dev_priv, dev_priv->psr.debug); 4207 4208 for_each_pipe(dev_priv, pipe) { 4209 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 4210 4211 if (intel_display_power_is_enabled(dev_priv, 4212 POWER_DOMAIN_PIPE(pipe))) 4213 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 4214 dev_priv->de_irq_mask[pipe], 4215 de_pipe_enables); 4216 } 4217 4218 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 4219 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 4220 4221 if (INTEL_GEN(dev_priv) >= 11) { 4222 u32 de_hpd_masked = 0; 4223 u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | 4224 GEN11_DE_TBT_HOTPLUG_MASK; 4225 4226 GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables); 4227 gen11_hpd_detection_setup(dev_priv); 4228 } else if (IS_GEN9_LP(dev_priv)) { 4229 bxt_hpd_detection_setup(dev_priv); 4230 } else if (IS_BROADWELL(dev_priv)) { 4231 ilk_hpd_detection_setup(dev_priv); 4232 } 4233 } 4234 4235 static int gen8_irq_postinstall(struct drm_device *dev) 4236 { 4237 struct drm_i915_private *dev_priv = to_i915(dev); 4238 4239 if (HAS_PCH_SPLIT(dev_priv)) 4240 ibx_irq_pre_postinstall(dev); 4241 4242 gen8_gt_irq_postinstall(dev_priv); 4243 gen8_de_irq_postinstall(dev_priv); 4244 4245 if (HAS_PCH_SPLIT(dev_priv)) 4246
ibx_irq_postinstall(dev); 4247 4248 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4249 POSTING_READ(GEN8_MASTER_IRQ); 4250 4251 return 0; 4252 } 4253 4254 static void gen11_gt_irq_postinstall(struct drm_i915_private *dev_priv) 4255 { 4256 const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT; 4257 4258 BUILD_BUG_ON(irqs & 0xffff0000); 4259 4260 /* Enable RCS, BCS, VCS and VECS class interrupts. */ 4261 I915_WRITE(GEN11_RENDER_COPY_INTR_ENABLE, irqs << 16 | irqs); 4262 I915_WRITE(GEN11_VCS_VECS_INTR_ENABLE, irqs << 16 | irqs); 4263 4264 /* Unmask irqs on RCS, BCS, VCS and VECS engines. */ 4265 I915_WRITE(GEN11_RCS0_RSVD_INTR_MASK, ~(irqs << 16)); 4266 I915_WRITE(GEN11_BCS_RSVD_INTR_MASK, ~(irqs << 16)); 4267 I915_WRITE(GEN11_VCS0_VCS1_INTR_MASK, ~(irqs | irqs << 16)); 4268 I915_WRITE(GEN11_VCS2_VCS3_INTR_MASK, ~(irqs | irqs << 16)); 4269 I915_WRITE(GEN11_VECS0_VECS1_INTR_MASK, ~(irqs | irqs << 16)); 4270 4271 /* 4272 * RPS interrupts will get enabled/disabled on demand when RPS itself 4273 * is enabled/disabled. 4274 */ 4275 dev_priv->pm_ier = 0x0; 4276 dev_priv->pm_imr = ~dev_priv->pm_ier; 4277 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0); 4278 I915_WRITE(GEN11_GPM_WGBOXPERF_INTR_MASK, ~0); 4279 } 4280 4281 static void icp_irq_postinstall(struct drm_device *dev) 4282 { 4283 struct drm_i915_private *dev_priv = to_i915(dev); 4284 u32 mask = SDE_GMBUS_ICP; 4285 4286 WARN_ON(I915_READ(SDEIER) != 0); 4287 I915_WRITE(SDEIER, 0xffffffff); 4288 POSTING_READ(SDEIER); 4289 4290 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 4291 I915_WRITE(SDEIMR, ~mask); 4292 4293 icp_hpd_detection_setup(dev_priv); 4294 } 4295 4296 static int gen11_irq_postinstall(struct drm_device *dev) 4297 { 4298 struct drm_i915_private *dev_priv = dev->dev_private; 4299 u32 gu_misc_masked = GEN11_GU_MISC_GSE; 4300 4301 if (HAS_PCH_ICP(dev_priv)) 4302 icp_irq_postinstall(dev); 4303 4304 gen11_gt_irq_postinstall(dev_priv); 4305 gen8_de_irq_postinstall(dev_priv); 4306 4307 GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); 4308 4309 I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); 4310 4311 I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); 4312 POSTING_READ(GEN11_GFX_MSTR_IRQ); 4313 4314 return 0; 4315 } 4316 4317 static int cherryview_irq_postinstall(struct drm_device *dev) 4318 { 4319 struct drm_i915_private *dev_priv = to_i915(dev); 4320 4321 gen8_gt_irq_postinstall(dev_priv); 4322 4323 spin_lock_irq(&dev_priv->irq_lock); 4324 if (dev_priv->display_irqs_enabled) 4325 vlv_display_irq_postinstall(dev_priv); 4326 spin_unlock_irq(&dev_priv->irq_lock); 4327 4328 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 4329 POSTING_READ(GEN8_MASTER_IRQ); 4330 4331 return 0; 4332 } 4333 4334 static void i8xx_irq_reset(struct drm_device *dev) 4335 { 4336 struct drm_i915_private *dev_priv = to_i915(dev); 4337 4338 i9xx_pipestat_irq_reset(dev_priv); 4339 4340 I915_WRITE16(HWSTAM, 0xffff); 4341 4342 GEN2_IRQ_RESET(); 4343 } 4344 4345 static int i8xx_irq_postinstall(struct drm_device *dev) 4346 { 4347 struct drm_i915_private *dev_priv = to_i915(dev); 4348 u16 enable_mask; 4349 4350 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 4351 I915_ERROR_MEMORY_REFRESH)); 4352 4353 /* Unmask the interrupts that we always want on. 
*/ 4354 dev_priv->irq_mask = 4355 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4356 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4357 I915_MASTER_ERROR_INTERRUPT); 4358 4359 enable_mask = 4360 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4361 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4362 I915_MASTER_ERROR_INTERRUPT | 4363 I915_USER_INTERRUPT; 4364 4365 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4366 4367 /* Interrupt setup is already guaranteed to be single-threaded, this is 4368 * just to make the assert_spin_locked check happy. */ 4369 spin_lock_irq(&dev_priv->irq_lock); 4370 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4371 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4372 spin_unlock_irq(&dev_priv->irq_lock); 4373 4374 return 0; 4375 } 4376 4377 static void i8xx_error_irq_ack(struct drm_i915_private *dev_priv, 4378 u16 *eir, u16 *eir_stuck) 4379 { 4380 u16 emr; 4381 4382 *eir = I915_READ16(EIR); 4383 4384 if (*eir) 4385 I915_WRITE16(EIR, *eir); 4386 4387 *eir_stuck = I915_READ16(EIR); 4388 if (*eir_stuck == 0) 4389 return; 4390 4391 /* 4392 * Toggle all EMR bits to make sure we get an edge 4393 * in the ISR master error bit if we don't clear 4394 * all the EIR bits. Otherwise the edge triggered 4395 * IIR on i965/g4x wouldn't notice that an interrupt 4396 * is still pending. Also some EIR bits can't be 4397 * cleared except by handling the underlying error 4398 * (or by a GPU reset) so we mask any bit that 4399 * remains set. 4400 */ 4401 emr = I915_READ16(EMR); 4402 I915_WRITE16(EMR, 0xffff); 4403 I915_WRITE16(EMR, emr | *eir_stuck); 4404 } 4405 4406 static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv, 4407 u16 eir, u16 eir_stuck) 4408 { 4409 DRM_DEBUG("Master Error: EIR 0x%04x\n", eir); 4410 4411 if (eir_stuck) 4412 DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck); 4413 } 4414 4415 static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, 4416 u32 *eir, u32 *eir_stuck) 4417 { 4418 u32 emr; 4419 4420 *eir = I915_READ(EIR); 4421 4422 I915_WRITE(EIR, *eir); 4423 4424 *eir_stuck = I915_READ(EIR); 4425 if (*eir_stuck == 0) 4426 return; 4427 4428 /* 4429 * Toggle all EMR bits to make sure we get an edge 4430 * in the ISR master error bit if we don't clear 4431 * all the EIR bits. Otherwise the edge triggered 4432 * IIR on i965/g4x wouldn't notice that an interrupt 4433 * is still pending. Also some EIR bits can't be 4434 * cleared except by handling the underlying error 4435 * (or by a GPU reset) so we mask any bit that 4436 * remains set. 
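 *
 * Worked example (hypothetical values): if EMR was 0 and only a stuck
 * I915_ERROR_PAGE_TABLE bit remains set in EIR, the code below briefly
 * writes EMR = 0xffffffff and then EMR = 0x0 | eir_stuck, i.e. only the
 * stuck error ends up masked and everything else is reported again.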
4437 */ 4438 emr = I915_READ(EMR); 4439 I915_WRITE(EMR, 0xffffffff); 4440 I915_WRITE(EMR, emr | *eir_stuck); 4441 } 4442 4443 static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, 4444 u32 eir, u32 eir_stuck) 4445 { 4446 DRM_DEBUG("Master Error, EIR 0x%08x\n", eir); 4447 4448 if (eir_stuck) 4449 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck); 4450 } 4451 4452 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4453 { 4454 struct drm_device *dev = arg; 4455 struct drm_i915_private *dev_priv = to_i915(dev); 4456 irqreturn_t ret = IRQ_NONE; 4457 4458 if (!intel_irqs_enabled(dev_priv)) 4459 return IRQ_NONE; 4460 4461 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4462 disable_rpm_wakeref_asserts(dev_priv); 4463 4464 do { 4465 u32 pipe_stats[I915_MAX_PIPES] = {}; 4466 u16 eir = 0, eir_stuck = 0; 4467 u16 iir; 4468 4469 iir = I915_READ16(IIR); 4470 if (iir == 0) 4471 break; 4472 4473 ret = IRQ_HANDLED; 4474 4475 /* Call regardless, as some status bits might not be 4476 * signalled in iir */ 4477 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4478 4479 if (iir & I915_MASTER_ERROR_INTERRUPT) 4480 i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4481 4482 I915_WRITE16(IIR, iir); 4483 4484 if (iir & I915_USER_INTERRUPT) 4485 notify_ring(dev_priv->engine[RCS]); 4486 4487 if (iir & I915_MASTER_ERROR_INTERRUPT) 4488 i8xx_error_irq_handler(dev_priv, eir, eir_stuck); 4489 4490 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4491 } while (0); 4492 4493 enable_rpm_wakeref_asserts(dev_priv); 4494 4495 return ret; 4496 } 4497 4498 static void i915_irq_reset(struct drm_device *dev) 4499 { 4500 struct drm_i915_private *dev_priv = to_i915(dev); 4501 4502 if (I915_HAS_HOTPLUG(dev_priv)) { 4503 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4504 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4505 } 4506 4507 i9xx_pipestat_irq_reset(dev_priv); 4508 4509 I915_WRITE(HWSTAM, 0xffffffff); 4510 4511 GEN3_IRQ_RESET(); 4512 } 4513 4514 static int i915_irq_postinstall(struct drm_device *dev) 4515 { 4516 struct drm_i915_private *dev_priv = to_i915(dev); 4517 u32 enable_mask; 4518 4519 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 4520 I915_ERROR_MEMORY_REFRESH)); 4521 4522 /* Unmask the interrupts that we always want on. */ 4523 dev_priv->irq_mask = 4524 ~(I915_ASLE_INTERRUPT | 4525 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4526 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4527 I915_MASTER_ERROR_INTERRUPT); 4528 4529 enable_mask = 4530 I915_ASLE_INTERRUPT | 4531 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4532 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4533 I915_MASTER_ERROR_INTERRUPT | 4534 I915_USER_INTERRUPT; 4535 4536 if (I915_HAS_HOTPLUG(dev_priv)) { 4537 /* Enable in IER... */ 4538 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4539 /* and unmask in IMR */ 4540 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4541 } 4542 4543 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4544 4545 /* Interrupt setup is already guaranteed to be single-threaded, this is 4546 * just to make the assert_spin_locked check happy. 
*/ 4547 spin_lock_irq(&dev_priv->irq_lock); 4548 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4549 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4550 spin_unlock_irq(&dev_priv->irq_lock); 4551 4552 i915_enable_asle_pipestat(dev_priv); 4553 4554 return 0; 4555 } 4556 4557 static irqreturn_t i915_irq_handler(int irq, void *arg) 4558 { 4559 struct drm_device *dev = arg; 4560 struct drm_i915_private *dev_priv = to_i915(dev); 4561 irqreturn_t ret = IRQ_NONE; 4562 4563 if (!intel_irqs_enabled(dev_priv)) 4564 return IRQ_NONE; 4565 4566 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4567 disable_rpm_wakeref_asserts(dev_priv); 4568 4569 do { 4570 u32 pipe_stats[I915_MAX_PIPES] = {}; 4571 u32 eir = 0, eir_stuck = 0; 4572 u32 hotplug_status = 0; 4573 u32 iir; 4574 4575 iir = I915_READ(IIR); 4576 if (iir == 0) 4577 break; 4578 4579 ret = IRQ_HANDLED; 4580 4581 if (I915_HAS_HOTPLUG(dev_priv) && 4582 iir & I915_DISPLAY_PORT_INTERRUPT) 4583 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4584 4585 /* Call regardless, as some status bits might not be 4586 * signalled in iir */ 4587 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4588 4589 if (iir & I915_MASTER_ERROR_INTERRUPT) 4590 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4591 4592 I915_WRITE(IIR, iir); 4593 4594 if (iir & I915_USER_INTERRUPT) 4595 notify_ring(dev_priv->engine[RCS]); 4596 4597 if (iir & I915_MASTER_ERROR_INTERRUPT) 4598 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4599 4600 if (hotplug_status) 4601 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4602 4603 i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4604 } while (0); 4605 4606 enable_rpm_wakeref_asserts(dev_priv); 4607 4608 return ret; 4609 } 4610 4611 static void i965_irq_reset(struct drm_device *dev) 4612 { 4613 struct drm_i915_private *dev_priv = to_i915(dev); 4614 4615 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4616 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4617 4618 i9xx_pipestat_irq_reset(dev_priv); 4619 4620 I915_WRITE(HWSTAM, 0xffffffff); 4621 4622 GEN3_IRQ_RESET(); 4623 } 4624 4625 static int i965_irq_postinstall(struct drm_device *dev) 4626 { 4627 struct drm_i915_private *dev_priv = to_i915(dev); 4628 u32 enable_mask; 4629 u32 error_mask; 4630 4631 /* 4632 * Enable some error detection, note the instruction error mask 4633 * bit is reserved, so we leave it masked. 4634 */ 4635 if (IS_G4X(dev_priv)) { 4636 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4637 GM45_ERROR_MEM_PRIV | 4638 GM45_ERROR_CP_PRIV | 4639 I915_ERROR_MEMORY_REFRESH); 4640 } else { 4641 error_mask = ~(I915_ERROR_PAGE_TABLE | 4642 I915_ERROR_MEMORY_REFRESH); 4643 } 4644 I915_WRITE(EMR, error_mask); 4645 4646 /* Unmask the interrupts that we always want on. */ 4647 dev_priv->irq_mask = 4648 ~(I915_ASLE_INTERRUPT | 4649 I915_DISPLAY_PORT_INTERRUPT | 4650 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4651 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4652 I915_MASTER_ERROR_INTERRUPT); 4653 4654 enable_mask = 4655 I915_ASLE_INTERRUPT | 4656 I915_DISPLAY_PORT_INTERRUPT | 4657 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4658 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4659 I915_MASTER_ERROR_INTERRUPT | 4660 I915_USER_INTERRUPT; 4661 4662 if (IS_G4X(dev_priv)) 4663 enable_mask |= I915_BSD_USER_INTERRUPT; 4664 4665 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 4666 4667 /* Interrupt setup is already guaranteed to be single-threaded, this is 4668 * just to make the assert_spin_locked check happy. 
*/ 4669 spin_lock_irq(&dev_priv->irq_lock); 4670 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4671 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4672 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4673 spin_unlock_irq(&dev_priv->irq_lock); 4674 4675 i915_enable_asle_pipestat(dev_priv); 4676 4677 return 0; 4678 } 4679 4680 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4681 { 4682 u32 hotplug_en; 4683 4684 lockdep_assert_held(&dev_priv->irq_lock); 4685 4686 /* Note HDMI and DP share hotplug bits */ 4687 /* enable bits are the same for all generations */ 4688 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4689 /* Programming the CRT detection parameters tends 4690 to generate a spurious hotplug event about three 4691 seconds later. So just do it once. 4692 */ 4693 if (IS_G4X(dev_priv)) 4694 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4695 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4696 4697 /* Ignore TV since it's buggy */ 4698 i915_hotplug_interrupt_update_locked(dev_priv, 4699 HOTPLUG_INT_EN_MASK | 4700 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4701 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4702 hotplug_en); 4703 } 4704 4705 static irqreturn_t i965_irq_handler(int irq, void *arg) 4706 { 4707 struct drm_device *dev = arg; 4708 struct drm_i915_private *dev_priv = to_i915(dev); 4709 irqreturn_t ret = IRQ_NONE; 4710 4711 if (!intel_irqs_enabled(dev_priv)) 4712 return IRQ_NONE; 4713 4714 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4715 disable_rpm_wakeref_asserts(dev_priv); 4716 4717 do { 4718 u32 pipe_stats[I915_MAX_PIPES] = {}; 4719 u32 eir = 0, eir_stuck = 0; 4720 u32 hotplug_status = 0; 4721 u32 iir; 4722 4723 iir = I915_READ(IIR); 4724 if (iir == 0) 4725 break; 4726 4727 ret = IRQ_HANDLED; 4728 4729 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4730 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4731 4732 /* Call regardless, as some status bits might not be 4733 * signalled in iir */ 4734 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 4735 4736 if (iir & I915_MASTER_ERROR_INTERRUPT) 4737 i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); 4738 4739 I915_WRITE(IIR, iir); 4740 4741 if (iir & I915_USER_INTERRUPT) 4742 notify_ring(dev_priv->engine[RCS]); 4743 4744 if (iir & I915_BSD_USER_INTERRUPT) 4745 notify_ring(dev_priv->engine[VCS]); 4746 4747 if (iir & I915_MASTER_ERROR_INTERRUPT) 4748 i9xx_error_irq_handler(dev_priv, eir, eir_stuck); 4749 4750 if (hotplug_status) 4751 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4752 4753 i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); 4754 } while (0); 4755 4756 enable_rpm_wakeref_asserts(dev_priv); 4757 4758 return ret; 4759 } 4760 4761 /** 4762 * intel_irq_init - initializes irq support 4763 * @dev_priv: i915 device instance 4764 * 4765 * This function initializes all the irq support including work items, timers 4766 * and all the vtables. It does not setup the interrupt itself though. 
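 *
 * Interrupts themselves are only enabled later, via intel_irq_install().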
4767 */ 4768 void intel_irq_init(struct drm_i915_private *dev_priv) 4769 { 4770 struct drm_device *dev = &dev_priv->drm; 4771 struct intel_rps *rps = &dev_priv->gt_pm.rps; 4772 int i; 4773 4774 intel_hpd_init_work(dev_priv); 4775 4776 INIT_WORK(&rps->work, gen6_pm_rps_work); 4777 4778 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4779 for (i = 0; i < MAX_L3_SLICES; ++i) 4780 dev_priv->l3_parity.remap_info[i] = NULL; 4781 4782 if (HAS_GUC_SCHED(dev_priv)) 4783 dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT; 4784 4785 /* Let's track the enabled rps events */ 4786 if (IS_VALLEYVIEW(dev_priv)) 4787 /* WaGsvRC0ResidencyMethod:vlv */ 4788 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4789 else 4790 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4791 4792 rps->pm_intrmsk_mbz = 0; 4793 4794 /* 4795 * SNB, IVB and HSW can hard hang, while VLV and CHV may hard hang, on a 4796 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked. 4797 * 4798 * TODO: verify if this can be reproduced on VLV,CHV. 4799 */ 4800 if (INTEL_GEN(dev_priv) <= 7) 4801 rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; 4802 4803 if (INTEL_GEN(dev_priv) >= 8) 4804 rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; 4805 4806 if (IS_GEN2(dev_priv)) { 4807 /* Gen2 doesn't have a hardware frame counter */ 4808 dev->max_vblank_count = 0; 4809 } else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 4810 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4811 dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4812 } else { 4813 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4814 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4815 } 4816 4817 /* 4818 * Opt out of the vblank disable timer on everything except gen2. 4819 * Gen2 doesn't have a hardware frame counter and so depends on 4820 * vblank interrupts to produce sane vblank sequence numbers. 4821 */ 4822 if (!IS_GEN2(dev_priv)) 4823 dev->vblank_disable_immediate = true; 4824 4825 /* Most platforms treat the display irq block as an always-on 4826 * power domain. vlv/chv can disable it at runtime and need 4827 * special care to avoid writing any of the display block registers 4828 * outside of the power domain. We defer setting up the display irqs 4829 * in this case to the runtime pm.
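 * Deferred enabling/disabling is done through valleyview_enable_display_irqs()
 * and valleyview_disable_display_irqs() above.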
4830 */ 4831 dev_priv->display_irqs_enabled = true; 4832 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 4833 dev_priv->display_irqs_enabled = false; 4834 4835 dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; 4836 4837 dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; 4838 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4839 4840 if (IS_CHERRYVIEW(dev_priv)) { 4841 dev->driver->irq_handler = cherryview_irq_handler; 4842 dev->driver->irq_preinstall = cherryview_irq_reset; 4843 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4844 dev->driver->irq_uninstall = cherryview_irq_reset; 4845 dev->driver->enable_vblank = i965_enable_vblank; 4846 dev->driver->disable_vblank = i965_disable_vblank; 4847 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4848 } else if (IS_VALLEYVIEW(dev_priv)) { 4849 dev->driver->irq_handler = valleyview_irq_handler; 4850 dev->driver->irq_preinstall = valleyview_irq_reset; 4851 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4852 dev->driver->irq_uninstall = valleyview_irq_reset; 4853 dev->driver->enable_vblank = i965_enable_vblank; 4854 dev->driver->disable_vblank = i965_disable_vblank; 4855 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4856 } else if (INTEL_GEN(dev_priv) >= 11) { 4857 dev->driver->irq_handler = gen11_irq_handler; 4858 dev->driver->irq_preinstall = gen11_irq_reset; 4859 dev->driver->irq_postinstall = gen11_irq_postinstall; 4860 dev->driver->irq_uninstall = gen11_irq_reset; 4861 dev->driver->enable_vblank = gen8_enable_vblank; 4862 dev->driver->disable_vblank = gen8_disable_vblank; 4863 dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; 4864 } else if (INTEL_GEN(dev_priv) >= 8) { 4865 dev->driver->irq_handler = gen8_irq_handler; 4866 dev->driver->irq_preinstall = gen8_irq_reset; 4867 dev->driver->irq_postinstall = gen8_irq_postinstall; 4868 dev->driver->irq_uninstall = gen8_irq_reset; 4869 dev->driver->enable_vblank = gen8_enable_vblank; 4870 dev->driver->disable_vblank = gen8_disable_vblank; 4871 if (IS_GEN9_LP(dev_priv)) 4872 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4873 else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 4874 HAS_PCH_CNP(dev_priv)) 4875 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4876 else 4877 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4878 } else if (HAS_PCH_SPLIT(dev_priv)) { 4879 dev->driver->irq_handler = ironlake_irq_handler; 4880 dev->driver->irq_preinstall = ironlake_irq_reset; 4881 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4882 dev->driver->irq_uninstall = ironlake_irq_reset; 4883 dev->driver->enable_vblank = ironlake_enable_vblank; 4884 dev->driver->disable_vblank = ironlake_disable_vblank; 4885 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4886 } else { 4887 if (IS_GEN2(dev_priv)) { 4888 dev->driver->irq_preinstall = i8xx_irq_reset; 4889 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4890 dev->driver->irq_handler = i8xx_irq_handler; 4891 dev->driver->irq_uninstall = i8xx_irq_reset; 4892 dev->driver->enable_vblank = i8xx_enable_vblank; 4893 dev->driver->disable_vblank = i8xx_disable_vblank; 4894 } else if (IS_GEN3(dev_priv)) { 4895 dev->driver->irq_preinstall = i915_irq_reset; 4896 dev->driver->irq_postinstall = i915_irq_postinstall; 4897 dev->driver->irq_uninstall = i915_irq_reset; 4898 dev->driver->irq_handler = i915_irq_handler; 4899 dev->driver->enable_vblank = i8xx_enable_vblank; 4900 dev->driver->disable_vblank = i8xx_disable_vblank; 4901 } else { 
4902 dev->driver->irq_preinstall = i965_irq_reset; 4903 dev->driver->irq_postinstall = i965_irq_postinstall; 4904 dev->driver->irq_uninstall = i965_irq_reset; 4905 dev->driver->irq_handler = i965_irq_handler; 4906 dev->driver->enable_vblank = i965_enable_vblank; 4907 dev->driver->disable_vblank = i965_disable_vblank; 4908 } 4909 if (I915_HAS_HOTPLUG(dev_priv)) 4910 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4911 } 4912 } 4913 4914 /** 4915 * intel_irq_fini - deinitializes IRQ support 4916 * @i915: i915 device instance 4917 * 4918 * This function deinitializes all the IRQ support. 4919 */ 4920 void intel_irq_fini(struct drm_i915_private *i915) 4921 { 4922 int i; 4923 4924 for (i = 0; i < MAX_L3_SLICES; ++i) 4925 kfree(i915->l3_parity.remap_info[i]); 4926 } 4927 4928 /** 4929 * intel_irq_install - enables the hardware interrupt 4930 * @dev_priv: i915 device instance 4931 * 4932 * This function enables the hardware interrupt handling, but leaves the hotplug 4933 * handling still disabled. It is called after intel_irq_init(). 4934 * 4935 * In the driver load and resume code we need working interrupts in a few places 4936 * but don't want to deal with the hassle of concurrent probe and hotplug 4937 * workers. Hence the split into this two-stage approach. 4938 */ 4939 int intel_irq_install(struct drm_i915_private *dev_priv) 4940 { 4941 /* 4942 * We enable some interrupt sources in our postinstall hooks, so mark 4943 * interrupts as enabled _before_ actually enabling them to avoid 4944 * special cases in our ordering checks. 4945 */ 4946 dev_priv->runtime_pm.irqs_enabled = true; 4947 4948 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq); 4949 } 4950 4951 /** 4952 * intel_irq_uninstall - finilizes all irq handling 4953 * @dev_priv: i915 device instance 4954 * 4955 * This stops interrupt and hotplug handling and unregisters and frees all 4956 * resources acquired in the init functions. 4957 */ 4958 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4959 { 4960 drm_irq_uninstall(&dev_priv->drm); 4961 intel_hpd_cancel_work(dev_priv); 4962 dev_priv->runtime_pm.irqs_enabled = false; 4963 } 4964 4965 /** 4966 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4967 * @dev_priv: i915 device instance 4968 * 4969 * This function is used to disable interrupts at runtime, both in the runtime 4970 * pm and the system suspend/resume code. 4971 */ 4972 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4973 { 4974 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm); 4975 dev_priv->runtime_pm.irqs_enabled = false; 4976 synchronize_irq(dev_priv->drm.irq); 4977 } 4978 4979 /** 4980 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4981 * @dev_priv: i915 device instance 4982 * 4983 * This function is used to enable interrupts at runtime, both in the runtime 4984 * pm and the system suspend/resume code. 4985 */ 4986 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4987 { 4988 dev_priv->runtime_pm.irqs_enabled = true; 4989 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm); 4990 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm); 4991 } 4992