1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define KBUILD_MODNAME "i915" 30 31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32 33 #include <linux/sysrq.h> 34 #include <linux/slab.h> 35 #include <linux/circ_buf.h> 36 #include <drm/drmP.h> 37 #include <drm/i915_drm.h> 38 #include "i915_drv.h" 39 #include "i915_trace.h" 40 #include "intel_drv.h" 41 42 /** 43 * DOC: interrupt handling 44 * 45 * These functions provide the basic support for enabling and disabling the 46 * interrupt handling support. There's a lot more functionality in i915_irq.c 47 * and related files, but that will be described in separate chapters. 
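 * Interrupt delivery is controlled through the usual IER (enable), IMR (mask) and IIR (identity/status) register triplets; IIR bits are cleared by writing them back as 1, and because the hardware can queue a second event behind IIR the reset macros below clear IIR twice.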
48 */ 49 50 static const u32 hpd_ilk[HPD_NUM_PINS] = { 51 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 52 }; 53 54 static const u32 hpd_ivb[HPD_NUM_PINS] = { 55 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 56 }; 57 58 static const u32 hpd_bdw[HPD_NUM_PINS] = { 59 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 60 }; 61 62 static const u32 hpd_ibx[HPD_NUM_PINS] = { 63 [HPD_CRT] = SDE_CRT_HOTPLUG, 64 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 65 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 66 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 67 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 68 }; 69 70 static const u32 hpd_cpt[HPD_NUM_PINS] = { 71 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 72 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 73 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 74 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 75 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 76 }; 77 78 static const u32 hpd_spt[HPD_NUM_PINS] = { 79 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 80 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 81 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 82 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 83 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 84 }; 85 86 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 87 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 88 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 89 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 90 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 91 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 92 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 93 }; 94 95 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 96 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 97 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 98 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 99 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 100 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 101 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 102 }; 103 104 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 105 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 106 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 107 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 108 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 109 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 110 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 111 }; 112 113 /* BXT hpd list */ 114 static const u32 hpd_bxt[HPD_NUM_PINS] = { 115 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 116 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 117 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 118 }; 119 120 /* IIR can theoretically queue up two events. Be paranoid. */ 121 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 122 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 123 POSTING_READ(GEN8_##type##_IMR(which)); \ 124 I915_WRITE(GEN8_##type##_IER(which), 0); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 128 POSTING_READ(GEN8_##type##_IIR(which)); \ 129 } while (0) 130 131 #define GEN5_IRQ_RESET(type) do { \ 132 I915_WRITE(type##IMR, 0xffffffff); \ 133 POSTING_READ(type##IMR); \ 134 I915_WRITE(type##IER, 0); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 I915_WRITE(type##IIR, 0xffffffff); \ 138 POSTING_READ(type##IIR); \ 139 } while (0) 140 141 /* 142 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
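 * gen5_assert_iir_is_zero() below implements that postinstall check; if IIR is found non-zero it warns and clears the stale bits so interrupt setup can proceed.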
143 */ 144 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, 145 i915_reg_t reg) 146 { 147 u32 val = I915_READ(reg); 148 149 if (val == 0) 150 return; 151 152 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 153 i915_mmio_reg_offset(reg), val); 154 I915_WRITE(reg, 0xffffffff); 155 POSTING_READ(reg); 156 I915_WRITE(reg, 0xffffffff); 157 POSTING_READ(reg); 158 } 159 160 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 161 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 162 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 163 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 164 POSTING_READ(GEN8_##type##_IMR(which)); \ 165 } while (0) 166 167 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 168 gen5_assert_iir_is_zero(dev_priv, type##IIR); \ 169 I915_WRITE(type##IER, (ier_val)); \ 170 I915_WRITE(type##IMR, (imr_val)); \ 171 POSTING_READ(type##IMR); \ 172 } while (0) 173 174 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 175 176 /* For display hotplug interrupt */ 177 static inline void 178 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 179 uint32_t mask, 180 uint32_t bits) 181 { 182 uint32_t val; 183 184 assert_spin_locked(&dev_priv->irq_lock); 185 WARN_ON(bits & ~mask); 186 187 val = I915_READ(PORT_HOTPLUG_EN); 188 val &= ~mask; 189 val |= bits; 190 I915_WRITE(PORT_HOTPLUG_EN, val); 191 } 192 193 /** 194 * i915_hotplug_interrupt_update - update hotplug interrupt enable 195 * @dev_priv: driver private 196 * @mask: bits to update 197 * @bits: bits to enable 198 * NOTE: the HPD enable bits are modified both inside and outside 199 * of an interrupt context. To keep the read-modify-write cycles 200 * from interfering, these bits are protected by a spinlock. Since this 201 * function is usually not called from a context where the lock is 202 * held already, this function acquires the lock itself. A non-locking 203 * version is also available. 
204 */ 205 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 206 uint32_t mask, 207 uint32_t bits) 208 { 209 spin_lock_irq(&dev_priv->irq_lock); 210 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 211 spin_unlock_irq(&dev_priv->irq_lock); 212 } 213 214 /** 215 * ilk_update_display_irq - update DEIMR 216 * @dev_priv: driver private 217 * @interrupt_mask: mask of interrupt bits to update 218 * @enabled_irq_mask: mask of interrupt bits to enable 219 */ 220 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 221 uint32_t interrupt_mask, 222 uint32_t enabled_irq_mask) 223 { 224 uint32_t new_val; 225 226 assert_spin_locked(&dev_priv->irq_lock); 227 228 WARN_ON(enabled_irq_mask & ~interrupt_mask); 229 230 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 231 return; 232 233 new_val = dev_priv->irq_mask; 234 new_val &= ~interrupt_mask; 235 new_val |= (~enabled_irq_mask & interrupt_mask); 236 237 if (new_val != dev_priv->irq_mask) { 238 dev_priv->irq_mask = new_val; 239 I915_WRITE(DEIMR, dev_priv->irq_mask); 240 POSTING_READ(DEIMR); 241 } 242 } 243 244 /** 245 * ilk_update_gt_irq - update GTIMR 246 * @dev_priv: driver private 247 * @interrupt_mask: mask of interrupt bits to update 248 * @enabled_irq_mask: mask of interrupt bits to enable 249 */ 250 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 251 uint32_t interrupt_mask, 252 uint32_t enabled_irq_mask) 253 { 254 assert_spin_locked(&dev_priv->irq_lock); 255 256 WARN_ON(enabled_irq_mask & ~interrupt_mask); 257 258 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 259 return; 260 261 dev_priv->gt_irq_mask &= ~interrupt_mask; 262 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 263 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 264 } 265 266 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 267 { 268 ilk_update_gt_irq(dev_priv, mask, mask); 269 POSTING_READ_FW(GTIMR); 270 } 271 272 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 273 { 274 ilk_update_gt_irq(dev_priv, mask, 0); 275 } 276 277 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 278 { 279 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 280 } 281 282 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 283 { 284 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 285 } 286 287 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 288 { 289 return INTEL_INFO(dev_priv)->gen >= 8 ? 
GEN8_GT_IER(2) : GEN6_PMIER; 290 } 291 292 /** 293 * snb_update_pm_irq - update GEN6_PMIMR 294 * @dev_priv: driver private 295 * @interrupt_mask: mask of interrupt bits to update 296 * @enabled_irq_mask: mask of interrupt bits to enable 297 */ 298 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 299 uint32_t interrupt_mask, 300 uint32_t enabled_irq_mask) 301 { 302 uint32_t new_val; 303 304 WARN_ON(enabled_irq_mask & ~interrupt_mask); 305 306 assert_spin_locked(&dev_priv->irq_lock); 307 308 new_val = dev_priv->pm_irq_mask; 309 new_val &= ~interrupt_mask; 310 new_val |= (~enabled_irq_mask & interrupt_mask); 311 312 if (new_val != dev_priv->pm_irq_mask) { 313 dev_priv->pm_irq_mask = new_val; 314 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask); 315 POSTING_READ(gen6_pm_imr(dev_priv)); 316 } 317 } 318 319 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 320 { 321 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 322 return; 323 324 snb_update_pm_irq(dev_priv, mask, mask); 325 } 326 327 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, 328 uint32_t mask) 329 { 330 snb_update_pm_irq(dev_priv, mask, 0); 331 } 332 333 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 334 { 335 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 336 return; 337 338 __gen6_disable_pm_irq(dev_priv, mask); 339 } 340 341 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) 342 { 343 i915_reg_t reg = gen6_pm_iir(dev_priv); 344 345 spin_lock_irq(&dev_priv->irq_lock); 346 I915_WRITE(reg, dev_priv->pm_rps_events); 347 I915_WRITE(reg, dev_priv->pm_rps_events); 348 POSTING_READ(reg); 349 dev_priv->rps.pm_iir = 0; 350 spin_unlock_irq(&dev_priv->irq_lock); 351 } 352 353 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) 354 { 355 spin_lock_irq(&dev_priv->irq_lock); 356 WARN_ON_ONCE(dev_priv->rps.pm_iir); 357 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 358 dev_priv->rps.interrupts_enabled = true; 359 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | 360 dev_priv->pm_rps_events); 361 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 362 363 spin_unlock_irq(&dev_priv->irq_lock); 364 } 365 366 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 367 { 368 return (mask & ~dev_priv->rps.pm_intr_keep); 369 } 370 371 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) 372 { 373 spin_lock_irq(&dev_priv->irq_lock); 374 dev_priv->rps.interrupts_enabled = false; 375 376 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 377 378 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 379 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 380 ~dev_priv->pm_rps_events); 381 382 spin_unlock_irq(&dev_priv->irq_lock); 383 synchronize_irq(dev_priv->drm.irq); 384 385 /* Now that we will not be generating any more work, flush any 386 * outstanding tasks. As we are called on the RPS idle path, 387 * we will reset the GPU to minimum frequencies, so the current 388 * state of the worker can be discarded. 
389 */ 390 cancel_work_sync(&dev_priv->rps.work); 391 gen6_reset_rps_interrupts(dev_priv); 392 } 393 394 /** 395 * bdw_update_port_irq - update DE port interrupt 396 * @dev_priv: driver private 397 * @interrupt_mask: mask of interrupt bits to update 398 * @enabled_irq_mask: mask of interrupt bits to enable 399 */ 400 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 401 uint32_t interrupt_mask, 402 uint32_t enabled_irq_mask) 403 { 404 uint32_t new_val; 405 uint32_t old_val; 406 407 assert_spin_locked(&dev_priv->irq_lock); 408 409 WARN_ON(enabled_irq_mask & ~interrupt_mask); 410 411 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 412 return; 413 414 old_val = I915_READ(GEN8_DE_PORT_IMR); 415 416 new_val = old_val; 417 new_val &= ~interrupt_mask; 418 new_val |= (~enabled_irq_mask & interrupt_mask); 419 420 if (new_val != old_val) { 421 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 422 POSTING_READ(GEN8_DE_PORT_IMR); 423 } 424 } 425 426 /** 427 * bdw_update_pipe_irq - update DE pipe interrupt 428 * @dev_priv: driver private 429 * @pipe: pipe whose interrupt to update 430 * @interrupt_mask: mask of interrupt bits to update 431 * @enabled_irq_mask: mask of interrupt bits to enable 432 */ 433 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 434 enum i915_pipe pipe, 435 uint32_t interrupt_mask, 436 uint32_t enabled_irq_mask) 437 { 438 uint32_t new_val; 439 440 assert_spin_locked(&dev_priv->irq_lock); 441 442 WARN_ON(enabled_irq_mask & ~interrupt_mask); 443 444 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 445 return; 446 447 new_val = dev_priv->de_irq_mask[pipe]; 448 new_val &= ~interrupt_mask; 449 new_val |= (~enabled_irq_mask & interrupt_mask); 450 451 if (new_val != dev_priv->de_irq_mask[pipe]) { 452 dev_priv->de_irq_mask[pipe] = new_val; 453 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 454 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 455 } 456 } 457 458 /** 459 * ibx_display_interrupt_update - update SDEIMR 460 * @dev_priv: driver private 461 * @interrupt_mask: mask of interrupt bits to update 462 * @enabled_irq_mask: mask of interrupt bits to enable 463 */ 464 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 465 uint32_t interrupt_mask, 466 uint32_t enabled_irq_mask) 467 { 468 uint32_t sdeimr = I915_READ(SDEIMR); 469 sdeimr &= ~interrupt_mask; 470 sdeimr |= (~enabled_irq_mask & interrupt_mask); 471 472 WARN_ON(enabled_irq_mask & ~interrupt_mask); 473 474 assert_spin_locked(&dev_priv->irq_lock); 475 476 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 477 return; 478 479 I915_WRITE(SDEIMR, sdeimr); 480 POSTING_READ(SDEIMR); 481 } 482 483 static void 484 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 485 u32 enable_mask, u32 status_mask) 486 { 487 i915_reg_t reg = PIPESTAT(pipe); 488 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 489 490 assert_spin_locked(&dev_priv->irq_lock); 491 WARN_ON(!intel_irqs_enabled(dev_priv)); 492 493 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 494 status_mask & ~PIPESTAT_INT_STATUS_MASK, 495 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 496 pipe_name(pipe), enable_mask, status_mask)) 497 return; 498 499 if ((pipestat & enable_mask) == enable_mask) 500 return; 501 502 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 503 504 /* Enable the interrupt, clear any pending status */ 505 pipestat |= enable_mask | status_mask; 506 I915_WRITE(reg, pipestat); 507 POSTING_READ(reg); 508 } 509 510 static void 511 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum 
i915_pipe pipe, 512 u32 enable_mask, u32 status_mask) 513 { 514 i915_reg_t reg = PIPESTAT(pipe); 515 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 516 517 assert_spin_locked(&dev_priv->irq_lock); 518 WARN_ON(!intel_irqs_enabled(dev_priv)); 519 520 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 521 status_mask & ~PIPESTAT_INT_STATUS_MASK, 522 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 523 pipe_name(pipe), enable_mask, status_mask)) 524 return; 525 526 if ((pipestat & enable_mask) == 0) 527 return; 528 529 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 530 531 pipestat &= ~enable_mask; 532 I915_WRITE(reg, pipestat); 533 POSTING_READ(reg); 534 } 535 536 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 537 { 538 u32 enable_mask = status_mask << 16; 539 540 /* 541 * On pipe A we don't support the PSR interrupt yet, 542 * on pipe B and C the same bit MBZ. 543 */ 544 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 545 return 0; 546 /* 547 * On pipe B and C we don't support the PSR interrupt yet, on pipe 548 * A the same bit is for perf counters which we don't use either. 549 */ 550 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 551 return 0; 552 553 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 554 SPRITE0_FLIP_DONE_INT_EN_VLV | 555 SPRITE1_FLIP_DONE_INT_EN_VLV); 556 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 557 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 558 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 559 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 560 561 return enable_mask; 562 } 563 564 void 565 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 566 u32 status_mask) 567 { 568 u32 enable_mask; 569 570 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 571 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 572 status_mask); 573 else 574 enable_mask = status_mask << 16; 575 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 576 } 577 578 void 579 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 580 u32 status_mask) 581 { 582 u32 enable_mask; 583 584 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 585 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm, 586 status_mask); 587 else 588 enable_mask = status_mask << 16; 589 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 590 } 591 592 /** 593 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 594 * @dev_priv: i915 device private 595 */ 596 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 597 { 598 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 599 return; 600 601 spin_lock_irq(&dev_priv->irq_lock); 602 603 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 604 if (INTEL_GEN(dev_priv) >= 4) 605 i915_enable_pipestat(dev_priv, PIPE_A, 606 PIPE_LEGACY_BLC_EVENT_STATUS); 607 608 spin_unlock_irq(&dev_priv->irq_lock); 609 } 610 611 /* 612 * This timing diagram depicts the video signal in and 613 * around the vertical blanking period. 614 * 615 * Assumptions about the fictitious mode used in this example: 616 * vblank_start >= 3 617 * vsync_start = vblank_start + 1 618 * vsync_end = vblank_start + 2 619 * vtotal = vblank_start + 3 620 * 621 * start of vblank: 622 * latch double buffered registers 623 * increment frame counter (ctg+) 624 * generate start of vblank interrupt (gen4+) 625 * | 626 * | frame start: 627 * | generate frame start interrupt (aka. 
vblank interrupt) (gmch) 628 * | may be shifted forward 1-3 extra lines via PIPECONF 629 * | | 630 * | | start of vsync: 631 * | | generate vsync interrupt 632 * | | | 633 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 634 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 635 * ----va---> <-----------------vb--------------------> <--------va------------- 636 * | | <----vs-----> | 637 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 638 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 639 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 640 * | | | 641 * last visible pixel first visible pixel 642 * | increment frame counter (gen3/4) 643 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 644 * 645 * x = horizontal active 646 * _ = horizontal blanking 647 * hs = horizontal sync 648 * va = vertical active 649 * vb = vertical blanking 650 * vs = vertical sync 651 * vbs = vblank_start (number) 652 * 653 * Summary: 654 * - most events happen at the start of horizontal sync 655 * - frame start happens at the start of horizontal blank, 1-4 lines 656 * (depending on PIPECONF settings) after the start of vblank 657 * - gen3/4 pixel and frame counter are synchronized with the start 658 * of horizontal active on the first line of vertical active 659 */ 660 661 /* Called from drm generic code, passed a 'crtc', which 662 * we use as a pipe index 663 */ 664 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 665 { 666 struct drm_i915_private *dev_priv = to_i915(dev); 667 i915_reg_t high_frame, low_frame; 668 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 669 struct intel_crtc *intel_crtc = 670 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 671 const struct drm_display_mode *mode = &intel_crtc->base.hwmode; 672 673 htotal = mode->crtc_htotal; 674 hsync_start = mode->crtc_hsync_start; 675 vbl_start = mode->crtc_vblank_start; 676 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 677 vbl_start = DIV_ROUND_UP(vbl_start, 2); 678 679 /* Convert to pixel count */ 680 vbl_start *= htotal; 681 682 /* Start of vblank event occurs at start of hsync */ 683 vbl_start -= htotal - hsync_start; 684 685 high_frame = PIPEFRAME(pipe); 686 low_frame = PIPEFRAMEPIXEL(pipe); 687 688 /* 689 * High & low register fields aren't synchronized, so make sure 690 * we get a low value that's stable across two reads of the high 691 * register. 692 */ 693 do { 694 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 695 low = I915_READ(low_frame); 696 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 697 } while (high1 != high2); 698 699 high1 >>= PIPE_FRAME_HIGH_SHIFT; 700 pixel = low & PIPE_PIXEL_MASK; 701 low >>= PIPE_FRAME_LOW_SHIFT; 702 703 /* 704 * The frame counter increments at beginning of active. 705 * Cook up a vblank counter by also checking the pixel 706 * counter against vblank start. 707 */ 708 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 709 } 710 711 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 712 { 713 struct drm_i915_private *dev_priv = to_i915(dev); 714 715 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 716 } 717 718 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. 
*/ 719 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 720 { 721 struct drm_device *dev = crtc->base.dev; 722 struct drm_i915_private *dev_priv = to_i915(dev); 723 const struct drm_display_mode *mode = &crtc->base.hwmode; 724 enum i915_pipe pipe = crtc->pipe; 725 int position, vtotal; 726 727 vtotal = mode->crtc_vtotal; 728 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 729 vtotal /= 2; 730 731 if (IS_GEN2(dev_priv)) 732 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 733 else 734 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 735 736 /* 737 * On HSW, the DSL reg (0x70000) appears to return 0 if we 738 * read it just before the start of vblank. So try it again 739 * so we don't accidentally end up spanning a vblank frame 740 * increment, causing the pipe_update_end() code to squawk at us. 741 * 742 * The nature of this problem means we can't simply check the ISR 743 * bit and return the vblank start value; nor can we use the scanline 744 * debug register in the transcoder as it appears to have the same 745 * problem. We may need to extend this to include other platforms, 746 * but so far testing only shows the problem on HSW. 747 */ 748 if (HAS_DDI(dev_priv) && !position) { 749 int i, temp; 750 751 for (i = 0; i < 100; i++) { 752 udelay(1); 753 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & 754 DSL_LINEMASK_GEN3; 755 if (temp != position) { 756 position = temp; 757 break; 758 } 759 } 760 } 761 762 /* 763 * See update_scanline_offset() for the details on the 764 * scanline_offset adjustment. 765 */ 766 return (position + crtc->scanline_offset) % vtotal; 767 } 768 769 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 770 unsigned int flags, int *vpos, int *hpos, 771 ktime_t *stime, ktime_t *etime, 772 const struct drm_display_mode *mode) 773 { 774 struct drm_i915_private *dev_priv = to_i915(dev); 775 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 776 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 777 int position; 778 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 779 bool in_vbl = true; 780 int ret = 0; 781 unsigned long irqflags; 782 783 if (WARN_ON(!mode->crtc_clock)) { 784 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 785 "pipe %c\n", pipe_name(pipe)); 786 return 0; 787 } 788 789 htotal = mode->crtc_htotal; 790 hsync_start = mode->crtc_hsync_start; 791 vtotal = mode->crtc_vtotal; 792 vbl_start = mode->crtc_vblank_start; 793 vbl_end = mode->crtc_vblank_end; 794 795 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 796 vbl_start = DIV_ROUND_UP(vbl_start, 2); 797 vbl_end /= 2; 798 vtotal /= 2; 799 } 800 801 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 802 803 /* 804 * Lock uncore.lock, as we will do multiple timing critical raw 805 * register reads, potentially with preemption disabled, so the 806 * following code must not block on uncore.lock. 807 */ 808 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 809 810 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 811 812 /* Get optional system timestamp before query. */ 813 if (stime) 814 *stime = ktime_get(); 815 816 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 817 /* No obvious pixelcount register. Only query vertical 818 * scanout position from Display scan line register. 819 */ 820 position = __intel_get_crtc_scanline(intel_crtc); 821 } else { 822 /* Have access to pixelcount since start of frame. 823 * We can split this into vertical and horizontal 824 * scanout position. 
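 * (vertical position = pixelcount / htotal, horizontal position = pixelcount % htotal; see the conversion to *vpos/*hpos at the end of this function.)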
*/ 825 826 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 827 828 /* convert to pixel counts */ 829 vbl_start *= htotal; 830 vbl_end *= htotal; 831 vtotal *= htotal; 832 833 /* 834 * In interlaced modes, the pixel counter counts all pixels, 835 * so one field will have htotal more pixels. To keep 836 * the reported position from jumping backwards when the pixel 837 * counter is beyond the length of the shorter field, just 838 * clamp the position to the length of the shorter field. This 839 * matches how the scanline counter based position works since 840 * the scanline counter doesn't count the two half lines. 841 */ 842 if (position >= vtotal) 843 position = vtotal - 1; 844 845 /* 846 * Start of vblank interrupt is triggered at start of hsync, 847 * just prior to the first active line of vblank. However we 848 * consider lines to start at the leading edge of horizontal 849 * active. So, should we get here before we've crossed into 850 * the horizontal active of the first line in vblank, we would 851 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 852 * always add htotal-hsync_start to the current pixel position. 853 */ 854 position = (position + htotal - hsync_start) % vtotal; 855 } 856 857 /* Get optional system timestamp after query. */ 858 if (etime) 859 *etime = ktime_get(); 860 861 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 862 863 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 864 865 in_vbl = position >= vbl_start && position < vbl_end; 866 867 /* 868 * While in vblank, position will be negative 869 * counting up towards 0 at vbl_end. And outside 870 * vblank, position will be positive counting 871 * up since vbl_end. 872 */ 873 if (position >= vbl_start) 874 position -= vbl_end; 875 else 876 position += vtotal - vbl_end; 877 878 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 879 *vpos = position; 880 *hpos = 0; 881 } else { 882 *vpos = position / htotal; 883 *hpos = position - (*vpos * htotal); 884 } 885 886 /* In vblank? 
*/ 887 if (in_vbl) 888 ret |= DRM_SCANOUTPOS_IN_VBLANK; 889 890 return ret; 891 } 892 893 int intel_get_crtc_scanline(struct intel_crtc *crtc) 894 { 895 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 896 unsigned long irqflags; 897 int position; 898 899 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 900 position = __intel_get_crtc_scanline(crtc); 901 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 902 903 return position; 904 } 905 906 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, 907 int *max_error, 908 struct timeval *vblank_time, 909 unsigned flags) 910 { 911 struct drm_crtc *crtc; 912 913 if (pipe >= INTEL_INFO(dev)->num_pipes) { 914 DRM_ERROR("Invalid crtc %u\n", pipe); 915 return -EINVAL; 916 } 917 918 /* Get drm_crtc to timestamp: */ 919 crtc = intel_get_crtc_for_pipe(dev, pipe); 920 if (crtc == NULL) { 921 DRM_ERROR("Invalid crtc %u\n", pipe); 922 return -EINVAL; 923 } 924 925 if (!crtc->hwmode.crtc_clock) { 926 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); 927 return -EBUSY; 928 } 929 930 /* Helper routine in DRM core does all the work: */ 931 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 932 vblank_time, flags, 933 &crtc->hwmode); 934 } 935 936 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 937 { 938 u32 busy_up, busy_down, max_avg, min_avg; 939 u8 new_delay; 940 941 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 942 943 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 944 945 new_delay = dev_priv->ips.cur_delay; 946 947 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 948 busy_up = I915_READ(RCPREVBSYTUPAVG); 949 busy_down = I915_READ(RCPREVBSYTDNAVG); 950 max_avg = I915_READ(RCBMAXAVG); 951 min_avg = I915_READ(RCBMINAVG); 952 953 /* Handle RCS change request from hw */ 954 if (busy_up > max_avg) { 955 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 956 new_delay = dev_priv->ips.cur_delay - 1; 957 if (new_delay < dev_priv->ips.max_delay) 958 new_delay = dev_priv->ips.max_delay; 959 } else if (busy_down < min_avg) { 960 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 961 new_delay = dev_priv->ips.cur_delay + 1; 962 if (new_delay > dev_priv->ips.min_delay) 963 new_delay = dev_priv->ips.min_delay; 964 } 965 966 if (ironlake_set_drps(dev_priv, new_delay)) 967 dev_priv->ips.cur_delay = new_delay; 968 969 lockmgr(&mchdev_lock, LK_RELEASE); 970 971 return; 972 } 973 974 static void notify_ring(struct intel_engine_cs *engine) 975 { 976 smp_store_mb(engine->breadcrumbs.irq_posted, true); 977 if (intel_engine_wakeup(engine)) { 978 trace_i915_gem_request_notify(engine); 979 engine->breadcrumbs.irq_wakeups++; 980 } 981 } 982 983 static void vlv_c0_read(struct drm_i915_private *dev_priv, 984 struct intel_rps_ei *ei) 985 { 986 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 987 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 988 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 989 } 990 991 static bool vlv_c0_above(struct drm_i915_private *dev_priv, 992 const struct intel_rps_ei *old, 993 const struct intel_rps_ei *now, 994 int threshold) 995 { 996 u64 time, c0; 997 unsigned int mul = 100; 998 999 if (old->cz_clock == 0) 1000 return false; 1001 1002 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 1003 mul <<= 8; 1004 1005 time = now->cz_clock - old->cz_clock; 1006 time *= threshold * dev_priv->czclk_freq; 1007 1008 /* Workload can be split between render + media, e.g. SwapBuffers 1009 * being blitted in X after being rendered in mesa. 
To account for 1010 * this we need to combine both engines into our activity counter. 1011 */ 1012 c0 = now->render_c0 - old->render_c0; 1013 c0 += now->media_c0 - old->media_c0; 1014 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; 1015 1016 return c0 >= time; 1017 } 1018 1019 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1020 { 1021 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1022 dev_priv->rps.up_ei = dev_priv->rps.down_ei; 1023 } 1024 1025 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1026 { 1027 struct intel_rps_ei now; 1028 u32 events = 0; 1029 1030 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1031 return 0; 1032 1033 vlv_c0_read(dev_priv, &now); 1034 if (now.cz_clock == 0) 1035 return 0; 1036 1037 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1038 if (!vlv_c0_above(dev_priv, 1039 &dev_priv->rps.down_ei, &now, 1040 dev_priv->rps.down_threshold)) 1041 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1042 dev_priv->rps.down_ei = now; 1043 } 1044 1045 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1046 if (vlv_c0_above(dev_priv, 1047 &dev_priv->rps.up_ei, &now, 1048 dev_priv->rps.up_threshold)) 1049 events |= GEN6_PM_RP_UP_THRESHOLD; 1050 dev_priv->rps.up_ei = now; 1051 } 1052 1053 return events; 1054 } 1055 1056 static bool any_waiters(struct drm_i915_private *dev_priv) 1057 { 1058 struct intel_engine_cs *engine; 1059 1060 for_each_engine(engine, dev_priv) 1061 if (intel_engine_has_waiter(engine)) 1062 return true; 1063 1064 return false; 1065 } 1066 1067 static void gen6_pm_rps_work(struct work_struct *work) 1068 { 1069 struct drm_i915_private *dev_priv = 1070 container_of(work, struct drm_i915_private, rps.work); 1071 bool client_boost; 1072 int new_delay, adj, min, max; 1073 u32 pm_iir; 1074 1075 spin_lock_irq(&dev_priv->irq_lock); 1076 /* Speed up work cancelation during disabling rps interrupts. */ 1077 if (!dev_priv->rps.interrupts_enabled) { 1078 spin_unlock_irq(&dev_priv->irq_lock); 1079 return; 1080 } 1081 1082 pm_iir = dev_priv->rps.pm_iir; 1083 dev_priv->rps.pm_iir = 0; 1084 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1085 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1086 client_boost = dev_priv->rps.client_boost; 1087 dev_priv->rps.client_boost = false; 1088 spin_unlock_irq(&dev_priv->irq_lock); 1089 1090 /* Make sure we didn't queue anything we're not going to process. */ 1091 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1092 1093 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1094 return; 1095 1096 mutex_lock(&dev_priv->rps.hw_lock); 1097 1098 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1099 1100 adj = dev_priv->rps.last_adj; 1101 new_delay = dev_priv->rps.cur_freq; 1102 min = dev_priv->rps.min_freq_softlimit; 1103 max = dev_priv->rps.max_freq_softlimit; 1104 if (client_boost || any_waiters(dev_priv)) 1105 max = dev_priv->rps.max_freq; 1106 if (client_boost && new_delay < dev_priv->rps.boost_freq) { 1107 new_delay = dev_priv->rps.boost_freq; 1108 adj = 0; 1109 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1110 if (adj > 0) 1111 adj *= 2; 1112 else /* CHV needs even encode values */ 1113 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1114 /* 1115 * For better performance, jump directly 1116 * to RPe if we're below it. 
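 * (RPe is the hardware's "efficient" frequency, tracked here as rps.efficient_freq.)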
*/ 1117 1118 if (new_delay < dev_priv->rps.efficient_freq - adj) { 1119 new_delay = dev_priv->rps.efficient_freq; 1120 adj = 0; 1121 } 1122 } else if (client_boost || any_waiters(dev_priv)) { 1123 adj = 0; 1124 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1125 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1126 new_delay = dev_priv->rps.efficient_freq; 1127 else 1128 new_delay = dev_priv->rps.min_freq_softlimit; 1129 adj = 0; 1130 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1131 if (adj < 0) 1132 adj *= 2; 1133 else /* CHV needs even encode values */ 1134 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1135 } else { /* unknown event */ 1136 adj = 0; 1137 } 1138 1139 dev_priv->rps.last_adj = adj; 1140 1141 /* sysfs frequency interfaces may have snuck in while servicing the 1142 * interrupt 1143 */ 1144 new_delay += adj; 1145 new_delay = clamp_t(int, new_delay, min, max); 1146 1147 intel_set_rps(dev_priv, new_delay); 1148 1149 mutex_unlock(&dev_priv->rps.hw_lock); 1150 } 1151 1152 1153 /** 1154 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1155 * occurred. 1156 * @work: workqueue struct 1157 * 1158 * Doesn't actually do anything except notify userspace. As a consequence of 1159 * this event, userspace should try to remap the bad rows since statistically 1160 * the same row is more likely to go bad again. 1161 */ 1162 static void ivybridge_parity_work(struct work_struct *work) 1163 { 1164 struct drm_i915_private *dev_priv = 1165 container_of(work, struct drm_i915_private, l3_parity.error_work); 1166 u32 error_status, row, bank, subbank; 1167 char *parity_event[6]; 1168 uint32_t misccpctl; 1169 uint8_t slice = 0; 1170 1171 /* We must turn off DOP level clock gating to access the L3 registers. 1172 * Rather than adding a get/put style interface, acquire struct mutex 1173 * any time we access those registers. 
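 * DOP clock gating is restored from the saved misccpctl value once all flagged slices have been read out.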
1174 */ 1175 mutex_lock(&dev_priv->drm.struct_mutex); 1176 1177 /* If we've screwed up tracking, just let the interrupt fire again */ 1178 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1179 goto out; 1180 1181 misccpctl = I915_READ(GEN7_MISCCPCTL); 1182 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1183 POSTING_READ(GEN7_MISCCPCTL); 1184 1185 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1186 i915_reg_t reg; 1187 1188 slice--; 1189 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1190 break; 1191 1192 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1193 1194 reg = GEN7_L3CDERRST1(slice); 1195 1196 error_status = I915_READ(reg); 1197 row = GEN7_PARITY_ERROR_ROW(error_status); 1198 bank = GEN7_PARITY_ERROR_BANK(error_status); 1199 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1200 1201 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1202 POSTING_READ(reg); 1203 1204 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1205 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1206 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1207 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1208 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1209 parity_event[5] = NULL; 1210 1211 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1212 KOBJ_CHANGE, parity_event); 1213 1214 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1215 slice, row, bank, subbank); 1216 1217 kfree(parity_event[4]); 1218 kfree(parity_event[3]); 1219 kfree(parity_event[2]); 1220 kfree(parity_event[1]); 1221 } 1222 1223 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1224 1225 out: 1226 WARN_ON(dev_priv->l3_parity.which_slice); 1227 spin_lock_irq(&dev_priv->irq_lock); 1228 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1229 spin_unlock_irq(&dev_priv->irq_lock); 1230 1231 mutex_unlock(&dev_priv->drm.struct_mutex); 1232 } 1233 1234 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1235 u32 iir) 1236 { 1237 if (!HAS_L3_DPF(dev_priv)) 1238 return; 1239 1240 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1241 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1242 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1243 1244 iir &= GT_PARITY_ERROR(dev_priv); 1245 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1246 dev_priv->l3_parity.which_slice |= 1 << 1; 1247 1248 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1249 dev_priv->l3_parity.which_slice |= 1 << 0; 1250 1251 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1252 } 1253 1254 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1255 u32 gt_iir) 1256 { 1257 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1258 notify_ring(&dev_priv->engine[RCS]); 1259 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1260 notify_ring(&dev_priv->engine[VCS]); 1261 } 1262 1263 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1264 u32 gt_iir) 1265 { 1266 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1267 notify_ring(&dev_priv->engine[RCS]); 1268 if (gt_iir & GT_BSD_USER_INTERRUPT) 1269 notify_ring(&dev_priv->engine[VCS]); 1270 if (gt_iir & GT_BLT_USER_INTERRUPT) 1271 notify_ring(&dev_priv->engine[BCS]); 1272 1273 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1274 GT_BSD_CS_ERROR_INTERRUPT | 1275 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1276 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1277 1278 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1279 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1280 } 1281 1282 static 
__always_inline void 1283 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1284 { 1285 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) 1286 notify_ring(engine); 1287 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) 1288 tasklet_schedule(&engine->irq_tasklet); 1289 } 1290 1291 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1292 u32 master_ctl, 1293 u32 gt_iir[4]) 1294 { 1295 irqreturn_t ret = IRQ_NONE; 1296 1297 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1298 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1299 if (gt_iir[0]) { 1300 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1301 ret = IRQ_HANDLED; 1302 } else 1303 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1304 } 1305 1306 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1307 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1308 if (gt_iir[1]) { 1309 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1310 ret = IRQ_HANDLED; 1311 } else 1312 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1313 } 1314 1315 if (master_ctl & GEN8_GT_VECS_IRQ) { 1316 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1317 if (gt_iir[3]) { 1318 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1319 ret = IRQ_HANDLED; 1320 } else 1321 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1322 } 1323 1324 if (master_ctl & GEN8_GT_PM_IRQ) { 1325 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1326 if (gt_iir[2] & dev_priv->pm_rps_events) { 1327 I915_WRITE_FW(GEN8_GT_IIR(2), 1328 gt_iir[2] & dev_priv->pm_rps_events); 1329 ret = IRQ_HANDLED; 1330 } else 1331 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1332 } 1333 1334 return ret; 1335 } 1336 1337 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1338 u32 gt_iir[4]) 1339 { 1340 if (gt_iir[0]) { 1341 gen8_cs_irq_handler(&dev_priv->engine[RCS], 1342 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1343 gen8_cs_irq_handler(&dev_priv->engine[BCS], 1344 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1345 } 1346 1347 if (gt_iir[1]) { 1348 gen8_cs_irq_handler(&dev_priv->engine[VCS], 1349 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1350 gen8_cs_irq_handler(&dev_priv->engine[VCS2], 1351 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1352 } 1353 1354 if (gt_iir[3]) 1355 gen8_cs_irq_handler(&dev_priv->engine[VECS], 1356 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1357 1358 if (gt_iir[2] & dev_priv->pm_rps_events) 1359 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1360 } 1361 1362 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1363 { 1364 switch (port) { 1365 case PORT_A: 1366 return val & PORTA_HOTPLUG_LONG_DETECT; 1367 case PORT_B: 1368 return val & PORTB_HOTPLUG_LONG_DETECT; 1369 case PORT_C: 1370 return val & PORTC_HOTPLUG_LONG_DETECT; 1371 default: 1372 return false; 1373 } 1374 } 1375 1376 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1377 { 1378 switch (port) { 1379 case PORT_E: 1380 return val & PORTE_HOTPLUG_LONG_DETECT; 1381 default: 1382 return false; 1383 } 1384 } 1385 1386 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1387 { 1388 switch (port) { 1389 case PORT_A: 1390 return val & PORTA_HOTPLUG_LONG_DETECT; 1391 case PORT_B: 1392 return val & PORTB_HOTPLUG_LONG_DETECT; 1393 case PORT_C: 1394 return val & PORTC_HOTPLUG_LONG_DETECT; 1395 case PORT_D: 1396 return val & PORTD_HOTPLUG_LONG_DETECT; 1397 default: 1398 return false; 1399 } 1400 } 1401 1402 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1403 { 1404 switch (port) { 1405 case PORT_A: 1406 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1407 default: 1408 return 
false; 1409 } 1410 } 1411 1412 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1413 { 1414 switch (port) { 1415 case PORT_B: 1416 return val & PORTB_HOTPLUG_LONG_DETECT; 1417 case PORT_C: 1418 return val & PORTC_HOTPLUG_LONG_DETECT; 1419 case PORT_D: 1420 return val & PORTD_HOTPLUG_LONG_DETECT; 1421 default: 1422 return false; 1423 } 1424 } 1425 1426 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1427 { 1428 switch (port) { 1429 case PORT_B: 1430 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1431 case PORT_C: 1432 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1433 case PORT_D: 1434 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1435 default: 1436 return false; 1437 } 1438 } 1439 1440 /* 1441 * Get a bit mask of pins that have triggered, and which ones may be long. 1442 * This can be called multiple times with the same masks to accumulate 1443 * hotplug detection results from several registers. 1444 * 1445 * Note that the caller is expected to zero out the masks initially. 1446 */ 1447 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1448 u32 hotplug_trigger, u32 dig_hotplug_reg, 1449 const u32 hpd[HPD_NUM_PINS], 1450 bool long_pulse_detect(enum port port, u32 val)) 1451 { 1452 enum port port; 1453 int i; 1454 1455 for_each_hpd_pin(i) { 1456 if ((hpd[i] & hotplug_trigger) == 0) 1457 continue; 1458 1459 *pin_mask |= BIT(i); 1460 1461 if (!intel_hpd_pin_to_port(i, &port)) 1462 continue; 1463 1464 if (long_pulse_detect(port, dig_hotplug_reg)) 1465 *long_mask |= BIT(i); 1466 } 1467 1468 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1469 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1470 1471 } 1472 1473 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1474 { 1475 wake_up_all(&dev_priv->gmbus_wait_queue); 1476 } 1477 1478 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1479 { 1480 wake_up_all(&dev_priv->gmbus_wait_queue); 1481 } 1482 1483 #if defined(CONFIG_DEBUG_FS) 1484 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1485 enum i915_pipe pipe, 1486 uint32_t crc0, uint32_t crc1, 1487 uint32_t crc2, uint32_t crc3, 1488 uint32_t crc4) 1489 { 1490 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1491 struct intel_pipe_crc_entry *entry; 1492 int head, tail; 1493 1494 spin_lock(&pipe_crc->lock); 1495 1496 if (!pipe_crc->entries) { 1497 spin_unlock(&pipe_crc->lock); 1498 DRM_DEBUG_KMS("spurious interrupt\n"); 1499 return; 1500 } 1501 1502 head = pipe_crc->head; 1503 tail = pipe_crc->tail; 1504 1505 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1506 spin_unlock(&pipe_crc->lock); 1507 DRM_ERROR("CRC buffer overflowing\n"); 1508 return; 1509 } 1510 1511 entry = &pipe_crc->entries[head]; 1512 1513 entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, 1514 pipe); 1515 entry->crc[0] = crc0; 1516 entry->crc[1] = crc1; 1517 entry->crc[2] = crc2; 1518 entry->crc[3] = crc3; 1519 entry->crc[4] = crc4; 1520 1521 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1522 pipe_crc->head = head; 1523 1524 spin_unlock(&pipe_crc->lock); 1525 1526 wake_up_interruptible(&pipe_crc->wq); 1527 } 1528 #else 1529 static inline void 1530 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1531 enum i915_pipe pipe, 1532 uint32_t crc0, uint32_t crc1, 1533 uint32_t crc2, uint32_t crc3, 1534 uint32_t crc4) {} 1535 #endif 1536 1537 1538 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1539 enum i915_pipe pipe) 1540 
{ 1541 display_pipe_crc_irq_handler(dev_priv, pipe, 1542 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1543 0, 0, 0, 0); 1544 } 1545 1546 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1547 enum i915_pipe pipe) 1548 { 1549 display_pipe_crc_irq_handler(dev_priv, pipe, 1550 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1551 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1552 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1553 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1554 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1555 } 1556 1557 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1558 enum i915_pipe pipe) 1559 { 1560 uint32_t res1, res2; 1561 1562 if (INTEL_GEN(dev_priv) >= 3) 1563 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1564 else 1565 res1 = 0; 1566 1567 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1568 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1569 else 1570 res2 = 0; 1571 1572 display_pipe_crc_irq_handler(dev_priv, pipe, 1573 I915_READ(PIPE_CRC_RES_RED(pipe)), 1574 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1575 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1576 res1, res2); 1577 } 1578 1579 /* The RPS events need forcewake, so we add them to a work queue and mask their 1580 * IMR bits until the work is done. Other interrupts can be processed without 1581 * the work queue. */ 1582 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1583 { 1584 if (pm_iir & dev_priv->pm_rps_events) { 1585 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1586 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1587 if (dev_priv->rps.interrupts_enabled) { 1588 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1589 schedule_work(&dev_priv->rps.work); 1590 } 1591 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1592 } 1593 1594 if (INTEL_INFO(dev_priv)->gen >= 8) 1595 return; 1596 1597 if (HAS_VEBOX(dev_priv)) { 1598 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1599 notify_ring(&dev_priv->engine[VECS]); 1600 1601 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1602 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1603 } 1604 } 1605 1606 static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv, 1607 enum i915_pipe pipe) 1608 { 1609 bool ret; 1610 1611 ret = drm_handle_vblank(&dev_priv->drm, pipe); 1612 if (ret) 1613 intel_finish_page_flip_mmio(dev_priv, pipe); 1614 1615 return ret; 1616 } 1617 1618 static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv, 1619 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1620 { 1621 int pipe; 1622 1623 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1624 1625 if (!dev_priv->display_irqs_enabled) { 1626 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1627 return; 1628 } 1629 1630 for_each_pipe(dev_priv, pipe) { 1631 i915_reg_t reg; 1632 u32 mask, iir_bit = 0; 1633 1634 /* 1635 * PIPESTAT bits get signalled even when the interrupt is 1636 * disabled with the mask bits, and some of the status bits do 1637 * not generate interrupts at all (like the underrun bit). Hence 1638 * we need to be careful that we only handle what we want to 1639 * handle. 1640 */ 1641 1642 /* fifo underruns are filtered in the underrun handler. 
*/ 1643 mask = PIPE_FIFO_UNDERRUN_STATUS; 1644 1645 switch (pipe) { 1646 case PIPE_A: 1647 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1648 break; 1649 case PIPE_B: 1650 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1651 break; 1652 case PIPE_C: 1653 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1654 break; 1655 } 1656 if (iir & iir_bit) 1657 mask |= dev_priv->pipestat_irq_mask[pipe]; 1658 1659 if (!mask) 1660 continue; 1661 1662 reg = PIPESTAT(pipe); 1663 mask |= PIPESTAT_INT_ENABLE_MASK; 1664 pipe_stats[pipe] = I915_READ(reg) & mask; 1665 1666 /* 1667 * Clear the PIPE*STAT regs before the IIR 1668 */ 1669 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1670 PIPESTAT_INT_STATUS_MASK)) 1671 I915_WRITE(reg, pipe_stats[pipe]); 1672 } 1673 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1674 } 1675 1676 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1677 u32 pipe_stats[I915_MAX_PIPES]) 1678 { 1679 enum i915_pipe pipe; 1680 1681 for_each_pipe(dev_priv, pipe) { 1682 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1683 intel_pipe_handle_vblank(dev_priv, pipe)) 1684 intel_check_page_flip(dev_priv, pipe); 1685 1686 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 1687 intel_finish_page_flip_cs(dev_priv, pipe); 1688 1689 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1690 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1691 1692 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1693 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1694 } 1695 1696 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1697 gmbus_irq_handler(dev_priv); 1698 } 1699 1700 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1701 { 1702 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1703 1704 if (hotplug_status) 1705 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1706 1707 return hotplug_status; 1708 } 1709 1710 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1711 u32 hotplug_status) 1712 { 1713 u32 pin_mask = 0, long_mask = 0; 1714 1715 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1716 IS_CHERRYVIEW(dev_priv)) { 1717 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1718 1719 if (hotplug_trigger) { 1720 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1721 hotplug_trigger, hpd_status_g4x, 1722 i9xx_port_hotplug_long_detect); 1723 1724 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1725 } 1726 1727 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1728 dp_aux_irq_handler(dev_priv); 1729 } else { 1730 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1731 1732 if (hotplug_trigger) { 1733 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1734 hotplug_trigger, hpd_status_i915, 1735 i9xx_port_hotplug_long_detect); 1736 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1737 } 1738 } 1739 } 1740 1741 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1742 { 1743 struct drm_device *dev = arg; 1744 struct drm_i915_private *dev_priv = to_i915(dev); 1745 irqreturn_t ret = IRQ_NONE; 1746 1747 if (!intel_irqs_enabled(dev_priv)) 1748 return IRQ_NONE; 1749 1750 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1751 disable_rpm_wakeref_asserts(dev_priv); 1752 1753 do { 1754 u32 iir, gt_iir, pm_iir; 1755 u32 pipe_stats[I915_MAX_PIPES] = {}; 1756 u32 hotplug_status = 0; 1757 u32 ier = 0; 1758 1759 gt_iir = I915_READ(GTIIR); 1760 pm_iir = I915_READ(GEN6_PMIIR); 1761 iir = I915_READ(VLV_IIR); 1762 1763 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 
1764 break; 1765 1766 ret = IRQ_HANDLED; 1767 1768 /* 1769 * Theory on interrupt generation, based on empirical evidence: 1770 * 1771 * x = ((VLV_IIR & VLV_IER) || 1772 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1773 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1774 * 1775 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1776 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1777 * guarantee the CPU interrupt will be raised again even if we 1778 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1779 * bits this time around. 1780 */ 1781 I915_WRITE(VLV_MASTER_IER, 0); 1782 ier = I915_READ(VLV_IER); 1783 I915_WRITE(VLV_IER, 0); 1784 1785 if (gt_iir) 1786 I915_WRITE(GTIIR, gt_iir); 1787 if (pm_iir) 1788 I915_WRITE(GEN6_PMIIR, pm_iir); 1789 1790 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1791 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1792 1793 /* Call regardless, as some status bits might not be 1794 * signalled in iir */ 1795 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1796 1797 /* 1798 * VLV_IIR is single buffered, and reflects the level 1799 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1800 */ 1801 if (iir) 1802 I915_WRITE(VLV_IIR, iir); 1803 1804 I915_WRITE(VLV_IER, ier); 1805 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1806 POSTING_READ(VLV_MASTER_IER); 1807 1808 if (gt_iir) 1809 snb_gt_irq_handler(dev_priv, gt_iir); 1810 if (pm_iir) 1811 gen6_rps_irq_handler(dev_priv, pm_iir); 1812 1813 if (hotplug_status) 1814 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1815 1816 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1817 } while (0); 1818 1819 enable_rpm_wakeref_asserts(dev_priv); 1820 1821 return ret; 1822 } 1823 1824 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1825 { 1826 struct drm_device *dev = arg; 1827 struct drm_i915_private *dev_priv = to_i915(dev); 1828 irqreturn_t ret = IRQ_NONE; 1829 1830 if (!intel_irqs_enabled(dev_priv)) 1831 return IRQ_NONE; 1832 1833 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1834 disable_rpm_wakeref_asserts(dev_priv); 1835 1836 do { 1837 u32 master_ctl, iir; 1838 u32 gt_iir[4] = {}; 1839 u32 pipe_stats[I915_MAX_PIPES] = {}; 1840 u32 hotplug_status = 0; 1841 u32 ier = 0; 1842 1843 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1844 iir = I915_READ(VLV_IIR); 1845 1846 if (master_ctl == 0 && iir == 0) 1847 break; 1848 1849 ret = IRQ_HANDLED; 1850 1851 /* 1852 * Theory on interrupt generation, based on empirical evidence: 1853 * 1854 * x = ((VLV_IIR & VLV_IER) || 1855 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1856 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1857 * 1858 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1859 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1860 * guarantee the CPU interrupt will be raised again even if we 1861 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1862 * bits this time around. 1863 */ 1864 I915_WRITE(GEN8_MASTER_IRQ, 0); 1865 ier = I915_READ(VLV_IER); 1866 I915_WRITE(VLV_IER, 0); 1867 1868 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 1869 1870 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1871 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1872 1873 /* Call regardless, as some status bits might not be 1874 * signalled in iir */ 1875 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats); 1876 1877 /* 1878 * VLV_IIR is single buffered, and reflects the level 1879 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
1880 */ 1881 if (iir) 1882 I915_WRITE(VLV_IIR, iir); 1883 1884 I915_WRITE(VLV_IER, ier); 1885 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1886 POSTING_READ(GEN8_MASTER_IRQ); 1887 1888 gen8_gt_irq_handler(dev_priv, gt_iir); 1889 1890 if (hotplug_status) 1891 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 1892 1893 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 1894 } while (0); 1895 1896 enable_rpm_wakeref_asserts(dev_priv); 1897 1898 return ret; 1899 } 1900 1901 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1902 u32 hotplug_trigger, 1903 const u32 hpd[HPD_NUM_PINS]) 1904 { 1905 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1906 1907 /* 1908 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1909 * unless we touch the hotplug register, even if hotplug_trigger is 1910 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1911 * errors. 1912 */ 1913 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1914 if (!hotplug_trigger) { 1915 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1916 PORTD_HOTPLUG_STATUS_MASK | 1917 PORTC_HOTPLUG_STATUS_MASK | 1918 PORTB_HOTPLUG_STATUS_MASK; 1919 dig_hotplug_reg &= ~mask; 1920 } 1921 1922 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1923 if (!hotplug_trigger) 1924 return; 1925 1926 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1927 dig_hotplug_reg, hpd, 1928 pch_port_hotplug_long_detect); 1929 1930 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1931 } 1932 1933 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 1934 { 1935 int pipe; 1936 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1937 1938 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 1939 1940 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1941 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1942 SDE_AUDIO_POWER_SHIFT); 1943 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1944 port_name(port)); 1945 } 1946 1947 if (pch_iir & SDE_AUX_MASK) 1948 dp_aux_irq_handler(dev_priv); 1949 1950 if (pch_iir & SDE_GMBUS) 1951 gmbus_irq_handler(dev_priv); 1952 1953 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1954 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1955 1956 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1957 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1958 1959 if (pch_iir & SDE_POISON) 1960 DRM_ERROR("PCH poison interrupt\n"); 1961 1962 if (pch_iir & SDE_FDI_MASK) 1963 for_each_pipe(dev_priv, pipe) 1964 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1965 pipe_name(pipe), 1966 I915_READ(FDI_RX_IIR(pipe))); 1967 1968 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1969 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1970 1971 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1972 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1973 1974 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1975 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1976 1977 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1978 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1979 } 1980 1981 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 1982 { 1983 u32 err_int = I915_READ(GEN7_ERR_INT); 1984 enum i915_pipe pipe; 1985 1986 if (err_int & ERR_INT_POISON) 1987 DRM_ERROR("Poison interrupt\n"); 1988 1989 for_each_pipe(dev_priv, pipe) { 1990 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1991 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1992 1993 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1994 if (IS_IVYBRIDGE(dev_priv)) 1995
ivb_pipe_crc_irq_handler(dev_priv, pipe); 1996 else 1997 hsw_pipe_crc_irq_handler(dev_priv, pipe); 1998 } 1999 } 2000 2001 I915_WRITE(GEN7_ERR_INT, err_int); 2002 } 2003 2004 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2005 { 2006 u32 serr_int = I915_READ(SERR_INT); 2007 2008 if (serr_int & SERR_INT_POISON) 2009 DRM_ERROR("PCH poison interrupt\n"); 2010 2011 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2012 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2013 2014 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2015 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2016 2017 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2019 2020 I915_WRITE(SERR_INT, serr_int); 2021 } 2022 2023 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2024 { 2025 int pipe; 2026 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2027 2028 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2029 2030 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2031 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2032 SDE_AUDIO_POWER_SHIFT_CPT); 2033 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2034 port_name(port)); 2035 } 2036 2037 if (pch_iir & SDE_AUX_MASK_CPT) 2038 dp_aux_irq_handler(dev_priv); 2039 2040 if (pch_iir & SDE_GMBUS_CPT) 2041 gmbus_irq_handler(dev_priv); 2042 2043 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2044 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2045 2046 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2047 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2048 2049 if (pch_iir & SDE_FDI_MASK_CPT) 2050 for_each_pipe(dev_priv, pipe) 2051 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2052 pipe_name(pipe), 2053 I915_READ(FDI_RX_IIR(pipe))); 2054 2055 if (pch_iir & SDE_ERROR_CPT) 2056 cpt_serr_int_handler(dev_priv); 2057 } 2058 2059 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2060 { 2061 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2062 ~SDE_PORTE_HOTPLUG_SPT; 2063 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2064 u32 pin_mask = 0, long_mask = 0; 2065 2066 if (hotplug_trigger) { 2067 u32 dig_hotplug_reg; 2068 2069 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2070 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2071 2072 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2073 dig_hotplug_reg, hpd_spt, 2074 spt_port_hotplug_long_detect); 2075 } 2076 2077 if (hotplug2_trigger) { 2078 u32 dig_hotplug_reg; 2079 2080 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2081 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2082 2083 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2084 dig_hotplug_reg, hpd_spt, 2085 spt_port_hotplug2_long_detect); 2086 } 2087 2088 if (pin_mask) 2089 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2090 2091 if (pch_iir & SDE_GMBUS_CPT) 2092 gmbus_irq_handler(dev_priv); 2093 } 2094 2095 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2096 u32 hotplug_trigger, 2097 const u32 hpd[HPD_NUM_PINS]) 2098 { 2099 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2100 2101 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2102 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2103 2104 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2105 dig_hotplug_reg, hpd, 2106 ilk_port_hotplug_long_detect); 2107 2108 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2109 } 2110 2111 static void ilk_display_irq_handler(struct drm_i915_private 
*dev_priv, 2112 u32 de_iir) 2113 { 2114 enum i915_pipe pipe; 2115 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2116 2117 if (hotplug_trigger) 2118 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2119 2120 if (de_iir & DE_AUX_CHANNEL_A) 2121 dp_aux_irq_handler(dev_priv); 2122 2123 if (de_iir & DE_GSE) 2124 intel_opregion_asle_intr(dev_priv); 2125 2126 if (de_iir & DE_POISON) 2127 DRM_ERROR("Poison interrupt\n"); 2128 2129 for_each_pipe(dev_priv, pipe) { 2130 if (de_iir & DE_PIPE_VBLANK(pipe) && 2131 intel_pipe_handle_vblank(dev_priv, pipe)) 2132 intel_check_page_flip(dev_priv, pipe); 2133 2134 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2135 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2136 2137 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2138 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2139 2140 /* plane/pipes map 1:1 on ilk+ */ 2141 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 2142 intel_finish_page_flip_cs(dev_priv, pipe); 2143 } 2144 2145 /* check event from PCH */ 2146 if (de_iir & DE_PCH_EVENT) { 2147 u32 pch_iir = I915_READ(SDEIIR); 2148 2149 if (HAS_PCH_CPT(dev_priv)) 2150 cpt_irq_handler(dev_priv, pch_iir); 2151 else 2152 ibx_irq_handler(dev_priv, pch_iir); 2153 2154 /* should clear PCH hotplug event before clear CPU irq */ 2155 I915_WRITE(SDEIIR, pch_iir); 2156 } 2157 2158 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2159 ironlake_rps_change_irq_handler(dev_priv); 2160 } 2161 2162 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2163 u32 de_iir) 2164 { 2165 enum i915_pipe pipe; 2166 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2167 2168 if (hotplug_trigger) 2169 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2170 2171 if (de_iir & DE_ERR_INT_IVB) 2172 ivb_err_int_handler(dev_priv); 2173 2174 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2175 dp_aux_irq_handler(dev_priv); 2176 2177 if (de_iir & DE_GSE_IVB) 2178 intel_opregion_asle_intr(dev_priv); 2179 2180 for_each_pipe(dev_priv, pipe) { 2181 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2182 intel_pipe_handle_vblank(dev_priv, pipe)) 2183 intel_check_page_flip(dev_priv, pipe); 2184 2185 /* plane/pipes map 1:1 on ilk+ */ 2186 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 2187 intel_finish_page_flip_cs(dev_priv, pipe); 2188 } 2189 2190 /* check event from PCH */ 2191 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2192 u32 pch_iir = I915_READ(SDEIIR); 2193 2194 cpt_irq_handler(dev_priv, pch_iir); 2195 2196 /* clear PCH hotplug event before clear CPU irq */ 2197 I915_WRITE(SDEIIR, pch_iir); 2198 } 2199 } 2200 2201 /* 2202 * To handle irqs with the minimum potential races with fresh interrupts, we: 2203 * 1 - Disable Master Interrupt Control. 2204 * 2 - Find the source(s) of the interrupt. 2205 * 3 - Clear the Interrupt Identity bits (IIR). 2206 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2207 * 5 - Re-enable Master Interrupt Control. 
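 * Below, steps 2-4 are applied in turn to GTIIR, DEIIR and GEN6_PMIIR; SDEIIR is
 * drained from the display handlers while SDEIER is held at zero.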
2208 */ 2209 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2210 { 2211 struct drm_device *dev = arg; 2212 struct drm_i915_private *dev_priv = to_i915(dev); 2213 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2214 irqreturn_t ret = IRQ_NONE; 2215 2216 if (!intel_irqs_enabled(dev_priv)) 2217 return IRQ_NONE; 2218 2219 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2220 disable_rpm_wakeref_asserts(dev_priv); 2221 2222 /* disable master interrupt before clearing iir */ 2223 de_ier = I915_READ(DEIER); 2224 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2225 POSTING_READ(DEIER); 2226 2227 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2228 * interrupts will be stored on its back queue, and then we'll be 2229 * able to process them after we restore SDEIER (as soon as we restore 2230 * it, we'll get an interrupt if SDEIIR still has something to process 2231 * due to its back queue). */ 2232 if (!HAS_PCH_NOP(dev_priv)) { 2233 sde_ier = I915_READ(SDEIER); 2234 I915_WRITE(SDEIER, 0); 2235 POSTING_READ(SDEIER); 2236 } 2237 2238 /* Find, clear, then process each source of interrupt */ 2239 2240 gt_iir = I915_READ(GTIIR); 2241 if (gt_iir) { 2242 I915_WRITE(GTIIR, gt_iir); 2243 ret = IRQ_HANDLED; 2244 if (INTEL_GEN(dev_priv) >= 6) 2245 snb_gt_irq_handler(dev_priv, gt_iir); 2246 else 2247 ilk_gt_irq_handler(dev_priv, gt_iir); 2248 } 2249 2250 de_iir = I915_READ(DEIIR); 2251 if (de_iir) { 2252 I915_WRITE(DEIIR, de_iir); 2253 ret = IRQ_HANDLED; 2254 if (INTEL_GEN(dev_priv) >= 7) 2255 ivb_display_irq_handler(dev_priv, de_iir); 2256 else 2257 ilk_display_irq_handler(dev_priv, de_iir); 2258 } 2259 2260 if (INTEL_GEN(dev_priv) >= 6) { 2261 u32 pm_iir = I915_READ(GEN6_PMIIR); 2262 if (pm_iir) { 2263 I915_WRITE(GEN6_PMIIR, pm_iir); 2264 ret = IRQ_HANDLED; 2265 gen6_rps_irq_handler(dev_priv, pm_iir); 2266 } 2267 } 2268 2269 I915_WRITE(DEIER, de_ier); 2270 POSTING_READ(DEIER); 2271 if (!HAS_PCH_NOP(dev_priv)) { 2272 I915_WRITE(SDEIER, sde_ier); 2273 POSTING_READ(SDEIER); 2274 } 2275 2276 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2277 enable_rpm_wakeref_asserts(dev_priv); 2278 2279 return ret; 2280 } 2281 2282 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2283 u32 hotplug_trigger, 2284 const u32 hpd[HPD_NUM_PINS]) 2285 { 2286 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2287 2288 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2289 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2290 2291 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2292 dig_hotplug_reg, hpd, 2293 bxt_port_hotplug_long_detect); 2294 2295 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2296 } 2297 2298 static irqreturn_t 2299 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2300 { 2301 irqreturn_t ret = IRQ_NONE; 2302 u32 iir; 2303 enum i915_pipe pipe; 2304 2305 if (master_ctl & GEN8_DE_MISC_IRQ) { 2306 iir = I915_READ(GEN8_DE_MISC_IIR); 2307 if (iir) { 2308 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2309 ret = IRQ_HANDLED; 2310 if (iir & GEN8_DE_MISC_GSE) 2311 intel_opregion_asle_intr(dev_priv); 2312 else 2313 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2314 } 2315 else 2316 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2317 } 2318 2319 if (master_ctl & GEN8_DE_PORT_IRQ) { 2320 iir = I915_READ(GEN8_DE_PORT_IIR); 2321 if (iir) { 2322 u32 tmp_mask; 2323 bool found = false; 2324 2325 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2326 ret = IRQ_HANDLED; 2327 2328 tmp_mask =
GEN8_AUX_CHANNEL_A; 2329 if (INTEL_INFO(dev_priv)->gen >= 9) 2330 tmp_mask |= GEN9_AUX_CHANNEL_B | 2331 GEN9_AUX_CHANNEL_C | 2332 GEN9_AUX_CHANNEL_D; 2333 2334 if (iir & tmp_mask) { 2335 dp_aux_irq_handler(dev_priv); 2336 found = true; 2337 } 2338 2339 if (IS_BROXTON(dev_priv)) { 2340 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2341 if (tmp_mask) { 2342 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2343 hpd_bxt); 2344 found = true; 2345 } 2346 } else if (IS_BROADWELL(dev_priv)) { 2347 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2348 if (tmp_mask) { 2349 ilk_hpd_irq_handler(dev_priv, 2350 tmp_mask, hpd_bdw); 2351 found = true; 2352 } 2353 } 2354 2355 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2356 gmbus_irq_handler(dev_priv); 2357 found = true; 2358 } 2359 2360 if (!found) 2361 DRM_ERROR("Unexpected DE Port interrupt\n"); 2362 } 2363 else 2364 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2365 } 2366 2367 for_each_pipe(dev_priv, pipe) { 2368 u32 flip_done, fault_errors; 2369 2370 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2371 continue; 2372 2373 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2374 if (!iir) { 2375 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2376 continue; 2377 } 2378 2379 ret = IRQ_HANDLED; 2380 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2381 2382 if (iir & GEN8_PIPE_VBLANK && 2383 intel_pipe_handle_vblank(dev_priv, pipe)) 2384 intel_check_page_flip(dev_priv, pipe); 2385 2386 flip_done = iir; 2387 if (INTEL_INFO(dev_priv)->gen >= 9) 2388 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2389 else 2390 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2391 2392 if (flip_done) 2393 intel_finish_page_flip_cs(dev_priv, pipe); 2394 2395 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2396 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2397 2398 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2399 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2400 2401 fault_errors = iir; 2402 if (INTEL_INFO(dev_priv)->gen >= 9) 2403 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2404 else 2405 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2406 2407 if (fault_errors) 2408 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2409 pipe_name(pipe), 2410 fault_errors); 2411 } 2412 2413 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2414 master_ctl & GEN8_DE_PCH_IRQ) { 2415 /* 2416 * FIXME(BDW): Assume for now that the new interrupt handling 2417 * scheme also closed the SDE interrupt handling race we've seen 2418 * on older pch-split platforms. But this needs testing. 2419 */ 2420 iir = I915_READ(SDEIIR); 2421 if (iir) { 2422 I915_WRITE(SDEIIR, iir); 2423 ret = IRQ_HANDLED; 2424 2425 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2426 spt_irq_handler(dev_priv, iir); 2427 else 2428 cpt_irq_handler(dev_priv, iir); 2429 } else { 2430 /* 2431 * Like on previous PCH there seems to be something 2432 * fishy going on with forwarding PCH interrupts.
2433 */ 2434 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2435 } 2436 } 2437 2438 return ret; 2439 } 2440 2441 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2442 { 2443 struct drm_device *dev = arg; 2444 struct drm_i915_private *dev_priv = to_i915(dev); 2445 u32 master_ctl; 2446 u32 gt_iir[4] = {}; 2447 irqreturn_t ret; 2448 2449 if (!intel_irqs_enabled(dev_priv)) 2450 return IRQ_NONE; 2451 2452 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2453 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2454 if (!master_ctl) 2455 return IRQ_NONE; 2456 2457 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2458 2459 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2460 disable_rpm_wakeref_asserts(dev_priv); 2461 2462 /* Find, clear, then process each source of interrupt */ 2463 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2464 gen8_gt_irq_handler(dev_priv, gt_iir); 2465 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2466 2467 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2468 POSTING_READ_FW(GEN8_MASTER_IRQ); 2469 2470 enable_rpm_wakeref_asserts(dev_priv); 2471 2472 return ret; 2473 } 2474 2475 static void i915_error_wake_up(struct drm_i915_private *dev_priv) 2476 { 2477 /* 2478 * Notify all waiters for GPU completion events that reset state has 2479 * been changed, and that they need to restart their wait after 2480 * checking for potential errors (and bail out to drop locks if there is 2481 * a gpu reset pending so that i915_error_work_func can acquire them). 2482 */ 2483 2484 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2485 wake_up_all(&dev_priv->gpu_error.wait_queue); 2486 2487 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2488 wake_up_all(&dev_priv->pending_flip_queue); 2489 } 2490 2491 /** 2492 * i915_reset_and_wakeup - do process context error handling work 2493 * @dev_priv: i915 device private 2494 * 2495 * Fire an error uevent so userspace can see that a hang or error 2496 * was detected. 2497 */ 2498 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv) 2499 { 2500 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2501 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2502 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2503 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2504 int ret; 2505 2506 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2507 2508 /* 2509 * Note that there's only one work item which does gpu resets, so we 2510 * need not worry about concurrent gpu resets potentially incrementing 2511 * error->reset_counter twice. We only need to take care of another 2512 * racing irq/hangcheck declaring the gpu dead for a second time. A 2513 * quick check for that is good enough: schedule_work ensures the 2514 * correct ordering between hang detection and this work item, and since 2515 * the reset in-progress bit is only ever set by code outside of this 2516 * work we don't need to worry about any other races. 2517 */ 2518 if (i915_reset_in_progress(&dev_priv->gpu_error)) { 2519 DRM_DEBUG_DRIVER("resetting chip\n"); 2520 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2521 2522 /* 2523 * In most cases it's guaranteed that we get here with an RPM 2524 * reference held, for example because there is a pending GPU 2525 * request that won't finish until the reset is done. This 2526 * isn't the case at least when we get here by doing a 2527 * simulated reset via debugs, so get an RPM reference. 
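 * The reference is dropped again with intel_runtime_pm_put() once
 * intel_finish_reset() has run.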
2528 */ 2529 intel_runtime_pm_get(dev_priv); 2530 2531 intel_prepare_reset(dev_priv); 2532 2533 /* 2534 * All state reset _must_ be completed before we update the 2535 * reset counter, for otherwise waiters might miss the reset 2536 * pending state and not properly drop locks, resulting in 2537 * deadlocks with the reset work. 2538 */ 2539 ret = i915_reset(dev_priv); 2540 2541 intel_finish_reset(dev_priv); 2542 2543 intel_runtime_pm_put(dev_priv); 2544 2545 if (ret == 0) 2546 kobject_uevent_env(kobj, 2547 KOBJ_CHANGE, reset_done_event); 2548 2549 /* 2550 * Note: The wake_up also serves as a memory barrier so that 2551 * waiters see the update value of the reset counter atomic_t. 2552 */ 2553 wake_up_all(&dev_priv->gpu_error.reset_queue); 2554 } 2555 } 2556 2557 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv) 2558 { 2559 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2560 u32 eir = I915_READ(EIR); 2561 int pipe, i; 2562 2563 if (!eir) 2564 return; 2565 2566 pr_err("render error detected, EIR: 0x%08x\n", eir); 2567 2568 i915_get_extra_instdone(dev_priv, instdone); 2569 2570 if (IS_G4X(dev_priv)) { 2571 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2572 u32 ipeir = I915_READ(IPEIR_I965); 2573 2574 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2575 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2576 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2577 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2578 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2579 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2580 I915_WRITE(IPEIR_I965, ipeir); 2581 POSTING_READ(IPEIR_I965); 2582 } 2583 if (eir & GM45_ERROR_PAGE_TABLE) { 2584 u32 pgtbl_err = I915_READ(PGTBL_ER); 2585 pr_err("page table error\n"); 2586 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2587 I915_WRITE(PGTBL_ER, pgtbl_err); 2588 POSTING_READ(PGTBL_ER); 2589 } 2590 } 2591 2592 if (!IS_GEN2(dev_priv)) { 2593 if (eir & I915_ERROR_PAGE_TABLE) { 2594 u32 pgtbl_err = I915_READ(PGTBL_ER); 2595 pr_err("page table error\n"); 2596 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2597 I915_WRITE(PGTBL_ER, pgtbl_err); 2598 POSTING_READ(PGTBL_ER); 2599 } 2600 } 2601 2602 if (eir & I915_ERROR_MEMORY_REFRESH) { 2603 pr_err("memory refresh error:\n"); 2604 for_each_pipe(dev_priv, pipe) 2605 pr_err("pipe %c stat: 0x%08x\n", 2606 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2607 /* pipestat has already been acked */ 2608 } 2609 if (eir & I915_ERROR_INSTRUCTION) { 2610 pr_err("instruction error\n"); 2611 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2612 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2613 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2614 if (INTEL_GEN(dev_priv) < 4) { 2615 u32 ipeir = I915_READ(IPEIR); 2616 2617 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2618 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2619 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2620 I915_WRITE(IPEIR, ipeir); 2621 POSTING_READ(IPEIR); 2622 } else { 2623 u32 ipeir = I915_READ(IPEIR_I965); 2624 2625 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2626 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2627 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2628 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2629 I915_WRITE(IPEIR_I965, ipeir); 2630 POSTING_READ(IPEIR_I965); 2631 } 2632 } 2633 2634 I915_WRITE(EIR, eir); 2635 POSTING_READ(EIR); 2636 eir = I915_READ(EIR); 2637 if (eir) { 2638 /* 2639 * some errors might have become stuck, 2640 * mask them. 
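 * (the stuck bits are ORed into EMR below so they no longer raise interrupts)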
2641 */ 2642 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2643 I915_WRITE(EMR, I915_READ(EMR) | eir); 2644 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2645 } 2646 } 2647 2648 /** 2649 * i915_handle_error - handle a gpu error 2650 * @dev_priv: i915 device private 2651 * @engine_mask: mask representing engines that are hung 2652 * Do some basic checking of register state at error time and 2653 * dump it to the syslog. Also call i915_capture_error_state() to make 2654 * sure we get a record and make it available in debugfs. Fire a uevent 2655 * so userspace knows something bad happened (should trigger collection 2656 * of a ring dump etc.). 2657 * @fmt: Error message format string 2658 */ 2659 void i915_handle_error(struct drm_i915_private *dev_priv, 2660 u32 engine_mask, 2661 const char *fmt, ...) 2662 { 2663 va_list args; 2664 char error_msg[80]; 2665 2666 va_start(args, fmt); 2667 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2668 va_end(args); 2669 2670 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2671 i915_report_and_clear_eir(dev_priv); 2672 2673 if (engine_mask) { 2674 atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2675 &dev_priv->gpu_error.reset_counter); 2676 2677 /* 2678 * Wakeup waiting processes so that the reset function 2679 * i915_reset_and_wakeup doesn't deadlock trying to grab 2680 * various locks. By bumping the reset counter first, the woken 2681 * processes will see a reset in progress and back off, 2682 * releasing their locks and then wait for the reset completion. 2683 * We must do this for _all_ gpu waiters that might hold locks 2684 * that the reset work needs to acquire. 2685 * 2686 * Note: The wake_up serves as the required memory barrier to 2687 * ensure that the waiters see the updated value of the reset 2688 * counter atomic_t. 2689 */ 2690 i915_error_wake_up(dev_priv); 2691 } 2692 2693 i915_reset_and_wakeup(dev_priv); 2694 } 2695 2696 /* Called from drm generic code, passed 'crtc' which 2697 * we use as a pipe index 2698 */ 2699 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) 2700 { 2701 struct drm_i915_private *dev_priv = to_i915(dev); 2702 unsigned long irqflags; 2703 2704 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2705 if (INTEL_INFO(dev)->gen >= 4) 2706 i915_enable_pipestat(dev_priv, pipe, 2707 PIPE_START_VBLANK_INTERRUPT_STATUS); 2708 else 2709 i915_enable_pipestat(dev_priv, pipe, 2710 PIPE_VBLANK_INTERRUPT_STATUS); 2711 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2712 2713 return 0; 2714 } 2715 2716 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2717 { 2718 struct drm_i915_private *dev_priv = to_i915(dev); 2719 unsigned long irqflags; 2720 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2721 DE_PIPE_VBLANK(pipe); 2722 2723 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2724 ilk_enable_display_irq(dev_priv, bit); 2725 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2726 2727 return 0; 2728 } 2729 2730 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) 2731 { 2732 struct drm_i915_private *dev_priv = to_i915(dev); 2733 unsigned long irqflags; 2734 2735 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2736 i915_enable_pipestat(dev_priv, pipe, 2737 PIPE_START_VBLANK_INTERRUPT_STATUS); 2738 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2739 2740 return 0; 2741 } 2742 2743 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2744 { 2745 struct drm_i915_private *dev_priv = to_i915(dev); 2746 unsigned long irqflags; 2747 2748 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2749 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2750 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2751 2752 return 0; 2753 } 2754 2755 /* Called from drm generic code, passed 'crtc' which 2756 * we use as a pipe index 2757 */ 2758 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) 2759 { 2760 struct drm_i915_private *dev_priv = to_i915(dev); 2761 unsigned long irqflags; 2762 2763 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2764 i915_disable_pipestat(dev_priv, pipe, 2765 PIPE_VBLANK_INTERRUPT_STATUS | 2766 PIPE_START_VBLANK_INTERRUPT_STATUS); 2767 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2768 } 2769 2770 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2771 { 2772 struct drm_i915_private *dev_priv = to_i915(dev); 2773 unsigned long irqflags; 2774 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2775 DE_PIPE_VBLANK(pipe); 2776 2777 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2778 ilk_disable_display_irq(dev_priv, bit); 2779 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2780 } 2781 2782 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) 2783 { 2784 struct drm_i915_private *dev_priv = to_i915(dev); 2785 unsigned long irqflags; 2786 2787 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2788 i915_disable_pipestat(dev_priv, pipe, 2789 PIPE_START_VBLANK_INTERRUPT_STATUS); 2790 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2791 } 2792 2793 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2794 { 2795 struct drm_i915_private *dev_priv = to_i915(dev); 2796 unsigned long irqflags; 2797 2798 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2799 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2800 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2801 } 2802 2803 static bool 2804 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr) 2805 { 2806 if (INTEL_GEN(engine->i915) >= 8) { 2807 return (ipehr >> 23) == 0x1c; 2808 } else { 2809 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2810 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2811 MI_SEMAPHORE_REGISTER); 2812 } 2813 } 2814 2815 static struct intel_engine_cs * 2816 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, 2817 u64 offset) 2818 { 2819 struct drm_i915_private *dev_priv = engine->i915; 2820 struct intel_engine_cs *signaller; 2821 2822 if (INTEL_GEN(dev_priv) >= 8) { 2823 for_each_engine(signaller, dev_priv) { 2824 if (engine == signaller) 2825 continue; 2826 2827 if (offset == signaller->semaphore.signal_ggtt[engine->id]) 2828 return signaller; 2829 } 2830 } else { 2831 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2832 2833 for_each_engine(signaller, dev_priv) { 2834 if(engine == signaller) 2835 continue; 2836 2837 if (sync_bits == signaller->semaphore.mbox.wait[engine->id]) 2838 return signaller; 2839 } 2840 } 2841 2842 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2843 engine->id, ipehr, offset); 2844 2845 return NULL; 2846 } 2847 2848 static struct intel_engine_cs * 2849 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) 2850 { 2851 struct drm_i915_private *dev_priv = engine->i915; 2852 void __iomem *vaddr; 2853 u32 cmd, ipehr, head; 2854 u64 offset = 0; 2855 int i, backwards; 2856 2857 /* 2858 * This function does not support execlist mode - any attempt to 2859 * proceed further into this function will result in a kernel panic 2860 * when dereferencing ring->buffer, which is not set up in execlist 2861 * mode. 2862 * 2863 * The correct way of doing it would be to derive the currently 2864 * executing ring buffer from the current context, which is derived 2865 * from the currently running request. Unfortunately, to get the 2866 * current request we would have to grab the struct_mutex before doing 2867 * anything else, which would be ill-advised since some other thread 2868 * might have grabbed it already and managed to hang itself, causing 2869 * the hang checker to deadlock. 2870 * 2871 * Therefore, this function does not support execlist mode in its 2872 * current form. Just return NULL and move on. 
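 * (engine->buffer is only set up for legacy ringbuffer submission, so the
 * NULL check below doubles as the execlist-mode bail-out.)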
2873 */ 2874 if (engine->buffer == NULL) 2875 return NULL; 2876 2877 ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 2878 if (!ipehr_is_semaphore_wait(engine, ipehr)) 2879 return NULL; 2880 2881 /* 2882 * HEAD is likely pointing to the dword after the actual command, 2883 * so scan backwards until we find the MBOX. But limit it to just 3 2884 * or 4 dwords depending on the semaphore wait command size. 2885 * Note that we don't care about ACTHD here since that might 2886 * point at at batch, and semaphores are always emitted into the 2887 * ringbuffer itself. 2888 */ 2889 head = I915_READ_HEAD(engine) & HEAD_ADDR; 2890 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4; 2891 vaddr = (void __iomem *)engine->buffer->vaddr; 2892 2893 for (i = backwards; i; --i) { 2894 /* 2895 * Be paranoid and presume the hw has gone off into the wild - 2896 * our ring is smaller than what the hardware (and hence 2897 * HEAD_ADDR) allows. Also handles wrap-around. 2898 */ 2899 head &= engine->buffer->size - 1; 2900 2901 /* This here seems to blow up */ 2902 cmd = ioread32(vaddr + head); 2903 if (cmd == ipehr) 2904 break; 2905 2906 head -= 4; 2907 } 2908 2909 if (!i) 2910 return NULL; 2911 2912 *seqno = ioread32(vaddr + head + 4) + 1; 2913 if (INTEL_GEN(dev_priv) >= 8) { 2914 offset = ioread32(vaddr + head + 12); 2915 offset <<= 32; 2916 offset |= ioread32(vaddr + head + 8); 2917 } 2918 return semaphore_wait_to_signaller_ring(engine, ipehr, offset); 2919 } 2920 2921 static int semaphore_passed(struct intel_engine_cs *engine) 2922 { 2923 struct drm_i915_private *dev_priv = engine->i915; 2924 struct intel_engine_cs *signaller; 2925 u32 seqno; 2926 2927 engine->hangcheck.deadlock++; 2928 2929 signaller = semaphore_waits_for(engine, &seqno); 2930 if (signaller == NULL) 2931 return -1; 2932 2933 /* Prevent pathological recursion due to driver bugs */ 2934 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) 2935 return -1; 2936 2937 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno)) 2938 return 1; 2939 2940 /* cursory check for an unkickable deadlock */ 2941 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2942 semaphore_passed(signaller) < 0) 2943 return -1; 2944 2945 return 0; 2946 } 2947 2948 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2949 { 2950 struct intel_engine_cs *engine; 2951 2952 for_each_engine(engine, dev_priv) 2953 engine->hangcheck.deadlock = 0; 2954 } 2955 2956 static bool subunits_stuck(struct intel_engine_cs *engine) 2957 { 2958 u32 instdone[I915_NUM_INSTDONE_REG]; 2959 bool stuck; 2960 int i; 2961 2962 if (engine->id != RCS) 2963 return true; 2964 2965 i915_get_extra_instdone(engine->i915, instdone); 2966 2967 /* There might be unstable subunit states even when 2968 * actual head is not moving. Filter out the unstable ones by 2969 * accumulating the undone -> done transitions and only 2970 * consider those as progress. 
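 * The engine is only reported as stuck here if no new INSTDONE bits have
 * appeared since the accumulated snapshot.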
2971 */ 2972 stuck = true; 2973 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { 2974 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i]; 2975 2976 if (tmp != engine->hangcheck.instdone[i]) 2977 stuck = false; 2978 2979 engine->hangcheck.instdone[i] |= tmp; 2980 } 2981 2982 return stuck; 2983 } 2984 2985 static enum intel_engine_hangcheck_action 2986 head_stuck(struct intel_engine_cs *engine, u64 acthd) 2987 { 2988 if (acthd != engine->hangcheck.acthd) { 2989 2990 /* Clear subunit states on head movement */ 2991 memset(engine->hangcheck.instdone, 0, 2992 sizeof(engine->hangcheck.instdone)); 2993 2994 return HANGCHECK_ACTIVE; 2995 } 2996 2997 if (!subunits_stuck(engine)) 2998 return HANGCHECK_ACTIVE; 2999 3000 return HANGCHECK_HUNG; 3001 } 3002 3003 static enum intel_engine_hangcheck_action 3004 engine_stuck(struct intel_engine_cs *engine, u64 acthd) 3005 { 3006 struct drm_i915_private *dev_priv = engine->i915; 3007 enum intel_engine_hangcheck_action ha; 3008 u32 tmp; 3009 3010 ha = head_stuck(engine, acthd); 3011 if (ha != HANGCHECK_HUNG) 3012 return ha; 3013 3014 if (IS_GEN2(dev_priv)) 3015 return HANGCHECK_HUNG; 3016 3017 /* Is the chip hanging on a WAIT_FOR_EVENT? 3018 * If so we can simply poke the RB_WAIT bit 3019 * and break the hang. This should work on 3020 * all but the second generation chipsets. 3021 */ 3022 tmp = I915_READ_CTL(engine); 3023 if (tmp & RING_WAIT) { 3024 i915_handle_error(dev_priv, 0, 3025 "Kicking stuck wait on %s", 3026 engine->name); 3027 I915_WRITE_CTL(engine, tmp); 3028 return HANGCHECK_KICK; 3029 } 3030 3031 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3032 switch (semaphore_passed(engine)) { 3033 default: 3034 return HANGCHECK_HUNG; 3035 case 1: 3036 i915_handle_error(dev_priv, 0, 3037 "Kicking stuck semaphore on %s", 3038 engine->name); 3039 I915_WRITE_CTL(engine, tmp); 3040 return HANGCHECK_KICK; 3041 case 0: 3042 return HANGCHECK_WAIT; 3043 } 3044 } 3045 3046 return HANGCHECK_HUNG; 3047 } 3048 3049 static unsigned long kick_waiters(struct intel_engine_cs *engine) 3050 { 3051 struct drm_i915_private *i915 = engine->i915; 3052 unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups); 3053 3054 if (engine->hangcheck.user_interrupts == irq_count && 3055 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { 3056 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings)) 3057 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3058 engine->name); 3059 3060 intel_engine_enable_fake_irq(engine); 3061 } 3062 3063 return irq_count; 3064 } 3065 /* 3066 * This is called when the chip hasn't reported back with completed 3067 * batchbuffers in a long time. We keep track per ring seqno progress and 3068 * if there are no progress, hangcheck score for that ring is increased. 3069 * Further, acthd is inspected to see if the ring is stuck. On stuck case 3070 * we kick the ring. If we see no progress on three subsequent calls 3071 * we assume chip is wedged and try to fix it by resetting the chip. 
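 * The per-engine score is built up from the BUSY/KICK/HUNG weights and decays
 * by ACTIVE_DECAY; see the defines at the top of i915_hangcheck_elapsed() below.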
3072 */ 3073 static void i915_hangcheck_elapsed(struct work_struct *work) 3074 { 3075 struct drm_i915_private *dev_priv = 3076 container_of(work, typeof(*dev_priv), 3077 gpu_error.hangcheck_work.work); 3078 struct intel_engine_cs *engine; 3079 unsigned int hung = 0, stuck = 0; 3080 int busy_count = 0; 3081 #define BUSY 1 3082 #define KICK 5 3083 #define HUNG 20 3084 #define ACTIVE_DECAY 15 3085 3086 if (!i915.enable_hangcheck) 3087 return; 3088 3089 if (!READ_ONCE(dev_priv->gt.awake)) 3090 return; 3091 3092 /* As enabling the GPU requires fairly extensive mmio access, 3093 * periodically arm the mmio checker to see if we are triggering 3094 * any invalid access. 3095 */ 3096 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 3097 3098 for_each_engine(engine, dev_priv) { 3099 bool busy = intel_engine_has_waiter(engine); 3100 u64 acthd; 3101 u32 seqno; 3102 unsigned user_interrupts; 3103 3104 semaphore_clear_deadlocks(dev_priv); 3105 3106 /* We don't strictly need an irq-barrier here, as we are not 3107 * serving an interrupt request, be paranoid in case the 3108 * barrier has side-effects (such as preventing a broken 3109 * cacheline snoop) and so be sure that we can see the seqno 3110 * advance. If the seqno should stick, due to a stale 3111 * cacheline, we would erroneously declare the GPU hung. 3112 */ 3113 if (engine->irq_seqno_barrier) 3114 engine->irq_seqno_barrier(engine); 3115 3116 acthd = intel_engine_get_active_head(engine); 3117 seqno = intel_engine_get_seqno(engine); 3118 3119 /* Reset stuck interrupts between batch advances */ 3120 user_interrupts = 0; 3121 3122 if (engine->hangcheck.seqno == seqno) { 3123 if (!intel_engine_is_active(engine)) { 3124 engine->hangcheck.action = HANGCHECK_IDLE; 3125 if (busy) { 3126 /* Safeguard against driver failure */ 3127 user_interrupts = kick_waiters(engine); 3128 engine->hangcheck.score += BUSY; 3129 } 3130 } else { 3131 /* We always increment the hangcheck score 3132 * if the engine is busy and still processing 3133 * the same request, so that no single request 3134 * can run indefinitely (such as a chain of 3135 * batches). The only time we do not increment 3136 * the hangcheck score on this ring, if this 3137 * engine is in a legitimate wait for another 3138 * engine. In that case the waiting engine is a 3139 * victim and we want to be sure we catch the 3140 * right culprit. Then every time we do kick 3141 * the ring, add a small increment to the 3142 * score so that we can catch a batch that is 3143 * being repeatedly kicked and so responsible 3144 * for stalling the machine. 3145 */ 3146 engine->hangcheck.action = 3147 engine_stuck(engine, acthd); 3148 3149 switch (engine->hangcheck.action) { 3150 case HANGCHECK_IDLE: 3151 case HANGCHECK_WAIT: 3152 break; 3153 case HANGCHECK_ACTIVE: 3154 engine->hangcheck.score += BUSY; 3155 break; 3156 case HANGCHECK_KICK: 3157 engine->hangcheck.score += KICK; 3158 break; 3159 case HANGCHECK_HUNG: 3160 engine->hangcheck.score += HUNG; 3161 break; 3162 } 3163 } 3164 3165 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3166 hung |= intel_engine_flag(engine); 3167 if (engine->hangcheck.action != HANGCHECK_HUNG) 3168 stuck |= intel_engine_flag(engine); 3169 } 3170 } else { 3171 engine->hangcheck.action = HANGCHECK_ACTIVE; 3172 3173 /* Gradually reduce the count so that we catch DoS 3174 * attempts across multiple batches. 
3175 */ 3176 if (engine->hangcheck.score > 0) 3177 engine->hangcheck.score -= ACTIVE_DECAY; 3178 if (engine->hangcheck.score < 0) 3179 engine->hangcheck.score = 0; 3180 3181 /* Clear head and subunit states on seqno movement */ 3182 acthd = 0; 3183 3184 memset(engine->hangcheck.instdone, 0, 3185 sizeof(engine->hangcheck.instdone)); 3186 } 3187 3188 engine->hangcheck.seqno = seqno; 3189 engine->hangcheck.acthd = acthd; 3190 engine->hangcheck.user_interrupts = user_interrupts; 3191 busy_count += busy; 3192 } 3193 3194 if (hung) { 3195 char msg[80]; 3196 int len; 3197 3198 /* If some rings hung but others were still busy, only 3199 * blame the hanging rings in the synopsis. 3200 */ 3201 if (stuck != hung) 3202 hung &= ~stuck; 3203 len = scnprintf(msg, sizeof(msg), 3204 "%s on ", stuck == hung ? "No progress" : "Hang"); 3205 for_each_engine_masked(engine, dev_priv, hung) 3206 len += scnprintf(msg + len, sizeof(msg) - len, 3207 "%s, ", engine->name); 3208 msg[len-2] = '\0'; 3209 3210 return i915_handle_error(dev_priv, hung, msg); 3211 } 3212 3213 /* Reset timer in case GPU hangs without another request being added */ 3214 if (busy_count) 3215 i915_queue_hangcheck(dev_priv); 3216 } 3217 3218 static void ibx_irq_reset(struct drm_device *dev) 3219 { 3220 struct drm_i915_private *dev_priv = to_i915(dev); 3221 3222 if (HAS_PCH_NOP(dev)) 3223 return; 3224 3225 GEN5_IRQ_RESET(SDE); 3226 3227 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3228 I915_WRITE(SERR_INT, 0xffffffff); 3229 } 3230 3231 /* 3232 * SDEIER is also touched by the interrupt handler to work around missed PCH 3233 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3234 * instead we unconditionally enable all PCH interrupt sources here, but then 3235 * only unmask them as needed with SDEIMR. 3236 * 3237 * This function needs to be called before interrupts are enabled. 
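 * (ibx_irq_pre_postinstall() below WARNs if SDEIER is already non-zero before
 * writing the all-ones enable mask)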
3238 */ 3239 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3240 { 3241 struct drm_i915_private *dev_priv = to_i915(dev); 3242 3243 if (HAS_PCH_NOP(dev)) 3244 return; 3245 3246 WARN_ON(I915_READ(SDEIER) != 0); 3247 I915_WRITE(SDEIER, 0xffffffff); 3248 POSTING_READ(SDEIER); 3249 } 3250 3251 static void gen5_gt_irq_reset(struct drm_device *dev) 3252 { 3253 struct drm_i915_private *dev_priv = to_i915(dev); 3254 3255 GEN5_IRQ_RESET(GT); 3256 if (INTEL_INFO(dev)->gen >= 6) 3257 GEN5_IRQ_RESET(GEN6_PM); 3258 } 3259 3260 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3261 { 3262 enum i915_pipe pipe; 3263 3264 if (IS_CHERRYVIEW(dev_priv)) 3265 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3266 else 3267 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3268 3269 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3270 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3271 3272 for_each_pipe(dev_priv, pipe) { 3273 I915_WRITE(PIPESTAT(pipe), 3274 PIPE_FIFO_UNDERRUN_STATUS | 3275 PIPESTAT_INT_STATUS_MASK); 3276 dev_priv->pipestat_irq_mask[pipe] = 0; 3277 } 3278 3279 GEN5_IRQ_RESET(VLV_); 3280 dev_priv->irq_mask = ~0; 3281 } 3282 3283 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3284 { 3285 u32 pipestat_mask; 3286 u32 enable_mask; 3287 enum i915_pipe pipe; 3288 3289 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3290 PIPE_CRC_DONE_INTERRUPT_STATUS; 3291 3292 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3293 for_each_pipe(dev_priv, pipe) 3294 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3295 3296 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3297 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3298 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3299 if (IS_CHERRYVIEW(dev_priv)) 3300 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3301 3302 WARN_ON(dev_priv->irq_mask != ~0); 3303 3304 dev_priv->irq_mask = ~enable_mask; 3305 3306 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3307 } 3308 3309 /* drm_dma.h hooks 3310 */ 3311 static void ironlake_irq_reset(struct drm_device *dev) 3312 { 3313 struct drm_i915_private *dev_priv = to_i915(dev); 3314 3315 I915_WRITE(HWSTAM, 0xffffffff); 3316 3317 GEN5_IRQ_RESET(DE); 3318 if (IS_GEN7(dev)) 3319 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3320 3321 gen5_gt_irq_reset(dev); 3322 3323 ibx_irq_reset(dev); 3324 } 3325 3326 static void valleyview_irq_preinstall(struct drm_device *dev) 3327 { 3328 struct drm_i915_private *dev_priv = to_i915(dev); 3329 3330 I915_WRITE(VLV_MASTER_IER, 0); 3331 POSTING_READ(VLV_MASTER_IER); 3332 3333 gen5_gt_irq_reset(dev); 3334 3335 spin_lock_irq(&dev_priv->irq_lock); 3336 if (dev_priv->display_irqs_enabled) 3337 vlv_display_irq_reset(dev_priv); 3338 spin_unlock_irq(&dev_priv->irq_lock); 3339 } 3340 3341 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3342 { 3343 GEN8_IRQ_RESET_NDX(GT, 0); 3344 GEN8_IRQ_RESET_NDX(GT, 1); 3345 GEN8_IRQ_RESET_NDX(GT, 2); 3346 GEN8_IRQ_RESET_NDX(GT, 3); 3347 } 3348 3349 static void gen8_irq_reset(struct drm_device *dev) 3350 { 3351 struct drm_i915_private *dev_priv = to_i915(dev); 3352 int pipe; 3353 3354 I915_WRITE(GEN8_MASTER_IRQ, 0); 3355 POSTING_READ(GEN8_MASTER_IRQ); 3356 3357 gen8_gt_irq_reset(dev_priv); 3358 3359 for_each_pipe(dev_priv, pipe) 3360 if (intel_display_power_is_enabled(dev_priv, 3361 POWER_DOMAIN_PIPE(pipe))) 3362 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3363 3364 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3365 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3366 GEN5_IRQ_RESET(GEN8_PCU_); 3367 3368 if 
(HAS_PCH_SPLIT(dev)) 3369 ibx_irq_reset(dev); 3370 } 3371 3372 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3373 unsigned int pipe_mask) 3374 { 3375 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3376 enum i915_pipe pipe; 3377 3378 spin_lock_irq(&dev_priv->irq_lock); 3379 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3380 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3381 dev_priv->de_irq_mask[pipe], 3382 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3383 spin_unlock_irq(&dev_priv->irq_lock); 3384 } 3385 3386 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3387 unsigned int pipe_mask) 3388 { 3389 enum i915_pipe pipe; 3390 3391 spin_lock_irq(&dev_priv->irq_lock); 3392 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3393 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3394 spin_unlock_irq(&dev_priv->irq_lock); 3395 3396 /* make sure we're done processing display irqs */ 3397 synchronize_irq(dev_priv->drm.irq); 3398 } 3399 3400 static void cherryview_irq_preinstall(struct drm_device *dev) 3401 { 3402 struct drm_i915_private *dev_priv = to_i915(dev); 3403 3404 I915_WRITE(GEN8_MASTER_IRQ, 0); 3405 POSTING_READ(GEN8_MASTER_IRQ); 3406 3407 gen8_gt_irq_reset(dev_priv); 3408 3409 GEN5_IRQ_RESET(GEN8_PCU_); 3410 3411 spin_lock_irq(&dev_priv->irq_lock); 3412 if (dev_priv->display_irqs_enabled) 3413 vlv_display_irq_reset(dev_priv); 3414 spin_unlock_irq(&dev_priv->irq_lock); 3415 } 3416 3417 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3418 const u32 hpd[HPD_NUM_PINS]) 3419 { 3420 struct intel_encoder *encoder; 3421 u32 enabled_irqs = 0; 3422 3423 for_each_intel_encoder(&dev_priv->drm, encoder) 3424 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3425 enabled_irqs |= hpd[encoder->hpd_pin]; 3426 3427 return enabled_irqs; 3428 } 3429 3430 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3431 { 3432 u32 hotplug_irqs, hotplug, enabled_irqs; 3433 3434 if (HAS_PCH_IBX(dev_priv)) { 3435 hotplug_irqs = SDE_HOTPLUG_MASK; 3436 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3437 } else { 3438 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3439 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3440 } 3441 3442 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3443 3444 /* 3445 * Enable digital hotplug on the PCH, and configure the DP short pulse 3446 * duration to 2ms (which is the minimum in the Display Port spec). 3447 * The pulse duration bits are reserved on LPT+. 3448 */ 3449 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3450 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3451 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3452 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3453 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3454 /* 3455 * When CPU and PCH are on the same package, port A 3456 * HPD must be enabled in both north and south. 
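 * (the CPU/north side of port A is enabled separately in ilk_hpd_irq_setup())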
3457 */ 3458 if (HAS_PCH_LPT_LP(dev_priv)) 3459 hotplug |= PORTA_HOTPLUG_ENABLE; 3460 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3461 } 3462 3463 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3464 { 3465 u32 hotplug_irqs, hotplug, enabled_irqs; 3466 3467 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3468 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3469 3470 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3471 3472 /* Enable digital hotplug on the PCH */ 3473 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3474 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | 3475 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; 3476 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3477 3478 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3479 hotplug |= PORTE_HOTPLUG_ENABLE; 3480 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3481 } 3482 3483 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3484 { 3485 u32 hotplug_irqs, hotplug, enabled_irqs; 3486 3487 if (INTEL_GEN(dev_priv) >= 8) { 3488 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3489 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3490 3491 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3492 } else if (INTEL_GEN(dev_priv) >= 7) { 3493 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3494 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3495 3496 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3497 } else { 3498 hotplug_irqs = DE_DP_A_HOTPLUG; 3499 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3500 3501 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3502 } 3503 3504 /* 3505 * Enable digital hotplug on the CPU, and configure the DP short pulse 3506 * duration to 2ms (which is the minimum in the Display Port spec) 3507 * The pulse duration bits are reserved on HSW+. 3508 */ 3509 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3510 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3511 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3512 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3513 3514 ibx_hpd_irq_setup(dev_priv); 3515 } 3516 3517 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3518 { 3519 u32 hotplug_irqs, hotplug, enabled_irqs; 3520 3521 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3522 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3523 3524 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3525 3526 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3527 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | 3528 PORTA_HOTPLUG_ENABLE; 3529 3530 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3531 hotplug, enabled_irqs); 3532 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3533 3534 /* 3535 * For BXT invert bit has to be set based on AOB design 3536 * for HPD detection logic, update it based on VBT fields. 
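 * (the per-DDI invert setting is queried from the VBT via
 * intel_bios_is_port_hpd_inverted() below)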
3537 */ 3538 3539 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3540 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3541 hotplug |= BXT_DDIA_HPD_INVERT; 3542 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3543 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3544 hotplug |= BXT_DDIB_HPD_INVERT; 3545 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3546 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3547 hotplug |= BXT_DDIC_HPD_INVERT; 3548 3549 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3550 } 3551 3552 static void ibx_irq_postinstall(struct drm_device *dev) 3553 { 3554 struct drm_i915_private *dev_priv = to_i915(dev); 3555 u32 mask; 3556 3557 if (HAS_PCH_NOP(dev)) 3558 return; 3559 3560 if (HAS_PCH_IBX(dev)) 3561 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3562 else 3563 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3564 3565 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3566 I915_WRITE(SDEIMR, ~mask); 3567 } 3568 3569 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3570 { 3571 struct drm_i915_private *dev_priv = to_i915(dev); 3572 u32 pm_irqs, gt_irqs; 3573 3574 pm_irqs = gt_irqs = 0; 3575 3576 dev_priv->gt_irq_mask = ~0; 3577 if (HAS_L3_DPF(dev)) { 3578 /* L3 parity interrupt is always unmasked. */ 3579 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3580 gt_irqs |= GT_PARITY_ERROR(dev); 3581 } 3582 3583 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3584 if (IS_GEN5(dev)) { 3585 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3586 } else { 3587 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3588 } 3589 3590 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3591 3592 if (INTEL_INFO(dev)->gen >= 6) { 3593 /* 3594 * RPS interrupts will get enabled/disabled on demand when RPS 3595 * itself is enabled/disabled. 3596 */ 3597 if (HAS_VEBOX(dev)) 3598 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3599 3600 dev_priv->pm_irq_mask = 0xffffffff; 3601 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3602 } 3603 } 3604 3605 static int ironlake_irq_postinstall(struct drm_device *dev) 3606 { 3607 struct drm_i915_private *dev_priv = to_i915(dev); 3608 u32 display_mask, extra_mask; 3609 3610 if (INTEL_INFO(dev)->gen >= 7) { 3611 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3612 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3613 DE_PLANEB_FLIP_DONE_IVB | 3614 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3615 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3616 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3617 DE_DP_A_HOTPLUG_IVB); 3618 } else { 3619 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3620 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3621 DE_AUX_CHANNEL_A | 3622 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3623 DE_POISON); 3624 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3625 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3626 DE_DP_A_HOTPLUG); 3627 } 3628 3629 dev_priv->irq_mask = ~display_mask; 3630 3631 I915_WRITE(HWSTAM, 0xeffe); 3632 3633 ibx_irq_pre_postinstall(dev); 3634 3635 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3636 3637 gen5_gt_irq_postinstall(dev); 3638 3639 ibx_irq_postinstall(dev); 3640 3641 if (IS_IRONLAKE_M(dev)) { 3642 /* Enable PCU event interrupts 3643 * 3644 * spinlocking not required here for correctness since interrupt 3645 * setup is guaranteed to run in single-threaded context. But we 3646 * need it to make the assert_spin_locked happy. 
*/ 3647 spin_lock_irq(&dev_priv->irq_lock); 3648 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3649 spin_unlock_irq(&dev_priv->irq_lock); 3650 } 3651 3652 return 0; 3653 } 3654 3655 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3656 { 3657 assert_spin_locked(&dev_priv->irq_lock); 3658 3659 if (dev_priv->display_irqs_enabled) 3660 return; 3661 3662 dev_priv->display_irqs_enabled = true; 3663 3664 if (intel_irqs_enabled(dev_priv)) { 3665 vlv_display_irq_reset(dev_priv); 3666 vlv_display_irq_postinstall(dev_priv); 3667 } 3668 } 3669 3670 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3671 { 3672 assert_spin_locked(&dev_priv->irq_lock); 3673 3674 if (!dev_priv->display_irqs_enabled) 3675 return; 3676 3677 dev_priv->display_irqs_enabled = false; 3678 3679 if (intel_irqs_enabled(dev_priv)) 3680 vlv_display_irq_reset(dev_priv); 3681 } 3682 3683 3684 static int valleyview_irq_postinstall(struct drm_device *dev) 3685 { 3686 struct drm_i915_private *dev_priv = to_i915(dev); 3687 3688 gen5_gt_irq_postinstall(dev); 3689 3690 spin_lock_irq(&dev_priv->irq_lock); 3691 if (dev_priv->display_irqs_enabled) 3692 vlv_display_irq_postinstall(dev_priv); 3693 spin_unlock_irq(&dev_priv->irq_lock); 3694 3695 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3696 POSTING_READ(VLV_MASTER_IER); 3697 3698 return 0; 3699 } 3700 3701 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3702 { 3703 /* These are interrupts we'll toggle with the ring mask register */ 3704 uint32_t gt_interrupts[] = { 3705 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3706 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3707 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3708 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3709 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3710 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3711 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3712 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3713 0, 3714 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3715 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3716 }; 3717 3718 if (HAS_L3_DPF(dev_priv)) 3719 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3720 3721 dev_priv->pm_irq_mask = 0xffffffff; 3722 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3723 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3724 /* 3725 * RPS interrupts will get enabled/disabled on demand when RPS itself 3726 * is enabled/disabled. 
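 * GT index 2, which carries the PM/RPS interrupts, is therefore initialised
 * below with an empty enable mask.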
3727 */ 3728 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3729 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3730 } 3731 3732 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3733 { 3734 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3735 uint32_t de_pipe_enables; 3736 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3737 u32 de_port_enables; 3738 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3739 enum i915_pipe pipe; 3740 3741 if (INTEL_INFO(dev_priv)->gen >= 9) { 3742 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3743 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3744 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3745 GEN9_AUX_CHANNEL_D; 3746 if (IS_BROXTON(dev_priv)) 3747 de_port_masked |= BXT_DE_PORT_GMBUS; 3748 } else { 3749 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3750 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3751 } 3752 3753 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3754 GEN8_PIPE_FIFO_UNDERRUN; 3755 3756 de_port_enables = de_port_masked; 3757 if (IS_BROXTON(dev_priv)) 3758 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3759 else if (IS_BROADWELL(dev_priv)) 3760 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3761 3762 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3763 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3764 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3765 3766 for_each_pipe(dev_priv, pipe) 3767 if (intel_display_power_is_enabled(dev_priv, 3768 POWER_DOMAIN_PIPE(pipe))) 3769 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3770 dev_priv->de_irq_mask[pipe], 3771 de_pipe_enables); 3772 3773 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3774 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3775 } 3776 3777 static int gen8_irq_postinstall(struct drm_device *dev) 3778 { 3779 struct drm_i915_private *dev_priv = to_i915(dev); 3780 3781 if (HAS_PCH_SPLIT(dev)) 3782 ibx_irq_pre_postinstall(dev); 3783 3784 gen8_gt_irq_postinstall(dev_priv); 3785 gen8_de_irq_postinstall(dev_priv); 3786 3787 if (HAS_PCH_SPLIT(dev)) 3788 ibx_irq_postinstall(dev); 3789 3790 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3791 POSTING_READ(GEN8_MASTER_IRQ); 3792 3793 return 0; 3794 } 3795 3796 static int cherryview_irq_postinstall(struct drm_device *dev) 3797 { 3798 struct drm_i915_private *dev_priv = to_i915(dev); 3799 3800 gen8_gt_irq_postinstall(dev_priv); 3801 3802 spin_lock_irq(&dev_priv->irq_lock); 3803 if (dev_priv->display_irqs_enabled) 3804 vlv_display_irq_postinstall(dev_priv); 3805 spin_unlock_irq(&dev_priv->irq_lock); 3806 3807 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3808 POSTING_READ(GEN8_MASTER_IRQ); 3809 3810 return 0; 3811 } 3812 3813 static void gen8_irq_uninstall(struct drm_device *dev) 3814 { 3815 struct drm_i915_private *dev_priv = to_i915(dev); 3816 3817 if (!dev_priv) 3818 return; 3819 3820 gen8_irq_reset(dev); 3821 } 3822 3823 static void valleyview_irq_uninstall(struct drm_device *dev) 3824 { 3825 struct drm_i915_private *dev_priv = to_i915(dev); 3826 3827 if (!dev_priv) 3828 return; 3829 3830 I915_WRITE(VLV_MASTER_IER, 0); 3831 POSTING_READ(VLV_MASTER_IER); 3832 3833 gen5_gt_irq_reset(dev); 3834 3835 I915_WRITE(HWSTAM, 0xffffffff); 3836 3837 spin_lock_irq(&dev_priv->irq_lock); 3838 if (dev_priv->display_irqs_enabled) 3839 vlv_display_irq_reset(dev_priv); 3840 spin_unlock_irq(&dev_priv->irq_lock); 3841 } 3842 3843 static void cherryview_irq_uninstall(struct drm_device *dev) 3844 { 3845 struct drm_i915_private *dev_priv = to_i915(dev); 3846 3847 if (!dev_priv) 3848 return; 3849 3850 
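	/*
	 * Disable the master interrupt control first so that no new
	 * interrupts are raised while the GT and display IIRs below are
	 * being reset.
	 */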
I915_WRITE(GEN8_MASTER_IRQ, 0); 3851 POSTING_READ(GEN8_MASTER_IRQ); 3852 3853 gen8_gt_irq_reset(dev_priv); 3854 3855 GEN5_IRQ_RESET(GEN8_PCU_); 3856 3857 spin_lock_irq(&dev_priv->irq_lock); 3858 if (dev_priv->display_irqs_enabled) 3859 vlv_display_irq_reset(dev_priv); 3860 spin_unlock_irq(&dev_priv->irq_lock); 3861 } 3862 3863 static void ironlake_irq_uninstall(struct drm_device *dev) 3864 { 3865 struct drm_i915_private *dev_priv = to_i915(dev); 3866 3867 if (!dev_priv) 3868 return; 3869 3870 ironlake_irq_reset(dev); 3871 } 3872 3873 static void i8xx_irq_preinstall(struct drm_device * dev) 3874 { 3875 struct drm_i915_private *dev_priv = to_i915(dev); 3876 int pipe; 3877 3878 for_each_pipe(dev_priv, pipe) 3879 I915_WRITE(PIPESTAT(pipe), 0); 3880 I915_WRITE16(IMR, 0xffff); 3881 I915_WRITE16(IER, 0x0); 3882 POSTING_READ16(IER); 3883 } 3884 3885 static int i8xx_irq_postinstall(struct drm_device *dev) 3886 { 3887 struct drm_i915_private *dev_priv = to_i915(dev); 3888 3889 I915_WRITE16(EMR, 3890 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3891 3892 /* Unmask the interrupts that we always want on. */ 3893 dev_priv->irq_mask = 3894 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3895 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3896 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3897 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3898 I915_WRITE16(IMR, dev_priv->irq_mask); 3899 3900 I915_WRITE16(IER, 3901 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3902 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3903 I915_USER_INTERRUPT); 3904 POSTING_READ16(IER); 3905 3906 /* Interrupt setup is already guaranteed to be single-threaded, this is 3907 * just to make the assert_spin_locked check happy. */ 3908 spin_lock_irq(&dev_priv->irq_lock); 3909 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3910 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3911 spin_unlock_irq(&dev_priv->irq_lock); 3912 3913 return 0; 3914 } 3915 3916 /* 3917 * Returns true when a page flip has completed. 3918 */ 3919 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv, 3920 int plane, int pipe, u32 iir) 3921 { 3922 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3923 3924 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 3925 return false; 3926 3927 if ((iir & flip_pending) == 0) 3928 goto check_page_flip; 3929 3930 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3931 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3932 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3933 * the flip is completed (no longer pending). Since this doesn't raise 3934 * an interrupt per se, we watch for the change at vblank. 
3935 */ 3936 if (I915_READ16(ISR) & flip_pending) 3937 goto check_page_flip; 3938 3939 intel_finish_page_flip_cs(dev_priv, pipe); 3940 return true; 3941 3942 check_page_flip: 3943 intel_check_page_flip(dev_priv, pipe); 3944 return false; 3945 } 3946 3947 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3948 { 3949 struct drm_device *dev = arg; 3950 struct drm_i915_private *dev_priv = to_i915(dev); 3951 u16 iir, new_iir; 3952 u32 pipe_stats[2]; 3953 int pipe; 3954 u16 flip_mask = 3955 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3956 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3957 irqreturn_t ret; 3958 3959 if (!intel_irqs_enabled(dev_priv)) 3960 return IRQ_NONE; 3961 3962 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3963 disable_rpm_wakeref_asserts(dev_priv); 3964 3965 ret = IRQ_NONE; 3966 iir = I915_READ16(IIR); 3967 if (iir == 0) 3968 goto out; 3969 3970 while (iir & ~flip_mask) { 3971 /* Can't rely on pipestat interrupt bit in iir as it might 3972 * have been cleared after the pipestat interrupt was received. 3973 * It doesn't set the bit in iir again, but it still produces 3974 * interrupts (for non-MSI). 3975 */ 3976 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3977 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3978 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3979 3980 for_each_pipe(dev_priv, pipe) { 3981 i915_reg_t reg = PIPESTAT(pipe); 3982 pipe_stats[pipe] = I915_READ(reg); 3983 3984 /* 3985 * Clear the PIPE*STAT regs before the IIR 3986 */ 3987 if (pipe_stats[pipe] & 0x8000ffff) 3988 I915_WRITE(reg, pipe_stats[pipe]); 3989 } 3990 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3991 3992 I915_WRITE16(IIR, iir & ~flip_mask); 3993 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3994 3995 if (iir & I915_USER_INTERRUPT) 3996 notify_ring(&dev_priv->engine[RCS]); 3997 3998 for_each_pipe(dev_priv, pipe) { 3999 int plane = pipe; 4000 if (HAS_FBC(dev_priv)) 4001 plane = !plane; 4002 4003 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4004 i8xx_handle_vblank(dev_priv, plane, pipe, iir)) 4005 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4006 4007 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4008 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4009 4010 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4011 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4012 pipe); 4013 } 4014 4015 iir = new_iir; 4016 } 4017 ret = IRQ_HANDLED; 4018 4019 out: 4020 enable_rpm_wakeref_asserts(dev_priv); 4021 4022 return ret; 4023 } 4024 4025 static void i8xx_irq_uninstall(struct drm_device * dev) 4026 { 4027 struct drm_i915_private *dev_priv = to_i915(dev); 4028 int pipe; 4029 4030 for_each_pipe(dev_priv, pipe) { 4031 /* Clear enable bits; then clear status bits */ 4032 I915_WRITE(PIPESTAT(pipe), 0); 4033 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4034 } 4035 I915_WRITE16(IMR, 0xffff); 4036 I915_WRITE16(IER, 0x0); 4037 I915_WRITE16(IIR, I915_READ16(IIR)); 4038 } 4039 4040 static void i915_irq_preinstall(struct drm_device * dev) 4041 { 4042 struct drm_i915_private *dev_priv = to_i915(dev); 4043 int pipe; 4044 4045 if (I915_HAS_HOTPLUG(dev)) { 4046 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4047 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4048 } 4049 4050 I915_WRITE16(HWSTAM, 0xeffe); 4051 for_each_pipe(dev_priv, pipe) 4052 I915_WRITE(PIPESTAT(pipe), 0); 4053 I915_WRITE(IMR, 0xffffffff); 4054 I915_WRITE(IER, 0x0); 4055 POSTING_READ(IER); 4056 } 4057 4058 static int i915_irq_postinstall(struct drm_device 
*dev) 4059 { 4060 struct drm_i915_private *dev_priv = to_i915(dev); 4061 u32 enable_mask; 4062 4063 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4064 4065 /* Unmask the interrupts that we always want on. */ 4066 dev_priv->irq_mask = 4067 ~(I915_ASLE_INTERRUPT | 4068 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4069 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4070 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4071 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4072 4073 enable_mask = 4074 I915_ASLE_INTERRUPT | 4075 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4076 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4077 I915_USER_INTERRUPT; 4078 4079 if (I915_HAS_HOTPLUG(dev)) { 4080 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4081 POSTING_READ(PORT_HOTPLUG_EN); 4082 4083 /* Enable in IER... */ 4084 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4085 /* and unmask in IMR */ 4086 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4087 } 4088 4089 I915_WRITE(IMR, dev_priv->irq_mask); 4090 I915_WRITE(IER, enable_mask); 4091 POSTING_READ(IER); 4092 4093 i915_enable_asle_pipestat(dev_priv); 4094 4095 /* Interrupt setup is already guaranteed to be single-threaded, this is 4096 * just to make the assert_spin_locked check happy. */ 4097 spin_lock_irq(&dev_priv->irq_lock); 4098 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4099 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4100 spin_unlock_irq(&dev_priv->irq_lock); 4101 4102 return 0; 4103 } 4104 4105 /* 4106 * Returns true when a page flip has completed. 4107 */ 4108 static bool i915_handle_vblank(struct drm_i915_private *dev_priv, 4109 int plane, int pipe, u32 iir) 4110 { 4111 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4112 4113 if (!intel_pipe_handle_vblank(dev_priv, pipe)) 4114 return false; 4115 4116 if ((iir & flip_pending) == 0) 4117 goto check_page_flip; 4118 4119 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4120 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4121 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4122 * the flip is completed (no longer pending). Since this doesn't raise 4123 * an interrupt per se, we watch for the change at vblank. 4124 */ 4125 if (I915_READ(ISR) & flip_pending) 4126 goto check_page_flip; 4127 4128 intel_finish_page_flip_cs(dev_priv, pipe); 4129 return true; 4130 4131 check_page_flip: 4132 intel_check_page_flip(dev_priv, pipe); 4133 return false; 4134 } 4135 4136 static irqreturn_t i915_irq_handler(int irq, void *arg) 4137 { 4138 struct drm_device *dev = arg; 4139 struct drm_i915_private *dev_priv = to_i915(dev); 4140 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4141 u32 flip_mask = 4142 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4143 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4144 int pipe, ret = IRQ_NONE; 4145 4146 if (!intel_irqs_enabled(dev_priv)) 4147 return IRQ_NONE; 4148 4149 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4150 disable_rpm_wakeref_asserts(dev_priv); 4151 4152 iir = I915_READ(IIR); 4153 do { 4154 bool irq_received = (iir & ~flip_mask) != 0; 4155 bool blc_event = false; 4156 4157 /* Can't rely on pipestat interrupt bit in iir as it might 4158 * have been cleared after the pipestat interrupt was received. 4159 * It doesn't set the bit in iir again, but it still produces 4160 * interrupts (for non-MSI). 
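 * Hence PIPESTAT is read and cleared for every pipe below, independently
 * of which bits happen to be visible in IIR.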
4161 */ 4162 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4163 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4164 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4165 4166 for_each_pipe(dev_priv, pipe) { 4167 i915_reg_t reg = PIPESTAT(pipe); 4168 pipe_stats[pipe] = I915_READ(reg); 4169 4170 /* Clear the PIPE*STAT regs before the IIR */ 4171 if (pipe_stats[pipe] & 0x8000ffff) { 4172 I915_WRITE(reg, pipe_stats[pipe]); 4173 irq_received = true; 4174 } 4175 } 4176 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4177 4178 if (!irq_received) 4179 break; 4180 4181 /* Consume port. Then clear IIR or we'll miss events */ 4182 if (I915_HAS_HOTPLUG(dev_priv) && 4183 iir & I915_DISPLAY_PORT_INTERRUPT) { 4184 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4185 if (hotplug_status) 4186 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4187 } 4188 4189 I915_WRITE(IIR, iir & ~flip_mask); 4190 new_iir = I915_READ(IIR); /* Flush posted writes */ 4191 4192 if (iir & I915_USER_INTERRUPT) 4193 notify_ring(&dev_priv->engine[RCS]); 4194 4195 for_each_pipe(dev_priv, pipe) { 4196 int plane = pipe; 4197 if (HAS_FBC(dev_priv)) 4198 plane = !plane; 4199 4200 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4201 i915_handle_vblank(dev_priv, plane, pipe, iir)) 4202 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4203 4204 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4205 blc_event = true; 4206 4207 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4208 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4209 4210 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4211 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4212 pipe); 4213 } 4214 4215 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4216 intel_opregion_asle_intr(dev_priv); 4217 4218 /* With MSI, interrupts are only generated when iir 4219 * transitions from zero to nonzero. If another bit got 4220 * set while we were handling the existing iir bits, then 4221 * we would never get another interrupt. 4222 * 4223 * This is fine on non-MSI as well, as if we hit this path 4224 * we avoid exiting the interrupt handler only to generate 4225 * another one. 4226 * 4227 * Note that for MSI this could cause a stray interrupt report 4228 * if an interrupt landed in the time between writing IIR and 4229 * the posting read. This should be rare enough to never 4230 * trigger the 99% of 100,000 interrupts test for disabling 4231 * stray interrupts. 
4232 */ 4233 ret = IRQ_HANDLED; 4234 iir = new_iir; 4235 } while (iir & ~flip_mask); 4236 4237 enable_rpm_wakeref_asserts(dev_priv); 4238 4239 return ret; 4240 } 4241 4242 static void i915_irq_uninstall(struct drm_device * dev) 4243 { 4244 struct drm_i915_private *dev_priv = to_i915(dev); 4245 int pipe; 4246 4247 if (I915_HAS_HOTPLUG(dev)) { 4248 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4249 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4250 } 4251 4252 I915_WRITE16(HWSTAM, 0xffff); 4253 for_each_pipe(dev_priv, pipe) { 4254 /* Clear enable bits; then clear status bits */ 4255 I915_WRITE(PIPESTAT(pipe), 0); 4256 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4257 } 4258 I915_WRITE(IMR, 0xffffffff); 4259 I915_WRITE(IER, 0x0); 4260 4261 I915_WRITE(IIR, I915_READ(IIR)); 4262 } 4263 4264 static void i965_irq_preinstall(struct drm_device * dev) 4265 { 4266 struct drm_i915_private *dev_priv = to_i915(dev); 4267 int pipe; 4268 4269 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4270 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4271 4272 I915_WRITE(HWSTAM, 0xeffe); 4273 for_each_pipe(dev_priv, pipe) 4274 I915_WRITE(PIPESTAT(pipe), 0); 4275 I915_WRITE(IMR, 0xffffffff); 4276 I915_WRITE(IER, 0x0); 4277 POSTING_READ(IER); 4278 } 4279 4280 static int i965_irq_postinstall(struct drm_device *dev) 4281 { 4282 struct drm_i915_private *dev_priv = to_i915(dev); 4283 u32 enable_mask; 4284 u32 error_mask; 4285 4286 /* Unmask the interrupts that we always want on. */ 4287 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4288 I915_DISPLAY_PORT_INTERRUPT | 4289 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4290 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4291 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4292 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4293 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4294 4295 enable_mask = ~dev_priv->irq_mask; 4296 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4297 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4298 enable_mask |= I915_USER_INTERRUPT; 4299 4300 if (IS_G4X(dev_priv)) 4301 enable_mask |= I915_BSD_USER_INTERRUPT; 4302 4303 /* Interrupt setup is already guaranteed to be single-threaded, this is 4304 * just to make the assert_spin_locked check happy. */ 4305 spin_lock_irq(&dev_priv->irq_lock); 4306 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4307 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4308 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4309 spin_unlock_irq(&dev_priv->irq_lock); 4310 4311 /* 4312 * Enable some error detection, note the instruction error mask 4313 * bit is reserved, so we leave it masked. 
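 *
 * (A bit set in EMR masks the corresponding error source, so only the
 * sources cleared in error_mask below can be reported.)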
4314 */ 4315 if (IS_G4X(dev_priv)) { 4316 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4317 GM45_ERROR_MEM_PRIV | 4318 GM45_ERROR_CP_PRIV | 4319 I915_ERROR_MEMORY_REFRESH); 4320 } else { 4321 error_mask = ~(I915_ERROR_PAGE_TABLE | 4322 I915_ERROR_MEMORY_REFRESH); 4323 } 4324 I915_WRITE(EMR, error_mask); 4325 4326 I915_WRITE(IMR, dev_priv->irq_mask); 4327 I915_WRITE(IER, enable_mask); 4328 POSTING_READ(IER); 4329 4330 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4331 POSTING_READ(PORT_HOTPLUG_EN); 4332 4333 i915_enable_asle_pipestat(dev_priv); 4334 4335 return 0; 4336 } 4337 4338 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) 4339 { 4340 u32 hotplug_en; 4341 4342 assert_spin_locked(&dev_priv->irq_lock); 4343 4344 /* Note HDMI and DP share hotplug bits */ 4345 /* enable bits are the same for all generations */ 4346 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); 4347 /* Programming the CRT detection parameters tends 4348 to generate a spurious hotplug event about three 4349 seconds later. So just do it once. 4350 */ 4351 if (IS_G4X(dev_priv)) 4352 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4353 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4354 4355 /* Ignore TV since it's buggy */ 4356 i915_hotplug_interrupt_update_locked(dev_priv, 4357 HOTPLUG_INT_EN_MASK | 4358 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4359 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4360 hotplug_en); 4361 } 4362 4363 static irqreturn_t i965_irq_handler(int irq, void *arg) 4364 { 4365 struct drm_device *dev = arg; 4366 struct drm_i915_private *dev_priv = to_i915(dev); 4367 u32 iir, new_iir; 4368 u32 pipe_stats[I915_MAX_PIPES]; 4369 int ret = IRQ_NONE, pipe; 4370 u32 flip_mask = 4371 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4372 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4373 4374 if (!intel_irqs_enabled(dev_priv)) 4375 return IRQ_NONE; 4376 4377 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4378 disable_rpm_wakeref_asserts(dev_priv); 4379 4380 iir = I915_READ(IIR); 4381 4382 for (;;) { 4383 bool irq_received = (iir & ~flip_mask) != 0; 4384 bool blc_event = false; 4385 4386 /* Can't rely on pipestat interrupt bit in iir as it might 4387 * have been cleared after the pipestat interrupt was received. 4388 * It doesn't set the bit in iir again, but it still produces 4389 * interrupts (for non-MSI). 4390 */ 4391 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4392 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4393 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4394 4395 for_each_pipe(dev_priv, pipe) { 4396 i915_reg_t reg = PIPESTAT(pipe); 4397 pipe_stats[pipe] = I915_READ(reg); 4398 4399 /* 4400 * Clear the PIPE*STAT regs before the IIR 4401 */ 4402 if (pipe_stats[pipe] & 0x8000ffff) { 4403 I915_WRITE(reg, pipe_stats[pipe]); 4404 irq_received = true; 4405 } 4406 } 4407 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4408 4409 if (!irq_received) 4410 break; 4411 4412 ret = IRQ_HANDLED; 4413 4414 /* Consume port. 
Then clear IIR or we'll miss events */ 4415 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4416 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4417 if (hotplug_status) 4418 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 4419 } 4420 4421 I915_WRITE(IIR, iir & ~flip_mask); 4422 new_iir = I915_READ(IIR); /* Flush posted writes */ 4423 4424 if (iir & I915_USER_INTERRUPT) 4425 notify_ring(&dev_priv->engine[RCS]); 4426 if (iir & I915_BSD_USER_INTERRUPT) 4427 notify_ring(&dev_priv->engine[VCS]); 4428 4429 for_each_pipe(dev_priv, pipe) { 4430 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4431 i915_handle_vblank(dev_priv, pipe, pipe, iir)) 4432 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4433 4434 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4435 blc_event = true; 4436 4437 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4438 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 4439 4440 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4441 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4442 } 4443 4444 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4445 intel_opregion_asle_intr(dev_priv); 4446 4447 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4448 gmbus_irq_handler(dev_priv); 4449 4450 /* With MSI, interrupts are only generated when iir 4451 * transitions from zero to nonzero. If another bit got 4452 * set while we were handling the existing iir bits, then 4453 * we would never get another interrupt. 4454 * 4455 * This is fine on non-MSI as well, as if we hit this path 4456 * we avoid exiting the interrupt handler only to generate 4457 * another one. 4458 * 4459 * Note that for MSI this could cause a stray interrupt report 4460 * if an interrupt landed in the time between writing IIR and 4461 * the posting read. This should be rare enough to never 4462 * trigger the 99% of 100,000 interrupts test for disabling 4463 * stray interrupts. 4464 */ 4465 iir = new_iir; 4466 } 4467 4468 enable_rpm_wakeref_asserts(dev_priv); 4469 4470 return ret; 4471 } 4472 4473 static void i965_irq_uninstall(struct drm_device * dev) 4474 { 4475 struct drm_i915_private *dev_priv = to_i915(dev); 4476 int pipe; 4477 4478 if (!dev_priv) 4479 return; 4480 4481 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4482 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4483 4484 I915_WRITE(HWSTAM, 0xffffffff); 4485 for_each_pipe(dev_priv, pipe) 4486 I915_WRITE(PIPESTAT(pipe), 0); 4487 I915_WRITE(IMR, 0xffffffff); 4488 I915_WRITE(IER, 0x0); 4489 4490 for_each_pipe(dev_priv, pipe) 4491 I915_WRITE(PIPESTAT(pipe), 4492 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4493 I915_WRITE(IIR, I915_READ(IIR)); 4494 } 4495 4496 /** 4497 * intel_irq_init - initializes irq support 4498 * @dev_priv: i915 device instance 4499 * 4500 * This function initializes all the irq support including work items, timers 4501 * and all the vtables. It does not setup the interrupt itself though. 
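 *
 * A rough usage sketch (illustrative only; the real call sites live in the
 * driver load code, not in this file, and the error label is a placeholder):
 *
 *	intel_irq_init(dev_priv);
 *	ret = intel_irq_install(dev_priv);
 *	if (ret)
 *		goto err_irq;
 *	...
 *	intel_irq_uninstall(dev_priv);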
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	dev_priv->rps.pm_intr_keep = 0;

	/*
	 * SNB and IVB can hard hang on a looping batchbuffer if
	 * GEN6_PM_UP_EI_EXPIRED is masked, and VLV,CHV may do so as well.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
4552 */ 4553 if (!IS_GEN2(dev_priv)) 4554 dev->vblank_disable_immediate = true; 4555 4556 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4557 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4558 4559 if (IS_CHERRYVIEW(dev_priv)) { 4560 dev->driver->irq_handler = cherryview_irq_handler; 4561 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4562 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4563 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4564 dev->driver->enable_vblank = valleyview_enable_vblank; 4565 dev->driver->disable_vblank = valleyview_disable_vblank; 4566 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4567 } else if (IS_VALLEYVIEW(dev_priv)) { 4568 dev->driver->irq_handler = valleyview_irq_handler; 4569 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4570 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4571 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4572 dev->driver->enable_vblank = valleyview_enable_vblank; 4573 dev->driver->disable_vblank = valleyview_disable_vblank; 4574 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4575 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4576 dev->driver->irq_handler = gen8_irq_handler; 4577 dev->driver->irq_preinstall = gen8_irq_reset; 4578 dev->driver->irq_postinstall = gen8_irq_postinstall; 4579 dev->driver->irq_uninstall = gen8_irq_uninstall; 4580 dev->driver->enable_vblank = gen8_enable_vblank; 4581 dev->driver->disable_vblank = gen8_disable_vblank; 4582 if (IS_BROXTON(dev)) 4583 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4584 else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev)) 4585 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4586 else 4587 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4588 } else if (HAS_PCH_SPLIT(dev)) { 4589 dev->driver->irq_handler = ironlake_irq_handler; 4590 dev->driver->irq_preinstall = ironlake_irq_reset; 4591 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4592 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4593 dev->driver->enable_vblank = ironlake_enable_vblank; 4594 dev->driver->disable_vblank = ironlake_disable_vblank; 4595 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4596 } else { 4597 if (IS_GEN2(dev_priv)) { 4598 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4599 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4600 dev->driver->irq_handler = i8xx_irq_handler; 4601 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4602 } else if (IS_GEN3(dev_priv)) { 4603 dev->driver->irq_preinstall = i915_irq_preinstall; 4604 dev->driver->irq_postinstall = i915_irq_postinstall; 4605 dev->driver->irq_uninstall = i915_irq_uninstall; 4606 dev->driver->irq_handler = i915_irq_handler; 4607 } else { 4608 dev->driver->irq_preinstall = i965_irq_preinstall; 4609 dev->driver->irq_postinstall = i965_irq_postinstall; 4610 dev->driver->irq_uninstall = i965_irq_uninstall; 4611 dev->driver->irq_handler = i965_irq_handler; 4612 } 4613 if (I915_HAS_HOTPLUG(dev_priv)) 4614 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4615 dev->driver->enable_vblank = i915_enable_vblank; 4616 dev->driver->disable_vblank = i915_disable_vblank; 4617 } 4618 } 4619 4620 /** 4621 * intel_irq_install - enables the hardware interrupt 4622 * @dev_priv: i915 device instance 4623 * 4624 * This function enables the hardware interrupt handling, but leaves the hotplug 4625 * handling still disabled. It is called after intel_irq_init(). 
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
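/*
 * Usage sketch for the two helpers above (illustrative only, not part of the
 * original file; the example_* names are hypothetical and the real
 * suspend/resume entry points live elsewhere in the driver, e.g. i915_drv.c):
 *
 *	static int example_runtime_suspend(struct drm_i915_private *dev_priv)
 *	{
 *		intel_runtime_pm_disable_interrupts(dev_priv);
 *		// ...save state and power the device down...
 *		return 0;
 *	}
 *
 *	static int example_runtime_resume(struct drm_i915_private *dev_priv)
 *	{
 *		// ...power the device back up and restore state...
 *		intel_runtime_pm_enable_interrupts(dev_priv);
 *		return 0;
 *	}
 */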