1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #include <drm/drmP.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "intel_drv.h" 33 34 /* For display hotplug interrupt */ 35 static void 36 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 37 { 38 if ((dev_priv->irq_mask & mask) != 0) { 39 dev_priv->irq_mask &= ~mask; 40 I915_WRITE(DEIMR, dev_priv->irq_mask); 41 POSTING_READ(DEIMR); 42 } 43 } 44 45 static inline void 46 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 47 { 48 if ((dev_priv->irq_mask & mask) != mask) { 49 dev_priv->irq_mask |= mask; 50 I915_WRITE(DEIMR, dev_priv->irq_mask); 51 POSTING_READ(DEIMR); 52 } 53 } 54 55 void 56 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 57 { 58 if ((dev_priv->pipestat[pipe] & mask) != mask) { 59 u32 reg = PIPESTAT(pipe); 60 61 dev_priv->pipestat[pipe] |= mask; 62 /* Enable the interrupt, clear any pending status */ 63 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 64 POSTING_READ(reg); 65 } 66 } 67 68 void 69 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 70 { 71 if ((dev_priv->pipestat[pipe] & mask) != 0) { 72 u32 reg = PIPESTAT(pipe); 73 74 dev_priv->pipestat[pipe] &= ~mask; 75 I915_WRITE(reg, dev_priv->pipestat[pipe]); 76 POSTING_READ(reg); 77 } 78 } 79 80 /** 81 * intel_enable_asle - enable ASLE interrupt for OpRegion 82 */ 83 void intel_enable_asle(struct drm_device *dev) 84 { 85 drm_i915_private_t *dev_priv = dev->dev_private; 86 87 /* FIXME: opregion/asle for VLV */ 88 if (IS_VALLEYVIEW(dev)) 89 return; 90 91 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 92 93 if (HAS_PCH_SPLIT(dev)) 94 ironlake_enable_display_irq(dev_priv, DE_GSE); 95 else { 96 i915_enable_pipestat(dev_priv, 1, 97 PIPE_LEGACY_BLC_EVENT_ENABLE); 98 if (INTEL_INFO(dev)->gen >= 4) 99 i915_enable_pipestat(dev_priv, 0, 100 PIPE_LEGACY_BLC_EVENT_ENABLE); 101 } 102 103 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 104 } 105 106 /** 107 * i915_pipe_enabled - check if a pipe is enabled 108 * @dev: DRM device 109 * @pipe: pipe to check 110 * 111 * Reading certain registers when the pipe is disabled can hang the chip. 112 * Use this routine to make sure the PLL is running and the pipe is active 113 * before reading such registers if unsure. 114 */ 115 static int 116 i915_pipe_enabled(struct drm_device *dev, int pipe) 117 { 118 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 119 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 120 pipe); 121 122 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; 123 } 124 125 /* Called from drm generic code, passed a 'crtc', which 126 * we use as a pipe index 127 */ 128 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 129 { 130 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 131 unsigned long high_frame; 132 unsigned long low_frame; 133 u32 high1, high2, low; 134 135 if (!i915_pipe_enabled(dev, pipe)) { 136 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 137 "pipe %c\n", pipe_name(pipe)); 138 return 0; 139 } 140 141 high_frame = PIPEFRAME(pipe); 142 low_frame = PIPEFRAMEPIXEL(pipe); 143 144 /* 145 * High & low register fields aren't synchronized, so make sure 146 * we get a low value that's stable across two reads of the high 147 * register. 148 */ 149 do { 150 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 151 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 152 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 153 } while (high1 != high2); 154 155 high1 >>= PIPE_FRAME_HIGH_SHIFT; 156 low >>= PIPE_FRAME_LOW_SHIFT; 157 return (high1 << 8) | low; 158 } 159 160 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 161 { 162 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 163 int reg = PIPE_FRMCOUNT_GM45(pipe); 164 165 if (!i915_pipe_enabled(dev, pipe)) { 166 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 167 "pipe %c\n", pipe_name(pipe)); 168 return 0; 169 } 170 171 return I915_READ(reg); 172 } 173 174 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 175 int *vpos, int *hpos) 176 { 177 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 178 u32 vbl = 0, position = 0; 179 int vbl_start, vbl_end, htotal, vtotal; 180 bool in_vbl = true; 181 int ret = 0; 182 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 183 pipe); 184 185 if (!i915_pipe_enabled(dev, pipe)) { 186 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 187 "pipe %c\n", pipe_name(pipe)); 188 return 0; 189 } 190 191 /* Get vtotal. */ 192 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 193 194 if (INTEL_INFO(dev)->gen >= 4) { 195 /* No obvious pixelcount register. Only query vertical 196 * scanout position from Display scan line register. 197 */ 198 position = I915_READ(PIPEDSL(pipe)); 199 200 /* Decode into vertical scanout position. Don't have 201 * horizontal scanout position. 202 */ 203 *vpos = position & 0x1fff; 204 *hpos = 0; 205 } else { 206 /* Have access to pixelcount since start of frame. 207 * We can split this into vertical and horizontal 208 * scanout position. 209 */ 210 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 211 212 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 213 *vpos = position / htotal; 214 *hpos = position - (*vpos * htotal); 215 } 216 217 /* Query vblank area. */ 218 vbl = I915_READ(VBLANK(cpu_transcoder)); 219 220 /* Test position against vblank region. */ 221 vbl_start = vbl & 0x1fff; 222 vbl_end = (vbl >> 16) & 0x1fff; 223 224 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 225 in_vbl = false; 226 227 /* Inside "upper part" of vblank area? Apply corrective offset: */ 228 if (in_vbl && (*vpos >= vbl_start)) 229 *vpos = *vpos - vtotal; 230 231 /* Readouts valid? */ 232 if (vbl > 0) 233 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 234 235 /* In vblank? */ 236 if (in_vbl) 237 ret |= DRM_SCANOUTPOS_INVBL; 238 239 return ret; 240 } 241 242 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 243 int *max_error, 244 struct timeval *vblank_time, 245 unsigned flags) 246 { 247 struct drm_i915_private *dev_priv = dev->dev_private; 248 struct drm_crtc *crtc; 249 250 if (pipe < 0 || pipe >= dev_priv->num_pipe) { 251 DRM_ERROR("Invalid crtc %d\n", pipe); 252 return -EINVAL; 253 } 254 255 /* Get drm_crtc to timestamp: */ 256 crtc = intel_get_crtc_for_pipe(dev, pipe); 257 if (crtc == NULL) { 258 DRM_ERROR("Invalid crtc %d\n", pipe); 259 return -EINVAL; 260 } 261 262 if (!crtc->enabled) { 263 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 264 return -EBUSY; 265 } 266 267 /* Helper routine in DRM core does all the work: */ 268 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 269 vblank_time, flags, 270 crtc); 271 } 272 273 /* 274 * Handle hotplug events outside the interrupt handler proper. 275 */ 276 static void i915_hotplug_work_func(struct work_struct *work) 277 { 278 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 279 hotplug_work); 280 struct drm_device *dev = dev_priv->dev; 281 struct drm_mode_config *mode_config = &dev->mode_config; 282 struct intel_encoder *encoder; 283 284 lockmgr(&mode_config->mutex, LK_EXCLUSIVE); 285 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 286 287 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 288 if (encoder->hot_plug) 289 encoder->hot_plug(encoder); 290 291 lockmgr(&mode_config->mutex, LK_RELEASE); 292 293 /* Just fire off a uevent and let userspace tell us what to do */ 294 drm_helper_hpd_irq_event(dev); 295 } 296 297 static void ironlake_handle_rps_change(struct drm_device *dev) 298 { 299 drm_i915_private_t *dev_priv = dev->dev_private; 300 u32 busy_up, busy_down, max_avg, min_avg; 301 u8 new_delay; 302 303 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 304 305 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 306 307 new_delay = dev_priv->ips.cur_delay; 308 309 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 310 busy_up = I915_READ(RCPREVBSYTUPAVG); 311 busy_down = I915_READ(RCPREVBSYTDNAVG); 312 max_avg = I915_READ(RCBMAXAVG); 313 min_avg = I915_READ(RCBMINAVG); 314 315 /* Handle RCS change request from hw */ 316 if (busy_up > max_avg) { 317 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 318 new_delay = dev_priv->ips.cur_delay - 1; 319 if (new_delay < dev_priv->ips.max_delay) 320 new_delay = dev_priv->ips.max_delay; 321 } else if (busy_down < min_avg) { 322 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 323 new_delay = dev_priv->ips.cur_delay + 1; 324 if (new_delay > dev_priv->ips.min_delay) 325 new_delay = dev_priv->ips.min_delay; 326 } 327 328 if (ironlake_set_drps(dev, new_delay)) 329 dev_priv->ips.cur_delay = new_delay; 330 331 lockmgr(&mchdev_lock, LK_RELEASE); 332 333 return; 334 } 335 336 static void notify_ring(struct drm_device *dev, 337 struct intel_ring_buffer *ring) 338 { 339 struct drm_i915_private *dev_priv = dev->dev_private; 340 341 if (ring->obj == NULL) 342 return; 343 344 wake_up_all(&ring->irq_queue); 345 if (i915_enable_hangcheck) { 346 dev_priv->hangcheck_count = 0; 347 mod_timer(&dev_priv->hangcheck_timer, 348 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 349 } 350 } 351 352 static void gen6_pm_rps_work(struct work_struct *work) 353 { 354 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 355 rps.work); 356 u32 pm_iir, pm_imr; 357 u8 new_delay; 358 359 spin_lock(&dev_priv->rps.lock); 360 pm_iir = dev_priv->rps.pm_iir; 361 dev_priv->rps.pm_iir = 0; 362 pm_imr = I915_READ(GEN6_PMIMR); 363 I915_WRITE(GEN6_PMIMR, 0); 364 spin_unlock(&dev_priv->rps.lock); 365 366 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 367 return; 368 369 lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE); 370 371 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 372 new_delay = dev_priv->rps.cur_delay + 1; 373 else 374 new_delay = dev_priv->rps.cur_delay - 1; 375 376 /* sysfs frequency interfaces may have snuck in while servicing the 377 * interrupt 378 */ 379 if (!(new_delay > dev_priv->rps.max_delay || 380 new_delay < dev_priv->rps.min_delay)) { 381 gen6_set_rps(dev_priv->dev, new_delay); 382 } 383 384 lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE); 385 } 386 387 388 /** 389 * ivybridge_parity_work - Workqueue called when a parity error interrupt 390 * occurred. 391 * @work: workqueue struct 392 * 393 * Doesn't actually do anything except notify userspace. As a consequence of 394 * this event, userspace should try to remap the bad rows since statistically 395 * it is likely the same row is more likely to go bad again. 396 */ 397 static void ivybridge_parity_work(struct work_struct *work) 398 { 399 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 400 l3_parity.error_work); 401 u32 error_status, row, bank, subbank; 402 uint32_t misccpctl; 403 404 /* We must turn off DOP level clock gating to access the L3 registers. 405 * In order to prevent a get/put style interface, acquire struct mutex 406 * any time we access those registers. 407 */ 408 DRM_LOCK(dev_priv->dev); 409 410 misccpctl = I915_READ(GEN7_MISCCPCTL); 411 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 412 POSTING_READ(GEN7_MISCCPCTL); 413 414 error_status = I915_READ(GEN7_L3CDERRST1); 415 row = GEN7_PARITY_ERROR_ROW(error_status); 416 bank = GEN7_PARITY_ERROR_BANK(error_status); 417 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 418 419 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 420 GEN7_L3CDERRST1_ENABLE); 421 POSTING_READ(GEN7_L3CDERRST1); 422 423 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 424 425 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 426 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 427 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 428 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 429 430 DRM_UNLOCK(dev_priv->dev); 431 432 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 433 row, bank, subbank); 434 } 435 436 static void ivybridge_handle_parity_error(struct drm_device *dev) 437 { 438 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 439 440 if (!HAS_L3_GPU_CACHE(dev)) 441 return; 442 443 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 444 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 445 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 446 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 447 448 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 449 } 450 451 static void snb_gt_irq_handler(struct drm_device *dev, 452 struct drm_i915_private *dev_priv, 453 u32 gt_iir) 454 { 455 456 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | 457 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) 458 notify_ring(dev, &dev_priv->ring[RCS]); 459 if (gt_iir & GEN6_BSD_USER_INTERRUPT) 460 notify_ring(dev, &dev_priv->ring[VCS]); 461 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) 462 notify_ring(dev, &dev_priv->ring[BCS]); 463 464 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | 465 GT_GEN6_BSD_CS_ERROR_INTERRUPT | 466 GT_RENDER_CS_ERROR_INTERRUPT)) { 467 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 468 i915_handle_error(dev, false); 469 } 470 471 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) 472 ivybridge_handle_parity_error(dev); 473 } 474 475 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, 476 u32 pm_iir) 477 { 478 479 /* 480 * IIR bits should never already be set because IMR should 481 * prevent an interrupt from being shown in IIR. The warning 482 * displays a case where we've unsafely cleared 483 * dev_priv->rps.pm_iir. Although missing an interrupt of the same 484 * type is not a problem, it displays a problem in the logic. 485 * 486 * The mask bit in IMR is cleared by dev_priv->rps.work. 487 */ 488 489 spin_lock(&dev_priv->rps.lock); 490 dev_priv->rps.pm_iir |= pm_iir; 491 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 492 POSTING_READ(GEN6_PMIMR); 493 spin_unlock(&dev_priv->rps.lock); 494 495 queue_work(dev_priv->wq, &dev_priv->rps.work); 496 } 497 498 static irqreturn_t valleyview_irq_handler(void *arg) 499 { 500 struct drm_device *dev = (struct drm_device *) arg; 501 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 502 u32 iir, gt_iir, pm_iir; 503 int pipe; 504 u32 pipe_stats[I915_MAX_PIPES]; 505 bool blc_event; 506 507 atomic_inc(&dev_priv->irq_received); 508 509 while (true) { 510 iir = I915_READ(VLV_IIR); 511 gt_iir = I915_READ(GTIIR); 512 pm_iir = I915_READ(GEN6_PMIIR); 513 514 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 515 goto out; 516 517 snb_gt_irq_handler(dev, dev_priv, gt_iir); 518 519 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 520 for_each_pipe(pipe) { 521 int reg = PIPESTAT(pipe); 522 pipe_stats[pipe] = I915_READ(reg); 523 524 /* 525 * Clear the PIPE*STAT regs before the IIR 526 */ 527 if (pipe_stats[pipe] & 0x8000ffff) { 528 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 529 DRM_DEBUG_DRIVER("pipe %c underrun\n", 530 pipe_name(pipe)); 531 I915_WRITE(reg, pipe_stats[pipe]); 532 } 533 } 534 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 535 536 for_each_pipe(pipe) { 537 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 538 drm_handle_vblank(dev, pipe); 539 540 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 541 intel_prepare_page_flip(dev, pipe); 542 intel_finish_page_flip(dev, pipe); 543 } 544 } 545 546 /* Consume port. Then clear IIR or we'll miss events */ 547 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 548 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 549 550 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 551 hotplug_status); 552 if (hotplug_status & dev_priv->hotplug_supported_mask) 553 queue_work(dev_priv->wq, 554 &dev_priv->hotplug_work); 555 556 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 557 I915_READ(PORT_HOTPLUG_STAT); 558 } 559 560 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 561 blc_event = true; 562 563 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 564 gen6_queue_rps_work(dev_priv, pm_iir); 565 566 I915_WRITE(GTIIR, gt_iir); 567 I915_WRITE(GEN6_PMIIR, pm_iir); 568 I915_WRITE(VLV_IIR, iir); 569 } 570 571 out: 572 return; 573 } 574 575 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 576 { 577 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 578 int pipe; 579 580 if (pch_iir & SDE_HOTPLUG_MASK) 581 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 582 583 if (pch_iir & SDE_AUDIO_POWER_MASK) 584 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 585 (pch_iir & SDE_AUDIO_POWER_MASK) >> 586 SDE_AUDIO_POWER_SHIFT); 587 588 if (pch_iir & SDE_GMBUS) 589 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 590 591 if (pch_iir & SDE_AUDIO_HDCP_MASK) 592 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 593 594 if (pch_iir & SDE_AUDIO_TRANS_MASK) 595 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 596 597 if (pch_iir & SDE_POISON) 598 DRM_ERROR("PCH poison interrupt\n"); 599 600 if (pch_iir & SDE_FDI_MASK) 601 for_each_pipe(pipe) 602 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 603 pipe_name(pipe), 604 I915_READ(FDI_RX_IIR(pipe))); 605 606 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 607 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 608 609 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 610 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 611 612 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 613 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); 614 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 615 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 616 } 617 618 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 619 { 620 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 621 int pipe; 622 623 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 624 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 625 626 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 627 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 628 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 629 SDE_AUDIO_POWER_SHIFT_CPT); 630 631 if (pch_iir & SDE_AUX_MASK_CPT) 632 DRM_DEBUG_DRIVER("AUX channel interrupt\n"); 633 634 if (pch_iir & SDE_GMBUS_CPT) 635 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 636 637 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 638 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 639 640 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 641 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 642 643 if (pch_iir & SDE_FDI_MASK_CPT) 644 for_each_pipe(pipe) 645 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 646 pipe_name(pipe), 647 I915_READ(FDI_RX_IIR(pipe))); 648 } 649 650 static irqreturn_t ivybridge_irq_handler(void *arg) 651 { 652 struct drm_device *dev = (struct drm_device *) arg; 653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 654 u32 de_iir, gt_iir, de_ier, pm_iir; 655 int i; 656 657 atomic_inc(&dev_priv->irq_received); 658 659 /* disable master interrupt before clearing iir */ 660 de_ier = I915_READ(DEIER); 661 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 662 663 gt_iir = I915_READ(GTIIR); 664 if (gt_iir) { 665 snb_gt_irq_handler(dev, dev_priv, gt_iir); 666 I915_WRITE(GTIIR, gt_iir); 667 } 668 669 de_iir = I915_READ(DEIIR); 670 if (de_iir) { 671 if (de_iir & DE_GSE_IVB) 672 intel_opregion_gse_intr(dev); 673 674 for (i = 0; i < 3; i++) { 675 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 676 drm_handle_vblank(dev, i); 677 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 678 intel_prepare_page_flip(dev, i); 679 intel_finish_page_flip_plane(dev, i); 680 } 681 } 682 683 /* check event from PCH */ 684 if (de_iir & DE_PCH_EVENT_IVB) { 685 u32 pch_iir = I915_READ(SDEIIR); 686 687 cpt_irq_handler(dev, pch_iir); 688 689 /* clear PCH hotplug event before clear CPU irq */ 690 I915_WRITE(SDEIIR, pch_iir); 691 } 692 693 I915_WRITE(DEIIR, de_iir); 694 } 695 696 pm_iir = I915_READ(GEN6_PMIIR); 697 if (pm_iir) { 698 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 699 gen6_queue_rps_work(dev_priv, pm_iir); 700 I915_WRITE(GEN6_PMIIR, pm_iir); 701 } 702 703 I915_WRITE(DEIER, de_ier); 704 POSTING_READ(DEIER); 705 } 706 707 static void ilk_gt_irq_handler(struct drm_device *dev, 708 struct drm_i915_private *dev_priv, 709 u32 gt_iir) 710 { 711 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 712 notify_ring(dev, &dev_priv->ring[RCS]); 713 if (gt_iir & GT_BSD_USER_INTERRUPT) 714 notify_ring(dev, &dev_priv->ring[VCS]); 715 } 716 717 static irqreturn_t ironlake_irq_handler(void *arg) 718 { 719 struct drm_device *dev = (struct drm_device *) arg; 720 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 721 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 722 723 atomic_inc(&dev_priv->irq_received); 724 725 /* disable master interrupt before clearing iir */ 726 de_ier = I915_READ(DEIER); 727 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 728 POSTING_READ(DEIER); 729 730 de_iir = I915_READ(DEIIR); 731 gt_iir = I915_READ(GTIIR); 732 pch_iir = I915_READ(SDEIIR); 733 pm_iir = I915_READ(GEN6_PMIIR); 734 735 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && 736 (!IS_GEN6(dev) || pm_iir == 0)) 737 goto done; 738 739 if (IS_GEN5(dev)) 740 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 741 else 742 snb_gt_irq_handler(dev, dev_priv, gt_iir); 743 744 if (de_iir & DE_GSE) 745 intel_opregion_gse_intr(dev); 746 747 if (de_iir & DE_PIPEA_VBLANK) 748 drm_handle_vblank(dev, 0); 749 750 if (de_iir & DE_PIPEB_VBLANK) 751 drm_handle_vblank(dev, 1); 752 753 if (de_iir & DE_PLANEA_FLIP_DONE) { 754 intel_prepare_page_flip(dev, 0); 755 intel_finish_page_flip_plane(dev, 0); 756 } 757 758 if (de_iir & DE_PLANEB_FLIP_DONE) { 759 intel_prepare_page_flip(dev, 1); 760 intel_finish_page_flip_plane(dev, 1); 761 } 762 763 /* check event from PCH */ 764 if (de_iir & DE_PCH_EVENT) { 765 if (HAS_PCH_CPT(dev)) 766 cpt_irq_handler(dev, pch_iir); 767 else 768 ibx_irq_handler(dev, pch_iir); 769 } 770 771 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 772 ironlake_handle_rps_change(dev); 773 774 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 775 gen6_queue_rps_work(dev_priv, pm_iir); 776 777 /* should clear PCH hotplug event before clear CPU irq */ 778 I915_WRITE(SDEIIR, pch_iir); 779 I915_WRITE(GTIIR, gt_iir); 780 I915_WRITE(DEIIR, de_iir); 781 I915_WRITE(GEN6_PMIIR, pm_iir); 782 783 done: 784 I915_WRITE(DEIER, de_ier); 785 POSTING_READ(DEIER); 786 } 787 788 /** 789 * i915_error_work_func - do process context error handling work 790 * @work: work struct 791 * 792 * Fire an error uevent so userspace can see that a hang or error 793 * was detected. 794 */ 795 static void i915_error_work_func(struct work_struct *work) 796 { 797 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 798 error_work); 799 struct drm_device *dev = dev_priv->dev; 800 801 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */ 802 803 if (atomic_read(&dev_priv->mm.wedged)) { 804 DRM_DEBUG_DRIVER("resetting chip\n"); 805 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */ 806 if (!i915_reset(dev)) { 807 atomic_set(&dev_priv->mm.wedged, 0); 808 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */ 809 } 810 complete_all(&dev_priv->error_completion); 811 } 812 } 813 814 /* NB: please notice the memset */ 815 static void i915_get_extra_instdone(struct drm_device *dev, 816 uint32_t *instdone) 817 { 818 struct drm_i915_private *dev_priv = dev->dev_private; 819 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 820 821 switch(INTEL_INFO(dev)->gen) { 822 case 2: 823 case 3: 824 instdone[0] = I915_READ(INSTDONE); 825 break; 826 case 4: 827 case 5: 828 case 6: 829 instdone[0] = I915_READ(INSTDONE_I965); 830 instdone[1] = I915_READ(INSTDONE1); 831 break; 832 default: 833 #if 0 834 WARN_ONCE(1, "Unsupported platform\n"); 835 #endif 836 case 7: 837 instdone[0] = I915_READ(GEN7_INSTDONE_1); 838 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 839 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 840 instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 841 break; 842 } 843 } 844 845 #if 0 /* CONFIG_DEBUG_FS */ 846 static struct drm_i915_error_object * 847 i915_error_object_create(struct drm_i915_private *dev_priv, 848 struct drm_i915_gem_object *src) 849 { 850 struct drm_i915_error_object *dst; 851 int i, count; 852 u32 reloc_offset; 853 854 if (src == NULL || src->pages == NULL) 855 return NULL; 856 857 count = src->base.size / PAGE_SIZE; 858 859 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); 860 if (dst == NULL) 861 return NULL; 862 863 reloc_offset = src->gtt_offset; 864 for (i = 0; i < count; i++) { 865 unsigned long flags; 866 void *d; 867 868 d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 869 if (d == NULL) 870 goto unwind; 871 872 local_irq_save(flags); 873 if (reloc_offset < dev_priv->mm.gtt_mappable_end && 874 src->has_global_gtt_mapping) { 875 void __iomem *s; 876 877 /* Simply ignore tiling or any overlapping fence. 878 * It's part of the error state, and this hopefully 879 * captures what the GPU read. 880 */ 881 882 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 883 reloc_offset); 884 memcpy_fromio(d, s, PAGE_SIZE); 885 io_mapping_unmap_atomic(s); 886 } else { 887 struct page *page; 888 void *s; 889 890 page = i915_gem_object_get_page(src, i); 891 892 drm_clflush_pages(&page, 1); 893 894 s = kmap_atomic(page); 895 memcpy(d, s, PAGE_SIZE); 896 kunmap_atomic(s); 897 898 drm_clflush_pages(&page, 1); 899 } 900 local_irq_restore(flags); 901 902 dst->pages[i] = d; 903 904 reloc_offset += PAGE_SIZE; 905 } 906 dst->page_count = count; 907 dst->gtt_offset = src->gtt_offset; 908 909 return dst; 910 911 unwind: 912 while (i--) 913 kfree(dst->pages[i]); 914 kfree(dst); 915 return NULL; 916 } 917 918 static void 919 i915_error_object_free(struct drm_i915_error_object *obj) 920 { 921 int page; 922 923 if (obj == NULL) 924 return; 925 926 for (page = 0; page < obj->page_count; page++) 927 kfree(obj->pages[page]); 928 929 kfree(obj); 930 } 931 932 void 933 i915_error_state_free(struct drm_device *dev, 934 struct drm_i915_error_state *error) 935 { 936 struct drm_i915_error_state *error = container_of(error_ref, 937 typeof(*error), ref); 938 int i; 939 940 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 941 i915_error_object_free(error->ring[i].batchbuffer); 942 i915_error_object_free(error->ring[i].ringbuffer); 943 kfree(error->ring[i].requests); 944 } 945 946 kfree(error->active_bo); 947 kfree(error->overlay); 948 kfree(error); 949 } 950 static void capture_bo(struct drm_i915_error_buffer *err, 951 struct drm_i915_gem_object *obj) 952 { 953 err->size = obj->base.size; 954 err->name = obj->base.name; 955 err->rseqno = obj->last_read_seqno; 956 err->wseqno = obj->last_write_seqno; 957 err->gtt_offset = obj->gtt_offset; 958 err->read_domains = obj->base.read_domains; 959 err->write_domain = obj->base.write_domain; 960 err->fence_reg = obj->fence_reg; 961 err->pinned = 0; 962 if (obj->pin_count > 0) 963 err->pinned = 1; 964 if (obj->user_pin_count > 0) 965 err->pinned = -1; 966 err->tiling = obj->tiling_mode; 967 err->dirty = obj->dirty; 968 err->purgeable = obj->madv != I915_MADV_WILLNEED; 969 err->ring = obj->ring ? obj->ring->id : -1; 970 err->cache_level = obj->cache_level; 971 } 972 973 static u32 capture_active_bo(struct drm_i915_error_buffer *err, 974 int count, struct list_head *head) 975 { 976 struct drm_i915_gem_object *obj; 977 int i = 0; 978 979 list_for_each_entry(obj, head, mm_list) { 980 capture_bo(err++, obj); 981 if (++i == count) 982 break; 983 } 984 985 return i; 986 } 987 988 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 989 int count, struct list_head *head) 990 { 991 struct drm_i915_gem_object *obj; 992 int i = 0; 993 994 list_for_each_entry(obj, head, gtt_list) { 995 if (obj->pin_count == 0) 996 continue; 997 998 capture_bo(err++, obj); 999 if (++i == count) 1000 break; 1001 } 1002 1003 return i; 1004 } 1005 1006 static void i915_gem_record_fences(struct drm_device *dev, 1007 struct drm_i915_error_state *error) 1008 { 1009 struct drm_i915_private *dev_priv = dev->dev_private; 1010 int i; 1011 1012 /* Fences */ 1013 switch (INTEL_INFO(dev)->gen) { 1014 case 7: 1015 case 6: 1016 for (i = 0; i < 16; i++) 1017 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1018 break; 1019 case 5: 1020 case 4: 1021 for (i = 0; i < 16; i++) 1022 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 1023 break; 1024 case 3: 1025 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 1026 for (i = 0; i < 8; i++) 1027 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 1028 case 2: 1029 for (i = 0; i < 8; i++) 1030 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1031 break; 1032 1033 } 1034 } 1035 1036 static struct drm_i915_error_object * 1037 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 1038 struct intel_ring_buffer *ring) 1039 { 1040 struct drm_i915_gem_object *obj; 1041 u32 seqno; 1042 1043 if (!ring->get_seqno) 1044 return NULL; 1045 1046 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { 1047 u32 acthd = I915_READ(ACTHD); 1048 1049 if (WARN_ON(ring->id != RCS)) 1050 return NULL; 1051 1052 obj = ring->private; 1053 if (acthd >= obj->gtt_offset && 1054 acthd < obj->gtt_offset + obj->base.size) 1055 return i915_error_object_create(dev_priv, obj); 1056 } 1057 1058 seqno = ring->get_seqno(ring, false); 1059 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1060 if (obj->ring != ring) 1061 continue; 1062 1063 if (i915_seqno_passed(seqno, obj->last_read_seqno)) 1064 continue; 1065 1066 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1067 continue; 1068 1069 /* We need to copy these to an anonymous buffer as the simplest 1070 * method to avoid being overwritten by userspace. 1071 */ 1072 return i915_error_object_create(dev_priv, obj); 1073 } 1074 1075 return NULL; 1076 } 1077 1078 static void i915_record_ring_state(struct drm_device *dev, 1079 struct drm_i915_error_state *error, 1080 struct intel_ring_buffer *ring) 1081 { 1082 struct drm_i915_private *dev_priv = dev->dev_private; 1083 1084 if (INTEL_INFO(dev)->gen >= 6) { 1085 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 1086 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 1087 error->semaphore_mboxes[ring->id][0] 1088 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1089 error->semaphore_mboxes[ring->id][1] 1090 = I915_READ(RING_SYNC_1(ring->mmio_base)); 1091 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 1092 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 1093 } 1094 1095 if (INTEL_INFO(dev)->gen >= 4) { 1096 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 1097 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1098 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1099 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1100 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1101 if (ring->id == RCS) 1102 error->bbaddr = I915_READ64(BB_ADDR); 1103 } else { 1104 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1105 error->ipeir[ring->id] = I915_READ(IPEIR); 1106 error->ipehr[ring->id] = I915_READ(IPEHR); 1107 error->instdone[ring->id] = I915_READ(INSTDONE); 1108 } 1109 1110 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1111 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1112 error->seqno[ring->id] = ring->get_seqno(ring, false); 1113 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1114 error->head[ring->id] = I915_READ_HEAD(ring); 1115 error->tail[ring->id] = I915_READ_TAIL(ring); 1116 error->ctl[ring->id] = I915_READ_CTL(ring); 1117 1118 error->cpu_ring_head[ring->id] = ring->head; 1119 error->cpu_ring_tail[ring->id] = ring->tail; 1120 } 1121 1122 static void i915_gem_record_rings(struct drm_device *dev, 1123 struct drm_i915_error_state *error) 1124 { 1125 struct drm_i915_private *dev_priv = dev->dev_private; 1126 struct intel_ring_buffer *ring; 1127 struct drm_i915_gem_request *request; 1128 int i, count; 1129 1130 for_each_ring(ring, dev_priv, i) { 1131 i915_record_ring_state(dev, error, ring); 1132 1133 error->ring[i].batchbuffer = 1134 i915_error_first_batchbuffer(dev_priv, ring); 1135 1136 error->ring[i].ringbuffer = 1137 i915_error_object_create(dev_priv, ring->obj); 1138 1139 count = 0; 1140 list_for_each_entry(request, &ring->request_list, list) 1141 count++; 1142 1143 error->ring[i].num_requests = count; 1144 error->ring[i].requests = 1145 kmalloc(count*sizeof(struct drm_i915_error_request), 1146 GFP_ATOMIC); 1147 if (error->ring[i].requests == NULL) { 1148 error->ring[i].num_requests = 0; 1149 continue; 1150 } 1151 1152 count = 0; 1153 list_for_each_entry(request, &ring->request_list, list) { 1154 struct drm_i915_error_request *erq; 1155 1156 erq = &error->ring[i].requests[count++]; 1157 erq->seqno = request->seqno; 1158 erq->jiffies = request->emitted_jiffies; 1159 erq->tail = request->tail; 1160 } 1161 } 1162 } 1163 1164 /** 1165 * i915_capture_error_state - capture an error record for later analysis 1166 * @dev: drm device 1167 * 1168 * Should be called when an error is detected (either a hang or an error 1169 * interrupt) to capture error state from the time of the error. Fills 1170 * out a structure which becomes available in debugfs for user level tools 1171 * to pick up. 1172 */ 1173 static void i915_capture_error_state(struct drm_device *dev) 1174 { 1175 struct drm_i915_private *dev_priv = dev->dev_private; 1176 struct drm_i915_gem_object *obj; 1177 struct drm_i915_error_state *error; 1178 unsigned long flags; 1179 int i, pipe; 1180 1181 spin_lock_irqsave(&dev_priv->error_lock, flags); 1182 error = dev_priv->first_error; 1183 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1184 if (error) 1185 return; 1186 1187 /* Account for pipe specific data like PIPE*STAT */ 1188 error = kmalloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO); 1189 if (!error) { 1190 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1191 return; 1192 } 1193 1194 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", 1195 dev->primary->index); 1196 1197 kref_init(&error->ref); 1198 error->eir = I915_READ(EIR); 1199 error->pgtbl_er = I915_READ(PGTBL_ER); 1200 error->ccid = I915_READ(CCID); 1201 1202 if (HAS_PCH_SPLIT(dev)) 1203 error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1204 else if (IS_VALLEYVIEW(dev)) 1205 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1206 else if (IS_GEN2(dev)) 1207 error->ier = I915_READ16(IER); 1208 else 1209 error->ier = I915_READ(IER); 1210 1211 if (INTEL_INFO(dev)->gen >= 6) 1212 error->derrmr = I915_READ(DERRMR); 1213 1214 if (IS_VALLEYVIEW(dev)) 1215 error->forcewake = I915_READ(FORCEWAKE_VLV); 1216 else if (INTEL_INFO(dev)->gen >= 7) 1217 error->forcewake = I915_READ(FORCEWAKE_MT); 1218 else if (INTEL_INFO(dev)->gen == 6) 1219 error->forcewake = I915_READ(FORCEWAKE); 1220 1221 for_each_pipe(pipe) 1222 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1223 1224 if (INTEL_INFO(dev)->gen >= 6) { 1225 error->error = I915_READ(ERROR_GEN6); 1226 error->done_reg = I915_READ(DONE_REG); 1227 } 1228 1229 if (INTEL_INFO(dev)->gen == 7) 1230 error->err_int = I915_READ(GEN7_ERR_INT); 1231 1232 i915_get_extra_instdone(dev, error->extra_instdone); 1233 1234 i915_gem_record_fences(dev, error); 1235 i915_gem_record_rings(dev, error); 1236 1237 /* Record buffers on the active and pinned lists. */ 1238 error->active_bo = NULL; 1239 error->pinned_bo = NULL; 1240 1241 i = 0; 1242 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1243 i++; 1244 error->active_bo_count = i; 1245 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 1246 if (obj->pin_count) 1247 i++; 1248 error->pinned_bo_count = i - error->active_bo_count; 1249 1250 error->active_bo = NULL; 1251 error->pinned_bo = NULL; 1252 if (i) { 1253 error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 1254 GFP_ATOMIC); 1255 if (error->active_bo) 1256 error->pinned_bo = 1257 error->active_bo + error->active_bo_count; 1258 } 1259 1260 if (error->active_bo) 1261 error->active_bo_count = 1262 capture_active_bo(error->active_bo, 1263 error->active_bo_count, 1264 &dev_priv->mm.active_list); 1265 1266 if (error->pinned_bo) 1267 error->pinned_bo_count = 1268 capture_pinned_bo(error->pinned_bo, 1269 error->pinned_bo_count, 1270 &dev_priv->mm.bound_list); 1271 1272 do_gettimeofday(&error->time); 1273 1274 error->overlay = intel_overlay_capture_error_state(dev); 1275 error->display = intel_display_capture_error_state(dev); 1276 1277 spin_lock_irqsave(&dev_priv->error_lock, flags); 1278 if (dev_priv->first_error == NULL) { 1279 dev_priv->first_error = error; 1280 error = NULL; 1281 } 1282 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1283 1284 if (error) 1285 i915_error_state_free(&error->ref); 1286 } 1287 1288 void i915_destroy_error_state(struct drm_device *dev) 1289 { 1290 struct drm_i915_private *dev_priv = dev->dev_private; 1291 struct drm_i915_error_state *error; 1292 1293 lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE); 1294 error = dev_priv->first_error; 1295 dev_priv->first_error = NULL; 1296 lockmgr(&dev_priv->error_lock, LK_RELEASE); 1297 1298 if (error) 1299 i915_error_state_free(dev, error); 1300 } 1301 #else 1302 #define i915_capture_error_state(x) 1303 #endif 1304 1305 static void i915_report_and_clear_eir(struct drm_device *dev) 1306 { 1307 struct drm_i915_private *dev_priv = dev->dev_private; 1308 uint32_t instdone[I915_NUM_INSTDONE_REG]; 1309 u32 eir = I915_READ(EIR); 1310 int pipe, i; 1311 1312 if (!eir) 1313 return; 1314 1315 pr_err("render error detected, EIR: 0x%08x\n", eir); 1316 1317 i915_get_extra_instdone(dev, instdone); 1318 1319 if (IS_G4X(dev)) { 1320 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1321 u32 ipeir = I915_READ(IPEIR_I965); 1322 1323 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1324 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1325 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1326 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1327 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1328 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1329 I915_WRITE(IPEIR_I965, ipeir); 1330 POSTING_READ(IPEIR_I965); 1331 } 1332 if (eir & GM45_ERROR_PAGE_TABLE) { 1333 u32 pgtbl_err = I915_READ(PGTBL_ER); 1334 pr_err("page table error\n"); 1335 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1336 I915_WRITE(PGTBL_ER, pgtbl_err); 1337 POSTING_READ(PGTBL_ER); 1338 } 1339 } 1340 1341 if (!IS_GEN2(dev)) { 1342 if (eir & I915_ERROR_PAGE_TABLE) { 1343 u32 pgtbl_err = I915_READ(PGTBL_ER); 1344 pr_err("page table error\n"); 1345 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1346 I915_WRITE(PGTBL_ER, pgtbl_err); 1347 POSTING_READ(PGTBL_ER); 1348 } 1349 } 1350 1351 if (eir & I915_ERROR_MEMORY_REFRESH) { 1352 pr_err("memory refresh error:\n"); 1353 for_each_pipe(pipe) 1354 pr_err("pipe %c stat: 0x%08x\n", 1355 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 1356 /* pipestat has already been acked */ 1357 } 1358 if (eir & I915_ERROR_INSTRUCTION) { 1359 pr_err("instruction error\n"); 1360 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1361 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1362 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1363 if (INTEL_INFO(dev)->gen < 4) { 1364 u32 ipeir = I915_READ(IPEIR); 1365 1366 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1367 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1368 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1369 I915_WRITE(IPEIR, ipeir); 1370 POSTING_READ(IPEIR); 1371 } else { 1372 u32 ipeir = I915_READ(IPEIR_I965); 1373 1374 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1375 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1376 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1377 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1378 I915_WRITE(IPEIR_I965, ipeir); 1379 POSTING_READ(IPEIR_I965); 1380 } 1381 } 1382 1383 I915_WRITE(EIR, eir); 1384 POSTING_READ(EIR); 1385 eir = I915_READ(EIR); 1386 if (eir) { 1387 /* 1388 * some errors might have become stuck, 1389 * mask them. 1390 */ 1391 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 1392 I915_WRITE(EMR, I915_READ(EMR) | eir); 1393 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 1394 } 1395 } 1396 1397 /** 1398 * i915_handle_error - handle an error interrupt 1399 * @dev: drm device 1400 * 1401 * Do some basic checking of regsiter state at error interrupt time and 1402 * dump it to the syslog. Also call i915_capture_error_state() to make 1403 * sure we get a record and make it available in debugfs. Fire a uevent 1404 * so userspace knows something bad happened (should trigger collection 1405 * of a ring dump etc.). 1406 */ 1407 void i915_handle_error(struct drm_device *dev, bool wedged) 1408 { 1409 struct drm_i915_private *dev_priv = dev->dev_private; 1410 struct intel_ring_buffer *ring; 1411 int i; 1412 1413 i915_capture_error_state(dev); 1414 i915_report_and_clear_eir(dev); 1415 1416 if (wedged) { 1417 INIT_COMPLETION(dev_priv->error_completion); 1418 atomic_set(&dev_priv->mm.wedged, 1); 1419 1420 /* 1421 * Wakeup waiting processes so they don't hang 1422 */ 1423 for_each_ring(ring, dev_priv, i) 1424 wake_up_all(&ring->irq_queue); 1425 } 1426 1427 queue_work(dev_priv->wq, &dev_priv->error_work); 1428 } 1429 1430 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1431 { 1432 drm_i915_private_t *dev_priv = dev->dev_private; 1433 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1434 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1435 struct drm_i915_gem_object *obj; 1436 struct intel_unpin_work *work; 1437 bool stall_detected; 1438 1439 /* Ignore early vblank irqs */ 1440 if (intel_crtc == NULL) 1441 return; 1442 1443 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 1444 work = intel_crtc->unpin_work; 1445 1446 if (work == NULL || 1447 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1448 !work->enable_stall_check) { 1449 /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 1450 lockmgr(&dev->event_lock, LK_RELEASE); 1451 return; 1452 } 1453 1454 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1455 obj = work->pending_flip_obj; 1456 if (INTEL_INFO(dev)->gen >= 4) { 1457 int dspsurf = DSPSURF(intel_crtc->plane); 1458 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1459 obj->gtt_offset; 1460 } else { 1461 int dspaddr = DSPADDR(intel_crtc->plane); 1462 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1463 crtc->y * crtc->fb->pitches[0] + 1464 crtc->x * crtc->fb->bits_per_pixel/8); 1465 } 1466 1467 lockmgr(&dev->event_lock, LK_RELEASE); 1468 1469 if (stall_detected) { 1470 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 1471 intel_prepare_page_flip(dev, intel_crtc->plane); 1472 } 1473 } 1474 1475 /* Called from drm generic code, passed 'crtc' which 1476 * we use as a pipe index 1477 */ 1478 static int i915_enable_vblank(struct drm_device *dev, int pipe) 1479 { 1480 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1481 1482 if (!i915_pipe_enabled(dev, pipe)) 1483 return -EINVAL; 1484 1485 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1486 if (INTEL_INFO(dev)->gen >= 4) 1487 i915_enable_pipestat(dev_priv, pipe, 1488 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1489 else 1490 i915_enable_pipestat(dev_priv, pipe, 1491 PIPE_VBLANK_INTERRUPT_ENABLE); 1492 1493 /* maintain vblank delivery even in deep C-states */ 1494 if (dev_priv->info->gen == 3) 1495 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 1496 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1497 1498 return 0; 1499 } 1500 1501 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1502 { 1503 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1504 1505 if (!i915_pipe_enabled(dev, pipe)) 1506 return -EINVAL; 1507 1508 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1509 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1510 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1511 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1512 1513 return 0; 1514 } 1515 1516 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 1517 { 1518 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1519 1520 if (!i915_pipe_enabled(dev, pipe)) 1521 return -EINVAL; 1522 1523 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1524 ironlake_enable_display_irq(dev_priv, 1525 DE_PIPEA_VBLANK_IVB << (5 * pipe)); 1526 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1527 1528 return 0; 1529 } 1530 1531 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 1532 { 1533 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1534 u32 imr; 1535 1536 if (!i915_pipe_enabled(dev, pipe)) 1537 return -EINVAL; 1538 1539 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1540 imr = I915_READ(VLV_IMR); 1541 if (pipe == 0) 1542 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1543 else 1544 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1545 I915_WRITE(VLV_IMR, imr); 1546 i915_enable_pipestat(dev_priv, pipe, 1547 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1548 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1549 1550 return 0; 1551 } 1552 1553 /* Called from drm generic code, passed 'crtc' which 1554 * we use as a pipe index 1555 */ 1556 static void i915_disable_vblank(struct drm_device *dev, int pipe) 1557 { 1558 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1559 1560 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1561 if (dev_priv->info->gen == 3) 1562 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 1563 1564 i915_disable_pipestat(dev_priv, pipe, 1565 PIPE_VBLANK_INTERRUPT_ENABLE | 1566 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1567 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1568 } 1569 1570 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1571 { 1572 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1573 1574 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1575 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1576 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1577 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1578 } 1579 1580 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 1581 { 1582 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1583 1584 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1585 ironlake_disable_display_irq(dev_priv, 1586 DE_PIPEA_VBLANK_IVB << (pipe * 5)); 1587 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1588 } 1589 1590 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 1591 { 1592 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1593 u32 imr; 1594 1595 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1596 i915_disable_pipestat(dev_priv, pipe, 1597 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1598 imr = I915_READ(VLV_IMR); 1599 if (pipe == 0) 1600 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1601 else 1602 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1603 I915_WRITE(VLV_IMR, imr); 1604 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1605 } 1606 1607 static u32 1608 ring_last_seqno(struct intel_ring_buffer *ring) 1609 { 1610 return list_entry(ring->request_list.prev, 1611 struct drm_i915_gem_request, list)->seqno; 1612 } 1613 1614 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1615 { 1616 if (list_empty(&ring->request_list) || 1617 i915_seqno_passed(ring->get_seqno(ring, false), 1618 ring_last_seqno(ring))) { 1619 /* Issue a wake-up to catch stuck h/w. */ 1620 #if 0 /* XXX From OpenBSD */ 1621 if (waitqueue_active(&ring->irq_queue)) { 1622 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1623 ring->name); 1624 wake_up_all(&ring->irq_queue); 1625 *err = true; 1626 } 1627 #else 1628 wake_up_all(&ring->irq_queue); 1629 #endif 1630 return true; 1631 } 1632 return false; 1633 } 1634 1635 static bool kick_ring(struct intel_ring_buffer *ring) 1636 { 1637 struct drm_device *dev = ring->dev; 1638 struct drm_i915_private *dev_priv = dev->dev_private; 1639 u32 tmp = I915_READ_CTL(ring); 1640 if (tmp & RING_WAIT) { 1641 DRM_ERROR("Kicking stuck wait on %s\n", 1642 ring->name); 1643 I915_WRITE_CTL(ring, tmp); 1644 return true; 1645 } 1646 return false; 1647 } 1648 1649 static bool i915_hangcheck_hung(struct drm_device *dev) 1650 { 1651 drm_i915_private_t *dev_priv = dev->dev_private; 1652 1653 if (dev_priv->hangcheck_count++ > 1) { 1654 bool hung = true; 1655 1656 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1657 i915_handle_error(dev, true); 1658 1659 if (!IS_GEN2(dev)) { 1660 struct intel_ring_buffer *ring; 1661 int i; 1662 1663 /* Is the chip hanging on a WAIT_FOR_EVENT? 1664 * If so we can simply poke the RB_WAIT bit 1665 * and break the hang. This should work on 1666 * all but the second generation chipsets. 1667 */ 1668 for_each_ring(ring, dev_priv, i) 1669 hung &= !kick_ring(ring); 1670 } 1671 1672 return hung; 1673 } 1674 1675 return false; 1676 } 1677 1678 /** 1679 * This is called when the chip hasn't reported back with completed 1680 * batchbuffers in a long time. The first time this is called we simply record 1681 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 1682 * again, we assume the chip is wedged and try to fix it. 1683 */ 1684 void i915_hangcheck_elapsed(unsigned long data) 1685 { 1686 struct drm_device *dev = (struct drm_device *)data; 1687 drm_i915_private_t *dev_priv = dev->dev_private; 1688 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; 1689 struct intel_ring_buffer *ring; 1690 bool err = false, idle; 1691 int i; 1692 1693 if (!i915_enable_hangcheck) 1694 return; 1695 1696 memset(acthd, 0, sizeof(acthd)); 1697 idle = true; 1698 for_each_ring(ring, dev_priv, i) { 1699 idle &= i915_hangcheck_ring_idle(ring, &err); 1700 acthd[i] = intel_ring_get_active_head(ring); 1701 } 1702 1703 /* If all work is done then ACTHD clearly hasn't advanced. */ 1704 if (idle) { 1705 if (err) { 1706 if (i915_hangcheck_hung(dev)) 1707 return; 1708 1709 goto repeat; 1710 } 1711 1712 dev_priv->hangcheck_count = 0; 1713 return; 1714 } 1715 1716 i915_get_extra_instdone(dev, instdone); 1717 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1718 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { 1719 if (i915_hangcheck_hung(dev)) 1720 return; 1721 } else { 1722 dev_priv->hangcheck_count = 0; 1723 1724 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1725 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); 1726 } 1727 1728 repeat: 1729 /* Reset timer case chip hangs without another request being added */ 1730 mod_timer(&dev_priv->hangcheck_timer, 1731 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1732 } 1733 1734 /* drm_dma.h hooks 1735 */ 1736 static void ironlake_irq_preinstall(struct drm_device *dev) 1737 { 1738 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1739 1740 atomic_set(&dev_priv->irq_received, 0); 1741 1742 I915_WRITE(HWSTAM, 0xeffe); 1743 1744 /* XXX hotplug from PCH */ 1745 1746 I915_WRITE(DEIMR, 0xffffffff); 1747 I915_WRITE(DEIER, 0x0); 1748 POSTING_READ(DEIER); 1749 1750 /* and GT */ 1751 I915_WRITE(GTIMR, 0xffffffff); 1752 I915_WRITE(GTIER, 0x0); 1753 POSTING_READ(GTIER); 1754 1755 /* south display irq */ 1756 I915_WRITE(SDEIMR, 0xffffffff); 1757 I915_WRITE(SDEIER, 0x0); 1758 POSTING_READ(SDEIER); 1759 } 1760 1761 static void valleyview_irq_preinstall(struct drm_device *dev) 1762 { 1763 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1764 int pipe; 1765 1766 atomic_set(&dev_priv->irq_received, 0); 1767 1768 /* VLV magic */ 1769 I915_WRITE(VLV_IMR, 0); 1770 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 1771 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 1772 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 1773 1774 /* and GT */ 1775 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1776 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1777 I915_WRITE(GTIMR, 0xffffffff); 1778 I915_WRITE(GTIER, 0x0); 1779 POSTING_READ(GTIER); 1780 1781 I915_WRITE(DPINVGTT, 0xff); 1782 1783 I915_WRITE(PORT_HOTPLUG_EN, 0); 1784 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1785 for_each_pipe(pipe) 1786 I915_WRITE(PIPESTAT(pipe), 0xffff); 1787 I915_WRITE(VLV_IIR, 0xffffffff); 1788 I915_WRITE(VLV_IMR, 0xffffffff); 1789 I915_WRITE(VLV_IER, 0x0); 1790 POSTING_READ(VLV_IER); 1791 } 1792 1793 /* 1794 * Enable digital hotplug on the PCH, and configure the DP short pulse 1795 * duration to 2ms (which is the minimum in the Display Port spec) 1796 * 1797 * This register is the same on all known PCH chips. 1798 */ 1799 1800 static void ironlake_enable_pch_hotplug(struct drm_device *dev) 1801 { 1802 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1803 u32 hotplug; 1804 1805 hotplug = I915_READ(PCH_PORT_HOTPLUG); 1806 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 1807 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 1808 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 1809 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 1810 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 1811 } 1812 1813 static int ironlake_irq_postinstall(struct drm_device *dev) 1814 { 1815 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1816 /* enable kind of interrupts always enabled */ 1817 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1818 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1819 u32 render_irqs; 1820 u32 hotplug_mask; 1821 1822 dev_priv->irq_mask = ~display_mask; 1823 1824 /* should always can generate irq */ 1825 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1826 I915_WRITE(DEIMR, dev_priv->irq_mask); 1827 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 1828 POSTING_READ(DEIER); 1829 1830 dev_priv->gt_irq_mask = ~0; 1831 1832 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1833 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1834 1835 if (IS_GEN6(dev)) 1836 render_irqs = 1837 GT_USER_INTERRUPT | 1838 GEN6_BSD_USER_INTERRUPT | 1839 GEN6_BLITTER_USER_INTERRUPT; 1840 else 1841 render_irqs = 1842 GT_USER_INTERRUPT | 1843 GT_PIPE_NOTIFY | 1844 GT_BSD_USER_INTERRUPT; 1845 I915_WRITE(GTIER, render_irqs); 1846 POSTING_READ(GTIER); 1847 1848 if (HAS_PCH_CPT(dev)) { 1849 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1850 SDE_PORTB_HOTPLUG_CPT | 1851 SDE_PORTC_HOTPLUG_CPT | 1852 SDE_PORTD_HOTPLUG_CPT); 1853 } else { 1854 hotplug_mask = (SDE_CRT_HOTPLUG | 1855 SDE_PORTB_HOTPLUG | 1856 SDE_PORTC_HOTPLUG | 1857 SDE_PORTD_HOTPLUG | 1858 SDE_AUX_MASK); 1859 } 1860 1861 dev_priv->pch_irq_mask = ~hotplug_mask; 1862 1863 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1864 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1865 I915_WRITE(SDEIER, hotplug_mask); 1866 POSTING_READ(SDEIER); 1867 1868 ironlake_enable_pch_hotplug(dev); 1869 1870 if (IS_IRONLAKE_M(dev)) { 1871 /* Clear & enable PCU event interrupts */ 1872 I915_WRITE(DEIIR, DE_PCU_EVENT); 1873 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 1874 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 1875 } 1876 1877 return 0; 1878 } 1879 1880 static int ivybridge_irq_postinstall(struct drm_device *dev) 1881 { 1882 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1883 /* enable kind of interrupts always enabled */ 1884 u32 display_mask = 1885 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 1886 DE_PLANEC_FLIP_DONE_IVB | 1887 DE_PLANEB_FLIP_DONE_IVB | 1888 DE_PLANEA_FLIP_DONE_IVB; 1889 u32 render_irqs; 1890 u32 hotplug_mask; 1891 1892 dev_priv->irq_mask = ~display_mask; 1893 1894 /* should always can generate irq */ 1895 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1896 I915_WRITE(DEIMR, dev_priv->irq_mask); 1897 I915_WRITE(DEIER, 1898 display_mask | 1899 DE_PIPEC_VBLANK_IVB | 1900 DE_PIPEB_VBLANK_IVB | 1901 DE_PIPEA_VBLANK_IVB); 1902 POSTING_READ(DEIER); 1903 1904 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 1905 1906 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1907 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1908 1909 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 1910 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 1911 I915_WRITE(GTIER, render_irqs); 1912 POSTING_READ(GTIER); 1913 1914 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1915 SDE_PORTB_HOTPLUG_CPT | 1916 SDE_PORTC_HOTPLUG_CPT | 1917 SDE_PORTD_HOTPLUG_CPT); 1918 dev_priv->pch_irq_mask = ~hotplug_mask; 1919 1920 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1921 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1922 I915_WRITE(SDEIER, hotplug_mask); 1923 POSTING_READ(SDEIER); 1924 1925 ironlake_enable_pch_hotplug(dev); 1926 1927 return 0; 1928 } 1929 1930 static int valleyview_irq_postinstall(struct drm_device *dev) 1931 { 1932 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1933 u32 enable_mask; 1934 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1935 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 1936 u32 render_irqs; 1937 u16 msid; 1938 1939 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 1940 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 1941 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 1942 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 1943 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1944 1945 /* 1946 *Leave vblank interrupts masked initially. enable/disable will 1947 * toggle them based on usage. 1948 */ 1949 dev_priv->irq_mask = (~enable_mask) | 1950 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 1951 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1952 1953 dev_priv->pipestat[0] = 0; 1954 dev_priv->pipestat[1] = 0; 1955 1956 /* Hack for broken MSIs on VLV */ 1957 pci_write_config(dev_priv->dev->dev, 0x94, 0xfee00000, 4); 1958 msid = pci_read_config(dev->dev, 0x98, 2); 1959 msid &= 0xff; /* mask out delivery bits */ 1960 msid |= (1<<14); 1961 pci_write_config(dev_priv->dev->dev, 0x98, msid, 4); 1962 1963 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 1964 I915_WRITE(VLV_IER, enable_mask); 1965 I915_WRITE(VLV_IIR, 0xffffffff); 1966 I915_WRITE(PIPESTAT(0), 0xffff); 1967 I915_WRITE(PIPESTAT(1), 0xffff); 1968 POSTING_READ(VLV_IER); 1969 1970 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 1971 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 1972 1973 I915_WRITE(VLV_IIR, 0xffffffff); 1974 I915_WRITE(VLV_IIR, 0xffffffff); 1975 1976 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1977 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1978 1979 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 1980 GEN6_BLITTER_USER_INTERRUPT; 1981 I915_WRITE(GTIER, render_irqs); 1982 POSTING_READ(GTIER); 1983 1984 /* ack & enable invalid PTE error interrupts */ 1985 #if 0 /* FIXME: add support to irq handler for checking these bits */ 1986 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 1987 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 1988 #endif 1989 1990 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1991 /* Note HDMI and DP share bits */ 1992 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 1993 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 1994 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 1995 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 1996 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 1997 hotplug_en |= HDMID_HOTPLUG_INT_EN; 1998 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 1999 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2000 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2001 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2002 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2003 hotplug_en |= CRT_HOTPLUG_INT_EN; 2004 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2005 } 2006 2007 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2008 2009 return 0; 2010 } 2011 2012 static void valleyview_irq_uninstall(struct drm_device *dev) 2013 { 2014 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2015 int pipe; 2016 2017 if (!dev_priv) 2018 return; 2019 2020 for_each_pipe(pipe) 2021 I915_WRITE(PIPESTAT(pipe), 0xffff); 2022 2023 I915_WRITE(HWSTAM, 0xffffffff); 2024 I915_WRITE(PORT_HOTPLUG_EN, 0); 2025 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2026 for_each_pipe(pipe) 2027 I915_WRITE(PIPESTAT(pipe), 0xffff); 2028 I915_WRITE(VLV_IIR, 0xffffffff); 2029 I915_WRITE(VLV_IMR, 0xffffffff); 2030 I915_WRITE(VLV_IER, 0x0); 2031 POSTING_READ(VLV_IER); 2032 } 2033 2034 static void ironlake_irq_uninstall(struct drm_device *dev) 2035 { 2036 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2037 2038 if (!dev_priv) 2039 return; 2040 2041 I915_WRITE(HWSTAM, 0xffffffff); 2042 2043 I915_WRITE(DEIMR, 0xffffffff); 2044 I915_WRITE(DEIER, 0x0); 2045 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2046 2047 I915_WRITE(GTIMR, 0xffffffff); 2048 I915_WRITE(GTIER, 0x0); 2049 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2050 2051 I915_WRITE(SDEIMR, 0xffffffff); 2052 I915_WRITE(SDEIER, 0x0); 2053 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2054 } 2055 2056 static void i8xx_irq_preinstall(struct drm_device * dev) 2057 { 2058 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2059 int pipe; 2060 2061 atomic_set(&dev_priv->irq_received, 0); 2062 2063 for_each_pipe(pipe) 2064 I915_WRITE(PIPESTAT(pipe), 0); 2065 I915_WRITE16(IMR, 0xffff); 2066 I915_WRITE16(IER, 0x0); 2067 POSTING_READ16(IER); 2068 } 2069 2070 static int i8xx_irq_postinstall(struct drm_device *dev) 2071 { 2072 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2073 2074 dev_priv->pipestat[0] = 0; 2075 dev_priv->pipestat[1] = 0; 2076 2077 I915_WRITE16(EMR, 2078 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2079 2080 /* Unmask the interrupts that we always want on. */ 2081 dev_priv->irq_mask = 2082 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2083 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2084 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2085 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2086 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2087 I915_WRITE16(IMR, dev_priv->irq_mask); 2088 2089 I915_WRITE16(IER, 2090 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2091 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2092 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2093 I915_USER_INTERRUPT); 2094 POSTING_READ16(IER); 2095 2096 return 0; 2097 } 2098 2099 static irqreturn_t i8xx_irq_handler(void *arg) 2100 { 2101 struct drm_device *dev = (struct drm_device *) arg; 2102 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2103 u16 iir, new_iir; 2104 u32 pipe_stats[2]; 2105 int irq_received; 2106 int pipe; 2107 u16 flip_mask = 2108 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2109 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2110 2111 atomic_inc(&dev_priv->irq_received); 2112 2113 iir = I915_READ16(IIR); 2114 if (iir == 0) 2115 return; 2116 2117 while (iir & ~flip_mask) { 2118 /* Can't rely on pipestat interrupt bit in iir as it might 2119 * have been cleared after the pipestat interrupt was received. 2120 * It doesn't set the bit in iir again, but it still produces 2121 * interrupts (for non-MSI). 2122 */ 2123 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2124 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2125 i915_handle_error(dev, false); 2126 2127 for_each_pipe(pipe) { 2128 int reg = PIPESTAT(pipe); 2129 pipe_stats[pipe] = I915_READ(reg); 2130 2131 /* 2132 * Clear the PIPE*STAT regs before the IIR 2133 */ 2134 if (pipe_stats[pipe] & 0x8000ffff) { 2135 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2136 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2137 pipe_name(pipe)); 2138 I915_WRITE(reg, pipe_stats[pipe]); 2139 irq_received = 1; 2140 } 2141 } 2142 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2143 2144 I915_WRITE16(IIR, iir & ~flip_mask); 2145 new_iir = I915_READ16(IIR); /* Flush posted writes */ 2146 2147 i915_update_dri1_breadcrumb(dev); 2148 2149 if (iir & I915_USER_INTERRUPT) 2150 notify_ring(dev, &dev_priv->ring[RCS]); 2151 2152 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2153 drm_handle_vblank(dev, 0)) { 2154 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 2155 intel_prepare_page_flip(dev, 0); 2156 intel_finish_page_flip(dev, 0); 2157 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; 2158 } 2159 } 2160 2161 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2162 drm_handle_vblank(dev, 1)) { 2163 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { 2164 intel_prepare_page_flip(dev, 1); 2165 intel_finish_page_flip(dev, 1); 2166 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2167 } 2168 } 2169 2170 iir = new_iir; 2171 } 2172 2173 return; 2174 } 2175 2176 static void i8xx_irq_uninstall(struct drm_device * dev) 2177 { 2178 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2179 int pipe; 2180 2181 for_each_pipe(pipe) { 2182 /* Clear enable bits; then clear status bits */ 2183 I915_WRITE(PIPESTAT(pipe), 0); 2184 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2185 } 2186 I915_WRITE16(IMR, 0xffff); 2187 I915_WRITE16(IER, 0x0); 2188 I915_WRITE16(IIR, I915_READ16(IIR)); 2189 } 2190 2191 static void i915_irq_preinstall(struct drm_device * dev) 2192 { 2193 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2194 int pipe; 2195 2196 atomic_set(&dev_priv->irq_received, 0); 2197 2198 if (I915_HAS_HOTPLUG(dev)) { 2199 I915_WRITE(PORT_HOTPLUG_EN, 0); 2200 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2201 } 2202 2203 I915_WRITE16(HWSTAM, 0xeffe); 2204 for_each_pipe(pipe) 2205 I915_WRITE(PIPESTAT(pipe), 0); 2206 I915_WRITE(IMR, 0xffffffff); 2207 I915_WRITE(IER, 0x0); 2208 POSTING_READ(IER); 2209 } 2210 2211 static int i915_irq_postinstall(struct drm_device *dev) 2212 { 2213 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2214 u32 enable_mask; 2215 2216 dev_priv->pipestat[0] = 0; 2217 dev_priv->pipestat[1] = 0; 2218 2219 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2220 2221 /* Unmask the interrupts that we always want on. */ 2222 dev_priv->irq_mask = 2223 ~(I915_ASLE_INTERRUPT | 2224 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2225 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2226 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2227 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2228 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2229 2230 enable_mask = 2231 I915_ASLE_INTERRUPT | 2232 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2233 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2234 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2235 I915_USER_INTERRUPT; 2236 2237 if (I915_HAS_HOTPLUG(dev)) { 2238 /* Enable in IER... */ 2239 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2240 /* and unmask in IMR */ 2241 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2242 } 2243 2244 I915_WRITE(IMR, dev_priv->irq_mask); 2245 I915_WRITE(IER, enable_mask); 2246 POSTING_READ(IER); 2247 2248 if (I915_HAS_HOTPLUG(dev)) { 2249 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2250 2251 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2252 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2253 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2254 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2255 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2256 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2257 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2258 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2259 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2260 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2261 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2262 hotplug_en |= CRT_HOTPLUG_INT_EN; 2263 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2264 } 2265 2266 /* Ignore TV since it's buggy */ 2267 2268 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2269 } 2270 2271 intel_opregion_enable_asle(dev); 2272 2273 return 0; 2274 } 2275 2276 static irqreturn_t i915_irq_handler(void *arg) 2277 { 2278 struct drm_device *dev = (struct drm_device *) arg; 2279 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2280 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2281 u32 flip_mask = 2282 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2283 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2284 u32 flip[2] = { 2285 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, 2286 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT 2287 }; 2288 int pipe; 2289 2290 atomic_inc(&dev_priv->irq_received); 2291 2292 iir = I915_READ(IIR); 2293 do { 2294 bool irq_received = (iir & ~flip_mask) != 0; 2295 bool blc_event = false; 2296 2297 /* Can't rely on pipestat interrupt bit in iir as it might 2298 * have been cleared after the pipestat interrupt was received. 2299 * It doesn't set the bit in iir again, but it still produces 2300 * interrupts (for non-MSI). 2301 */ 2302 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2303 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2304 i915_handle_error(dev, false); 2305 2306 for_each_pipe(pipe) { 2307 int reg = PIPESTAT(pipe); 2308 pipe_stats[pipe] = I915_READ(reg); 2309 2310 /* Clear the PIPE*STAT regs before the IIR */ 2311 if (pipe_stats[pipe] & 0x8000ffff) { 2312 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2313 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2314 pipe_name(pipe)); 2315 I915_WRITE(reg, pipe_stats[pipe]); 2316 irq_received = true; 2317 } 2318 } 2319 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2320 2321 if (!irq_received) 2322 break; 2323 2324 /* Consume port. Then clear IIR or we'll miss events */ 2325 if ((I915_HAS_HOTPLUG(dev)) && 2326 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2327 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2328 2329 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2330 hotplug_status); 2331 if (hotplug_status & dev_priv->hotplug_supported_mask) 2332 queue_work(dev_priv->wq, 2333 &dev_priv->hotplug_work); 2334 2335 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2336 POSTING_READ(PORT_HOTPLUG_STAT); 2337 } 2338 2339 I915_WRITE(IIR, iir & ~flip_mask); 2340 new_iir = I915_READ(IIR); /* Flush posted writes */ 2341 2342 if (iir & I915_USER_INTERRUPT) 2343 notify_ring(dev, &dev_priv->ring[RCS]); 2344 2345 for_each_pipe(pipe) { 2346 int plane = pipe; 2347 if (IS_MOBILE(dev)) 2348 plane = !plane; 2349 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 2350 drm_handle_vblank(dev, pipe)) { 2351 if (iir & flip[plane]) { 2352 intel_prepare_page_flip(dev, plane); 2353 intel_finish_page_flip(dev, pipe); 2354 flip_mask &= ~flip[plane]; 2355 } 2356 } 2357 2358 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2359 blc_event = true; 2360 } 2361 2362 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2363 intel_opregion_asle_intr(dev); 2364 2365 /* With MSI, interrupts are only generated when iir 2366 * transitions from zero to nonzero. If another bit got 2367 * set while we were handling the existing iir bits, then 2368 * we would never get another interrupt. 2369 * 2370 * This is fine on non-MSI as well, as if we hit this path 2371 * we avoid exiting the interrupt handler only to generate 2372 * another one. 2373 * 2374 * Note that for MSI this could cause a stray interrupt report 2375 * if an interrupt landed in the time between writing IIR and 2376 * the posting read. This should be rare enough to never 2377 * trigger the 99% of 100,000 interrupts test for disabling 2378 * stray interrupts. 2379 */ 2380 iir = new_iir; 2381 } while (iir & ~flip_mask); 2382 2383 i915_update_dri1_breadcrumb(dev); 2384 } 2385 2386 static void i915_irq_uninstall(struct drm_device * dev) 2387 { 2388 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2389 int pipe; 2390 2391 if (I915_HAS_HOTPLUG(dev)) { 2392 I915_WRITE(PORT_HOTPLUG_EN, 0); 2393 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2394 } 2395 2396 I915_WRITE16(HWSTAM, 0xffff); 2397 for_each_pipe(pipe) { 2398 /* Clear enable bits; then clear status bits */ 2399 I915_WRITE(PIPESTAT(pipe), 0); 2400 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2401 } 2402 I915_WRITE(IMR, 0xffffffff); 2403 I915_WRITE(IER, 0x0); 2404 2405 I915_WRITE(IIR, I915_READ(IIR)); 2406 } 2407 2408 static void i965_irq_preinstall(struct drm_device * dev) 2409 { 2410 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2411 int pipe; 2412 2413 atomic_set(&dev_priv->irq_received, 0); 2414 2415 I915_WRITE(PORT_HOTPLUG_EN, 0); 2416 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2417 2418 I915_WRITE(HWSTAM, 0xeffe); 2419 for_each_pipe(pipe) 2420 I915_WRITE(PIPESTAT(pipe), 0); 2421 I915_WRITE(IMR, 0xffffffff); 2422 I915_WRITE(IER, 0x0); 2423 POSTING_READ(IER); 2424 } 2425 2426 static int i965_irq_postinstall(struct drm_device *dev) 2427 { 2428 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2429 u32 hotplug_en; 2430 u32 enable_mask; 2431 u32 error_mask; 2432 2433 /* Unmask the interrupts that we always want on. */ 2434 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2435 I915_DISPLAY_PORT_INTERRUPT | 2436 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2437 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2438 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2439 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2440 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2441 2442 enable_mask = ~dev_priv->irq_mask; 2443 enable_mask |= I915_USER_INTERRUPT; 2444 2445 if (IS_G4X(dev)) 2446 enable_mask |= I915_BSD_USER_INTERRUPT; 2447 2448 dev_priv->pipestat[0] = 0; 2449 dev_priv->pipestat[1] = 0; 2450 2451 /* 2452 * Enable some error detection, note the instruction error mask 2453 * bit is reserved, so we leave it masked. 2454 */ 2455 if (IS_G4X(dev)) { 2456 error_mask = ~(GM45_ERROR_PAGE_TABLE | 2457 GM45_ERROR_MEM_PRIV | 2458 GM45_ERROR_CP_PRIV | 2459 I915_ERROR_MEMORY_REFRESH); 2460 } else { 2461 error_mask = ~(I915_ERROR_PAGE_TABLE | 2462 I915_ERROR_MEMORY_REFRESH); 2463 } 2464 I915_WRITE(EMR, error_mask); 2465 2466 I915_WRITE(IMR, dev_priv->irq_mask); 2467 I915_WRITE(IER, enable_mask); 2468 POSTING_READ(IER); 2469 2470 /* Note HDMI and DP share hotplug bits */ 2471 hotplug_en = 0; 2472 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2473 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2474 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2475 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2476 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2477 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2478 if (IS_G4X(dev)) { 2479 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) 2480 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2481 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) 2482 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2483 } else { 2484 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) 2485 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2486 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) 2487 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2488 } 2489 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2490 hotplug_en |= CRT_HOTPLUG_INT_EN; 2491 2492 /* Programming the CRT detection parameters tends 2493 to generate a spurious hotplug event about three 2494 seconds later. So just do it once. 2495 */ 2496 if (IS_G4X(dev)) 2497 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 2498 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2499 } 2500 2501 /* Ignore TV since it's buggy */ 2502 2503 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2504 2505 intel_opregion_enable_asle(dev); 2506 2507 return 0; 2508 } 2509 2510 static irqreturn_t i965_irq_handler(void *arg) 2511 { 2512 struct drm_device *dev = (struct drm_device *) arg; 2513 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2514 u32 iir, new_iir; 2515 u32 pipe_stats[I915_MAX_PIPES]; 2516 int irq_received; 2517 int pipe; 2518 2519 atomic_inc(&dev_priv->irq_received); 2520 2521 iir = I915_READ(IIR); 2522 2523 for (;;) { 2524 bool blc_event = false; 2525 2526 irq_received = iir != 0; 2527 2528 /* Can't rely on pipestat interrupt bit in iir as it might 2529 * have been cleared after the pipestat interrupt was received. 2530 * It doesn't set the bit in iir again, but it still produces 2531 * interrupts (for non-MSI). 2532 */ 2533 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2534 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2535 i915_handle_error(dev, false); 2536 2537 for_each_pipe(pipe) { 2538 int reg = PIPESTAT(pipe); 2539 pipe_stats[pipe] = I915_READ(reg); 2540 2541 /* 2542 * Clear the PIPE*STAT regs before the IIR 2543 */ 2544 if (pipe_stats[pipe] & 0x8000ffff) { 2545 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2546 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2547 pipe_name(pipe)); 2548 I915_WRITE(reg, pipe_stats[pipe]); 2549 irq_received = 1; 2550 } 2551 } 2552 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2553 2554 if (!irq_received) 2555 break; 2556 2557 /* Consume port. Then clear IIR or we'll miss events */ 2558 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2559 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2560 2561 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2562 hotplug_status); 2563 if (hotplug_status & dev_priv->hotplug_supported_mask) 2564 queue_work(dev_priv->wq, 2565 &dev_priv->hotplug_work); 2566 2567 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2568 I915_READ(PORT_HOTPLUG_STAT); 2569 } 2570 2571 I915_WRITE(IIR, iir); 2572 new_iir = I915_READ(IIR); /* Flush posted writes */ 2573 2574 if (iir & I915_USER_INTERRUPT) 2575 notify_ring(dev, &dev_priv->ring[RCS]); 2576 if (iir & I915_BSD_USER_INTERRUPT) 2577 notify_ring(dev, &dev_priv->ring[VCS]); 2578 2579 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 2580 intel_prepare_page_flip(dev, 0); 2581 2582 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) 2583 intel_prepare_page_flip(dev, 1); 2584 2585 for_each_pipe(pipe) { 2586 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 2587 drm_handle_vblank(dev, pipe)) { 2588 i915_pageflip_stall_check(dev, pipe); 2589 intel_finish_page_flip(dev, pipe); 2590 } 2591 2592 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2593 blc_event = true; 2594 } 2595 2596 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2597 intel_opregion_asle_intr(dev); 2598 2599 /* With MSI, interrupts are only generated when iir 2600 * transitions from zero to nonzero. If another bit got 2601 * set while we were handling the existing iir bits, then 2602 * we would never get another interrupt. 2603 * 2604 * This is fine on non-MSI as well, as if we hit this path 2605 * we avoid exiting the interrupt handler only to generate 2606 * another one. 2607 * 2608 * Note that for MSI this could cause a stray interrupt report 2609 * if an interrupt landed in the time between writing IIR and 2610 * the posting read. This should be rare enough to never 2611 * trigger the 99% of 100,000 interrupts test for disabling 2612 * stray interrupts. 2613 */ 2614 iir = new_iir; 2615 } 2616 2617 i915_update_dri1_breadcrumb(dev); 2618 } 2619 2620 static void i965_irq_uninstall(struct drm_device * dev) 2621 { 2622 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2623 int pipe; 2624 2625 if (!dev_priv) 2626 return; 2627 2628 I915_WRITE(PORT_HOTPLUG_EN, 0); 2629 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2630 2631 I915_WRITE(HWSTAM, 0xffffffff); 2632 for_each_pipe(pipe) 2633 I915_WRITE(PIPESTAT(pipe), 0); 2634 I915_WRITE(IMR, 0xffffffff); 2635 I915_WRITE(IER, 0x0); 2636 2637 for_each_pipe(pipe) 2638 I915_WRITE(PIPESTAT(pipe), 2639 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 2640 I915_WRITE(IIR, I915_READ(IIR)); 2641 } 2642 2643 void intel_irq_init(struct drm_device *dev) 2644 { 2645 struct drm_i915_private *dev_priv = dev->dev_private; 2646 2647 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2648 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2649 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2650 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 2651 2652 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2653 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2654 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 2655 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2656 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2657 } 2658 2659 if (drm_core_check_feature(dev, DRIVER_MODESET)) 2660 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 2661 else 2662 dev->driver->get_vblank_timestamp = NULL; 2663 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2664 2665 if (IS_VALLEYVIEW(dev)) { 2666 dev->driver->irq_handler = valleyview_irq_handler; 2667 dev->driver->irq_preinstall = valleyview_irq_preinstall; 2668 dev->driver->irq_postinstall = valleyview_irq_postinstall; 2669 dev->driver->irq_uninstall = valleyview_irq_uninstall; 2670 dev->driver->enable_vblank = valleyview_enable_vblank; 2671 dev->driver->disable_vblank = valleyview_disable_vblank; 2672 } else if (IS_IVYBRIDGE(dev)) { 2673 /* Share pre & uninstall handlers with ILK/SNB */ 2674 dev->driver->irq_handler = ivybridge_irq_handler; 2675 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2676 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2677 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2678 dev->driver->enable_vblank = ivybridge_enable_vblank; 2679 dev->driver->disable_vblank = ivybridge_disable_vblank; 2680 } else if (IS_HASWELL(dev)) { 2681 /* Share interrupts handling with IVB */ 2682 dev->driver->irq_handler = ivybridge_irq_handler; 2683 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2684 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2685 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2686 dev->driver->enable_vblank = ivybridge_enable_vblank; 2687 dev->driver->disable_vblank = ivybridge_disable_vblank; 2688 } else if (HAS_PCH_SPLIT(dev)) { 2689 dev->driver->irq_handler = ironlake_irq_handler; 2690 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2691 dev->driver->irq_postinstall = ironlake_irq_postinstall; 2692 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2693 dev->driver->enable_vblank = ironlake_enable_vblank; 2694 dev->driver->disable_vblank = ironlake_disable_vblank; 2695 } else { 2696 if (INTEL_INFO(dev)->gen == 2) { 2697 dev->driver->irq_preinstall = i8xx_irq_preinstall; 2698 dev->driver->irq_postinstall = i8xx_irq_postinstall; 2699 dev->driver->irq_handler = i8xx_irq_handler; 2700 dev->driver->irq_uninstall = i8xx_irq_uninstall; 2701 } else if (INTEL_INFO(dev)->gen == 3) { 2702 dev->driver->irq_preinstall = i915_irq_preinstall; 2703 dev->driver->irq_postinstall = i915_irq_postinstall; 2704 dev->driver->irq_uninstall = i915_irq_uninstall; 2705 dev->driver->irq_handler = i915_irq_handler; 2706 } else { 2707 dev->driver->irq_preinstall = i965_irq_preinstall; 2708 dev->driver->irq_postinstall = i965_irq_postinstall; 2709 dev->driver->irq_uninstall = i965_irq_uninstall; 2710 dev->driver->irq_handler = i965_irq_handler; 2711 } 2712 dev->driver->enable_vblank = i915_enable_vblank; 2713 dev->driver->disable_vblank = i915_disable_vblank; 2714 } 2715 } 2716