/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*-
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * $FreeBSD: src/sys/dev/drm2/i915/i915_irq.c,v 1.1 2012/05/22 11:07:44 kib Exp $
 */

#include <sys/sfbuf.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"

static void i915_capture_error_state(struct drm_device *dev);
static u32 ring_last_seqno(struct intel_ring_buffer *ring);

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX \
	(I915_ASLE_INTERRUPT | \
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
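/*
 * Note on the two-level masking scheme: the FIX bits above stay
 * permanently unmasked in IMR, and delivery of the pipe events is gated
 * through the per-pipe PIPESTAT registers instead.  In PIPESTAT the
 * enable bits live in the high 16 bits and the matching status bits in
 * the low 16 bits, which is why i915_enable_pipestat() below writes
 * "mask >> 16" to ack any status that was already pending.
 */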
/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);
	}
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);
	}
}

/**
 * intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32
i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("trying to get vblank count for disabled "
			  "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32
gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("i915: trying to get vblank count for disabled "
			  "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int
i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
			 int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("i915: trying to get scanoutpos for disabled "
			  "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int
i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
			  struct timeval *vblank_time, unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
#if 0
		DRM_DEBUG("crtc %d is disabled\n", pipe);
#endif
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}
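/*
 * Hotplug processing cannot run from the interrupt handlers themselves:
 * the encoder ->hot_plug() hooks take the mode_config mutex and may
 * sleep, so the handlers just queue dev_priv->hotplug_work instead.
 */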
303 */ 304 static void i915_hotplug_work_func(struct work_struct *work) 305 { 306 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 307 hotplug_work); 308 struct drm_device *dev = dev_priv->dev; 309 struct drm_mode_config *mode_config = &dev->mode_config; 310 struct intel_encoder *encoder; 311 312 lockmgr(&mode_config->mutex, LK_EXCLUSIVE); 313 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 314 315 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 316 if (encoder->hot_plug) 317 encoder->hot_plug(encoder); 318 319 lockmgr(&mode_config->mutex, LK_RELEASE); 320 321 /* Just fire off a uevent and let userspace tell us what to do */ 322 #if 0 323 drm_helper_hpd_irq_event(dev); 324 #endif 325 } 326 327 static void ironlake_handle_rps_change(struct drm_device *dev) 328 { 329 drm_i915_private_t *dev_priv = dev->dev_private; 330 u32 busy_up, busy_down, max_avg, min_avg; 331 u8 new_delay; 332 333 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 334 335 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 336 337 new_delay = dev_priv->rps.cur_delay; 338 339 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 340 busy_up = I915_READ(RCPREVBSYTUPAVG); 341 busy_down = I915_READ(RCPREVBSYTDNAVG); 342 max_avg = I915_READ(RCBMAXAVG); 343 min_avg = I915_READ(RCBMINAVG); 344 345 /* Handle RCS change request from hw */ 346 if (busy_up > max_avg) { 347 if (dev_priv->rps.cur_delay != dev_priv->rps.max_delay) 348 new_delay = dev_priv->rps.cur_delay - 1; 349 if (new_delay < dev_priv->rps.max_delay) 350 new_delay = dev_priv->rps.max_delay; 351 } else if (busy_down < min_avg) { 352 if (dev_priv->rps.cur_delay != dev_priv->rps.min_delay) 353 new_delay = dev_priv->rps.cur_delay + 1; 354 if (new_delay > dev_priv->rps.min_delay) 355 new_delay = dev_priv->rps.min_delay; 356 } 357 358 if (ironlake_set_drps(dev, new_delay)) 359 dev_priv->rps.cur_delay = new_delay; 360 361 lockmgr(&mchdev_lock, LK_RELEASE); 362 363 return; 364 } 365 366 static void notify_ring(struct drm_device *dev, 367 struct intel_ring_buffer *ring) 368 { 369 struct drm_i915_private *dev_priv = dev->dev_private; 370 u32 seqno; 371 372 if (ring->obj == NULL) 373 return; 374 375 seqno = ring->get_seqno(ring); 376 377 lockmgr(&ring->irq_lock, LK_EXCLUSIVE); 378 ring->irq_seqno = seqno; 379 wakeup(ring); 380 lockmgr(&ring->irq_lock, LK_RELEASE); 381 382 if (i915_enable_hangcheck) { 383 dev_priv->hangcheck_count = 0; 384 mod_timer(&dev_priv->hangcheck_timer, 385 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 386 } 387 } 388 389 static void gen6_pm_rps_work(struct work_struct *work) 390 { 391 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 392 rps.work); 393 u32 pm_iir, pm_imr; 394 u8 new_delay; 395 396 spin_lock(&dev_priv->rps.lock); 397 pm_iir = dev_priv->rps.pm_iir; 398 dev_priv->rps.pm_iir = 0; 399 pm_imr = I915_READ(GEN6_PMIMR); 400 I915_WRITE(GEN6_PMIMR, 0); 401 spin_unlock(&dev_priv->rps.lock); 402 403 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 404 return; 405 406 lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE); 407 408 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 409 new_delay = dev_priv->rps.cur_delay + 1; 410 else 411 new_delay = dev_priv->rps.cur_delay - 1; 412 413 /* sysfs frequency interfaces may have snuck in while servicing the 414 * interrupt 415 */ 416 if (!(new_delay > dev_priv->rps.max_delay || 417 new_delay < dev_priv->rps.min_delay)) { 418 gen6_set_rps(dev_priv->dev, new_delay); 419 } 420 421 lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE); 422 } 423 424 static void snb_gt_irq_handler(struct 
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

#if 0
	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
#endif
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock(&dev_priv->rps.lock);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock(&dev_priv->rps.lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static void valleyview_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];
	bool blc_event;

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}
		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
				 SDE_AUDIO_POWER_SHIFT_CPT);

	if (pch_iir & SDE_AUX_MASK_CPT)
		DRM_DEBUG_DRIVER("AUX channel interrupt\n");

	if (pch_iir & SDE_GMBUS_CPT)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
}
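/*
 * Both the Ivy Bridge and Ironlake top halves below briefly clear the
 * master interrupt control bit in DEIER while they drain the various IIR
 * registers, and restore it once everything has been acked.
 */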
static void
ivybridge_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
#if 0
		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);
#endif

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void
ironlake_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pch_iir = I915_READ(SDEIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
	    (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (gt_iir & GT_GEN6_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (de_iir & DE_GSE) {
#if 1
		KIB_NOTYET();
#else
		intel_opregion_gse_intr(dev);
#endif
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;

	/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
		}
		lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE);
		dev_priv->error_completion++;
		wakeup(&dev_priv->error_completion);
		lockmgr(&dev_priv->error_completion_lock, LK_RELEASE);
	}
}

/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src)
{
	struct drm_i915_error_object *dst;
	struct sf_buf *sf;
	void *d, *s;
	int page, page_count;
	u32 reloc_offset;

	if (src == NULL || src->pages == NULL)
		return NULL;

	page_count = src->base.size / PAGE_SIZE;

	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM,
	    M_NOWAIT);
	if (dst == NULL)
		return (NULL);

	reloc_offset = src->gtt_offset;
	for (page = 0; page < page_count; page++) {
		d = kmalloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT);
		if (d == NULL)
			goto unwind;

		if (reloc_offset < dev_priv->mm.gtt_mappable_end) {
			/* Simply ignore tiling or any overlapping fence.
			 * It's part of the error state, and this hopefully
			 * captures what the GPU read.
			 */
			s = pmap_mapdev_attr(src->base.dev->agp->base +
			    reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING);
			memcpy(d, s, PAGE_SIZE);
			pmap_unmapdev((vm_offset_t)s, PAGE_SIZE);
		} else {
			drm_clflush_pages(&src->pages[page], 1);

			sf = sf_buf_alloc(src->pages[page]);
			if (sf != NULL) {
				s = (void *)(uintptr_t)sf_buf_kva(sf);
				memcpy(d, s, PAGE_SIZE);
				sf_buf_free(sf);
			} else {
				bzero(d, PAGE_SIZE);
				strcpy(d, "XXXKIB");
			}

			drm_clflush_pages(&src->pages[page], 1);
		}

		dst->pages[page] = d;

		reloc_offset += PAGE_SIZE;
	}
	dst->page_count = page_count;
	dst->gtt_offset = src->gtt_offset;

	return (dst);

unwind:
	while (page--)
		drm_free(dst->pages[page], DRM_I915_GEM);
	drm_free(dst, DRM_I915_GEM);
	return (NULL);
}

static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
	int page;

	if (obj == NULL)
		return;

	for (page = 0; page < obj->page_count; page++)
		drm_free(obj->pages[page], DRM_I915_GEM);

	drm_free(obj, DRM_I915_GEM);
}

static void
i915_error_state_free(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	int i;

	for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) {
		i915_error_object_free(error->ring[i].batchbuffer);
		i915_error_object_free(error->ring[i].ringbuffer);
		drm_free(error->ring[i].requests, DRM_I915_GEM);
	}

	drm_free(error->active_bo, DRM_I915_GEM);
	drm_free(error->overlay, DRM_I915_GEM);
	drm_free(error, DRM_I915_GEM);
}
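/*
 * Snapshot the BOs on the given list into the preallocated error-buffer
 * array; at most "count" entries are filled in and the number actually
 * captured is returned.
 */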
static u32
capture_bo_list(struct drm_i915_error_buffer *err, int count,
		struct list_head *head)
{
	struct drm_i915_gem_object *obj;
	int i = 0;

	list_for_each_entry(obj, head, mm_list) {
		err->size = obj->base.size;
		err->name = obj->base.name;
		err->seqno = obj->last_rendering_seqno;
		err->gtt_offset = obj->gtt_offset;
		err->read_domains = obj->base.read_domains;
		err->write_domain = obj->base.write_domain;
		err->fence_reg = obj->fence_reg;
		err->pinned = 0;
		if (obj->pin_count > 0)
			err->pinned = 1;
		if (obj->user_pin_count > 0)
			err->pinned = -1;
		err->tiling = obj->tiling_mode;
		err->dirty = obj->dirty;
		err->purgeable = obj->madv != I915_MADV_WILLNEED;
		err->ring = obj->ring ? obj->ring->id : -1;
		err->cache_level = obj->cache_level;

		if (++i == count)
			break;

		err++;
	}

	return (i);
}

static void
i915_gem_record_fences(struct drm_device *dev,
		       struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	/* Fences */
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
		break;
	case 5:
	case 4:
		for (i = 0; i < 16; i++)
			error->fence[i] = I915_READ64(FENCE_REG_965_0 +
			    (i * 8));
		break;
	case 3:
		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
			for (i = 0; i < 8; i++)
				error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
				    (i * 4));
		/* FALLTHROUGH: gen3 also has the eight 830-style fences */
	case 2:
		for (i = 0; i < 8; i++)
			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
		break;

	}
}

static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
			     struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	u32 seqno;

	if (!ring->get_seqno)
		return (NULL);

	seqno = ring->get_seqno(ring);
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (obj->ring != ring)
			continue;

		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
			continue;

		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
			continue;

		/* We need to copy these to an anonymous buffer as the simplest
		 * method to avoid being overwritten by userspace.
		 */
		return (i915_error_object_create(dev_priv, obj));
	}

	return NULL;
}

static void
i915_record_ring_state(struct drm_device *dev,
		       struct drm_i915_error_state *error,
		       struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 6) {
		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
		error->semaphore_mboxes[ring->id][0]
			= I915_READ(RING_SYNC_0(ring->mmio_base));
		error->semaphore_mboxes[ring->id][1]
			= I915_READ(RING_SYNC_1(ring->mmio_base));
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
		if (ring->id == RCS) {
			error->instdone1 = I915_READ(INSTDONE1);
			error->bbaddr = I915_READ64(BB_ADDR);
		}
	} else {
		error->ipeir[ring->id] = I915_READ(IPEIR);
		error->ipehr[ring->id] = I915_READ(IPEHR);
		error->instdone[ring->id] = I915_READ(INSTDONE);
	}

	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
	error->seqno[ring->id] = ring->get_seqno(ring);
	error->acthd[ring->id] = intel_ring_get_active_head(ring);
	error->head[ring->id] = I915_READ_HEAD(ring);
	error->tail[ring->id] = I915_READ_TAIL(ring);

	error->cpu_ring_head[ring->id] = ring->head;
	error->cpu_ring_tail[ring->id] = ring->tail;
}
static void
i915_gem_record_rings(struct drm_device *dev,
		      struct drm_i915_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *request;
	int i, count;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		struct intel_ring_buffer *ring = &dev_priv->ring[i];

		if (ring->obj == NULL)
			continue;

		i915_record_ring_state(dev, error, ring);

		error->ring[i].batchbuffer =
			i915_error_first_batchbuffer(dev_priv, ring);

		error->ring[i].ringbuffer =
			i915_error_object_create(dev_priv, ring->obj);

		count = 0;
		list_for_each_entry(request, &ring->request_list, list)
			count++;

		error->ring[i].num_requests = count;
		error->ring[i].requests = kmalloc(count *
		    sizeof(struct drm_i915_error_request), DRM_I915_GEM,
		    M_WAITOK);
		if (error->ring[i].requests == NULL) {
			error->ring[i].num_requests = 0;
			continue;
		}

		count = 0;
		list_for_each_entry(request, &ring->request_list, list) {
			struct drm_i915_error_request *erq;

			erq = &error->ring[i].requests[count++];
			erq->seqno = request->seqno;
			erq->jiffies = request->emitted_jiffies;
			erq->tail = request->tail;
		}
	}
}
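/*
 * Only one error state is kept at a time: i915_capture_error_state()
 * bails out early if dev_priv->first_error is already set, and the slot
 * is only vacated again by i915_destroy_error_state().
 */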
static void
i915_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_i915_error_state *error;
	int i, pipe;

	lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
	error = dev_priv->first_error;
	lockmgr(&dev_priv->error_lock, LK_RELEASE);
	if (error != NULL)
		return;

	/* Account for pipe specific data like PIPE*STAT */
	error = kmalloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO);
	if (error == NULL) {
		DRM_DEBUG("out of memory, not capturing error state\n");
		return;
	}

	DRM_INFO("capturing error event; look for more information in "
		 "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx);

	error->eir = I915_READ(EIR);
	error->pgtbl_er = I915_READ(PGTBL_ER);
	for_each_pipe(pipe)
		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));

	if (INTEL_INFO(dev)->gen >= 6) {
		error->error = I915_READ(ERROR_GEN6);
		error->done_reg = I915_READ(DONE_REG);
	}

	i915_gem_record_fences(dev, error);
	i915_gem_record_rings(dev, error);

	/* Record buffers on the active and pinned lists. */
	error->active_bo = NULL;
	error->pinned_bo = NULL;

	i = 0;
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
		i++;
	error->active_bo_count = i;
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
		i++;
	error->pinned_bo_count = i - error->active_bo_count;

	error->active_bo = NULL;
	error->pinned_bo = NULL;
	if (i) {
		error->active_bo = kmalloc(sizeof(*error->active_bo) * i,
		    DRM_I915_GEM, M_NOWAIT);
		if (error->active_bo)
			error->pinned_bo = error->active_bo +
			    error->active_bo_count;
	}

	if (error->active_bo)
		error->active_bo_count = capture_bo_list(error->active_bo,
		    error->active_bo_count, &dev_priv->mm.active_list);

	if (error->pinned_bo)
		error->pinned_bo_count = capture_bo_list(error->pinned_bo,
		    error->pinned_bo_count, &dev_priv->mm.pinned_list);

	microtime(&error->time);

	error->overlay = intel_overlay_capture_error_state(dev);
	error->display = intel_display_capture_error_state(dev);

	lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
	if (dev_priv->first_error == NULL) {
		dev_priv->first_error = error;
		error = NULL;
	}
	lockmgr(&dev_priv->error_lock, LK_RELEASE);

	if (error != NULL)
		i915_error_state_free(dev, error);
}

void
i915_destroy_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_error_state *error;

	lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
	error = dev_priv->first_error;
	dev_priv->first_error = NULL;
	lockmgr(&dev_priv->error_lock, LK_RELEASE);

	if (error != NULL)
		i915_error_state_free(dev, error);
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	kprintf("i915: render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			kprintf("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			kprintf("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			kprintf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965));
			kprintf("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			kprintf("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			kprintf("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			kprintf("page table error\n");
			kprintf("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			kprintf("page table error\n");
			kprintf("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		kprintf("memory refresh error:\n");
		for_each_pipe(pipe)
			kprintf("pipe %c stat: 0x%08x\n",
				pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		kprintf("instruction error\n");
		kprintf("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			kprintf("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			kprintf("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			kprintf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE));
			kprintf("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			kprintf("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			kprintf("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			kprintf("  INSTDONE: 0x%08x\n", I915_READ(INSTDONE_I965));
			kprintf("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			kprintf("  INSTDONE1: 0x%08x\n", I915_READ(INSTDONE1));
			kprintf("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE);
		dev_priv->error_completion = 0;
		atomic_set(&dev_priv->mm.wedged, 1);
		/* unlock acts as rel barrier for store to wedged */
		lockmgr(&dev_priv->error_completion_lock, LK_RELEASE);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		lockmgr(&dev_priv->ring[RCS].irq_lock, LK_EXCLUSIVE);
		wakeup(&dev_priv->ring[RCS]);
		lockmgr(&dev_priv->ring[RCS].irq_lock, LK_RELEASE);
		if (HAS_BSD(dev)) {
			lockmgr(&dev_priv->ring[VCS].irq_lock, LK_EXCLUSIVE);
			wakeup(&dev_priv->ring[VCS]);
			lockmgr(&dev_priv->ring[VCS].irq_lock, LK_RELEASE);
		}
		if (HAS_BLT(dev)) {
			lockmgr(&dev_priv->ring[BCS].irq_lock, LK_EXCLUSIVE);
			wakeup(&dev_priv->ring[BCS]);
			lockmgr(&dev_priv->ring[BCS].irq_lock, LK_RELEASE);
		}
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
}
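/*
 * The stall check below detects a missed flip-done interrupt by
 * comparing the live scanout base register (DSPSURF on gen4+, DSPADDR
 * with the x/y offset folded in on older parts) against the GTT offset
 * of the object queued for the flip; a match means the flip already
 * happened without us seeing the IRQ.
 */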
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	if (work == NULL || atomic_read(&work->pending) ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	lockmgr(&dev->event_lock, LK_RELEASE);

	if (stall_detected) {
		DRM_DEBUG("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int
i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);

	/* maintain vblank delivery even in deep C-states */
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

static int
ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}
static int
ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
				    DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 imr;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void
i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->info->gen == 3)
		I915_WRITE(INSTPM,
			   INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS);

	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static void
ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static void
ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
				     DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 imr;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	imr = I915_READ(VLV_IMR);
	if (pipe == 0)
		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
	else
		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
	I915_WRITE(VLV_IMR, imr);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{

	if (list_empty(&ring->request_list))
		return (0);
	else
		return (list_entry(ring->request_list.prev,
		    struct drm_i915_gem_request, list)->seqno);
}
static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
{
	if (list_empty(&ring->request_list) ||
	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
		/* Issue a wake-up to catch stuck h/w. */
		if (ring->waiting_seqno) {
			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
				  ring->name,
				  ring->waiting_seqno,
				  ring->get_seqno(ring));
			wakeup(ring);
			*err = true;
		}
		return true;
	}
	return false;
}

static bool kick_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		DRM_ERROR("Kicking stuck wait on %s\n",
			  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return true;
	}
	return false;
}

/**
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. The first time this is called we simply record
 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
 * again, we assume the chip is wedged and try to fix it.
 */
void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
	bool err = false;

	if (!i915_enable_hangcheck)
		return;

	/* If all work is done then ACTHD clearly hasn't advanced. */
	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
		dev_priv->hangcheck_count = 0;
		if (err)
			goto repeat;
		return;
	}

	if (INTEL_INFO(dev)->gen < 4) {
		instdone = I915_READ(INSTDONE);
		instdone1 = 0;
	} else {
		instdone = I915_READ(INSTDONE_I965);
		instdone1 = I915_READ(INSTDONE1);
	}
	acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
	acthd_bsd = HAS_BSD(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
	acthd_blt = HAS_BLT(dev) ?
		intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;

	if (dev_priv->last_acthd == acthd &&
	    dev_priv->last_acthd_bsd == acthd_bsd &&
	    dev_priv->last_acthd_blt == acthd_blt &&
	    dev_priv->last_instdone == instdone &&
	    dev_priv->last_instdone1 == instdone1) {
		if (dev_priv->hangcheck_count++ > 1) {
			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
			i915_handle_error(dev, true);

			if (!IS_GEN2(dev)) {
				/* Is the chip hanging on a WAIT_FOR_EVENT?
				 * If so we can simply poke the RB_WAIT bit
				 * and break the hang. This should work on
				 * all but the second generation chipsets.
				 */
				if (kick_ring(&dev_priv->ring[RCS]))
					goto repeat;

				if (HAS_BSD(dev) &&
				    kick_ring(&dev_priv->ring[VCS]))
					goto repeat;

				if (HAS_BLT(dev) &&
				    kick_ring(&dev_priv->ring[BCS]))
					goto repeat;
			}

			return;
		}
	} else {
		dev_priv->hangcheck_count = 0;

		dev_priv->last_acthd = acthd;
		dev_priv->last_acthd_bsd = acthd_bsd;
		dev_priv->last_acthd_blt = acthd_blt;
		dev_priv->last_instdone = instdone;
		dev_priv->last_instdone1 = instdone1;
	}

repeat:
	/* Reset timer in case chip hangs without another request being added */
	mod_timer(&dev_priv->hangcheck_timer,
		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}


/* drm_dma.h hooks
*/
static void
ironlake_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);

	/* XXX hotplug from PCH */

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	POSTING_READ(DEIER);

	/* and GT */
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	/* south display irq */
	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	POSTING_READ(SDEIER);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	/* VLV magic */
	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	/* and GT */
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	POSTING_READ(GTIER);

	I915_WRITE(DPINVGTT, 0xff);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}
/*
 * Enable digital hotplug on the PCH, and configure the DP short pulse
 * duration to 2ms (which is the minimum in the Display Port spec)
 *
 * This register is the same on all known PCH chips.
 */
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug;

	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	if (IS_GEN6(dev))
		render_irqs =
			GT_USER_INTERRUPT |
			GT_GEN6_BSD_USER_INTERRUPT |
			GT_GEN6_BLT_USER_INTERRUPT;
	else
		render_irqs =
			GT_USER_INTERRUPT |
			GT_PIPE_NOTIFY |
			GT_BSD_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	if (HAS_PCH_CPT(dev)) {
		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
				SDE_PORTB_HOTPLUG_CPT |
				SDE_PORTC_HOTPLUG_CPT |
				SDE_PORTD_HOTPLUG_CPT);
	} else {
		hotplug_mask = (SDE_CRT_HOTPLUG |
				SDE_PORTB_HOTPLUG |
				SDE_PORTC_HOTPLUG |
				SDE_PORTD_HOTPLUG |
				SDE_AUX_MASK);
	}

	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Clear & enable PCU event interrupts */
		I915_WRITE(DEIIR, DE_PCU_EVENT);
		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
	}

	return 0;
}


static int ivybridge_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* enable kind of interrupts always enabled */
	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
		DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB |
		DE_PLANEB_FLIP_DONE_IVB;
	u32 render_irqs;
	u32 hotplug_mask;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
	dev_priv->irq_mask = ~display_mask;

	/* should always be able to generate irqs */
	I915_WRITE(DEIIR, I915_READ(DEIIR));
	I915_WRITE(DEIMR, dev_priv->irq_mask);
	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB |
		   DE_PIPEB_VBLANK_IVB);
	POSTING_READ(DEIER);

	dev_priv->gt_irq_mask = ~0;

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT |
		GT_GEN6_BLT_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
			SDE_PORTB_HOTPLUG_CPT |
			SDE_PORTC_HOTPLUG_CPT |
			SDE_PORTD_HOTPLUG_CPT);
	dev_priv->pch_irq_mask = ~hotplug_mask;

	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
	I915_WRITE(SDEIER, hotplug_mask);
	POSTING_READ(SDEIER);

	ironlake_enable_pch_hotplug(dev);

	return 0;
}


static int valleyview_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;
	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
	u32 render_irqs;
	u16 msid;

	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	/*
	 * Leave vblank interrupts masked initially.  enable/disable will
	 * toggle them based on usage.
	 */
	dev_priv->irq_mask = (~enable_mask) |
		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Hack for broken MSIs on VLV */
	pci_write_config(dev_priv->dev->dev, 0x94, 0xfee00000, 4);
	msid = pci_read_config(dev->dev, 0x98, 2);
	msid &= 0xff;	/* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config(dev_priv->dev->dev, 0x98, msid, 2);

	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, enable_mask);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(PIPESTAT(0), 0xffff);
	I915_WRITE(PIPESTAT(1), 0xffff);
	POSTING_READ(VLV_IER);

	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
	i915_enable_pipestat(dev_priv, 1, pipestat_enable);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);

	I915_WRITE(GTIIR, I915_READ(GTIIR));
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);

	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
		GEN6_BLITTER_USER_INTERRUPT;
	I915_WRITE(GTIER, render_irqs);
	POSTING_READ(GTIER);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	/* Note HDMI and DP share bits */
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

	return 0;
}
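/*
 * The uninstall hooks below quiesce the hardware again: enable registers
 * are zeroed, mask registers are set back to all ones, and any status
 * still latched in the IIR/PIPESTAT registers is acked on the way out.
 */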
static void valleyview_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IMR, 0xffffffff);
	I915_WRITE(VLV_IER, 0x0);
	POSTING_READ(VLV_IER);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(HWSTAM, 0xffffffff);

	I915_WRITE(DEIMR, 0xffffffff);
	I915_WRITE(DEIER, 0x0);
	I915_WRITE(DEIIR, I915_READ(DEIIR));

	I915_WRITE(GTIMR, 0xffffffff);
	I915_WRITE(GTIER, 0x0);
	I915_WRITE(GTIIR, I915_READ(GTIIR));

	I915_WRITE(SDEIMR, 0xffffffff);
	I915_WRITE(SDEIER, 0x0);
	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}

static void i8xx_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	return 0;
}
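
/*
 * Gen2 exposes IER/IIR/IMR/EMR as 16-bit registers, hence the *_16
 * accessors in the i8xx paths.  The handler below keeps servicing IIR
 * until only plane flip-pending bits remain; those are deliberately
 * left unacked (excluded from the IIR write via flip_mask) until the
 * matching vblank arrives and the flip has been finished.
 */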
static void i8xx_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ16(IIR);
	if (iir == 0)
		return;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 0)) {
			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 0);
				intel_finish_page_flip(dev, 0);
				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
			}
		}

		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
		    drm_handle_vblank(dev, 1)) {
			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
				intel_prepare_page_flip(dev, 1);
				intel_finish_page_flip(dev, 1);
				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
			}
		}

		iir = new_iir;
	}
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
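
/*
 * EMR is an error mask: a set bit suppresses that error source, so the
 * write in i915_irq_postinstall leaves only page-table and
 * memory-refresh errors able to raise
 * I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT.
 */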
static int i915_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 enable_mask;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
			hotplug_en |= HDMID_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
			hotplug_en |= CRT_HOTPLUG_INT_EN;
			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
		}

		/* Ignore TV since it's buggy */

		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}

#if 0
	intel_opregion_enable_asle(dev);
#endif

	return 0;
}
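
/*
 * Gen3 interrupt handler.  Mobile gen3 parts apparently pair plane A
 * with pipe B and vice versa, hence the IS_MOBILE() plane-index flip
 * in the vblank loop below before flip[] is consulted.
 */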
static void i915_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	u32 flip[2] = {
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
	};
	int pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			POSTING_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (IS_MOBILE(dev))
				plane = !plane;
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				if (iir & flip[plane]) {
					intel_prepare_page_flip(dev, plane);
					intel_finish_page_flip(dev, pipe);
					flip_mask &= ~flip[plane];
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

#if 0
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);
#endif

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);
}

static void i915_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
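
/*
 * Gen4 postinstall.  G4X parts additionally expose the BSD ring user
 * interrupt and use their own SDVO hotplug status bits, so both are
 * special-cased on IS_G4X() below.
 */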
static int i965_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 hotplug_en;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/*
	 * Enable some error detection; note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	/* Note HDMI and DP share hotplug bits */
	hotplug_en = 0;
	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
		hotplug_en |= HDMID_HOTPLUG_INT_EN;
	if (IS_G4X(dev)) {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	} else {
		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
	}
	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
		hotplug_en |= CRT_HOTPLUG_INT_EN;

		/*
		 * Programming the CRT detection parameters tends to
		 * generate a spurious hotplug event about three seconds
		 * later.  So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
	}

	/* Ignore TV since it's buggy */

	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

#if 0
	intel_opregion_enable_asle(dev);
#endif

	return 0;
}
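
/*
 * Gen4 interrupt handler.  Unlike the gen2/gen3 handlers there is no
 * flip_mask bookkeeping: IIR is acked in full each pass, flip-pending
 * bits kick intel_prepare_page_flip() immediately, and completion is
 * driven from the vblank path via i915_pageflip_stall_check().
 */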
static void i965_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int irq_received;
	int pipe;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	for (;;) {
		bool blc_event = false;

		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 0);

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
			intel_prepare_page_flip(dev, 1);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    drm_handle_vblank(dev, pipe)) {
				i915_pageflip_stall_check(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}

#if 0
		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);
#endif

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);
}
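
/*
 * Tear-down first clears every PIPESTAT enable bit, then writes back
 * only the latched status bits (the 0x8000ffff mask) so the
 * write-one-to-clear semantics ack them without re-arming anything.
 */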
static void i965_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
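
/*
 * Platform dispatch.  The Ivybridge and Haswell entries must come
 * before the generic HAS_PCH_SPLIT() branch, which would otherwise
 * capture them along with Ironlake/Sandybridge.
 */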
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);

	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	else
		dev->driver->get_vblank_timestamp = NULL;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
	} else if (IS_IVYBRIDGE(dev)) {
		/* Share pre & uninstall handlers with ILK/SNB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (IS_HASWELL(dev)) {
		/* Share interrupt handling with IVB */
		dev->driver->irq_handler = ivybridge_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ivybridge_enable_vblank;
		dev->driver->disable_vblank = ivybridge_disable_vblank;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_preinstall;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}