1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #include <drm/drmP.h> 30 #include <drm/i915_drm.h> 31 #include "i915_drv.h" 32 #include "intel_drv.h" 33 34 /* For display hotplug interrupt */ 35 static void 36 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 37 { 38 if ((dev_priv->irq_mask & mask) != 0) { 39 dev_priv->irq_mask &= ~mask; 40 I915_WRITE(DEIMR, dev_priv->irq_mask); 41 POSTING_READ(DEIMR); 42 } 43 } 44 45 static inline void 46 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 47 { 48 if ((dev_priv->irq_mask & mask) != mask) { 49 dev_priv->irq_mask |= mask; 50 I915_WRITE(DEIMR, dev_priv->irq_mask); 51 POSTING_READ(DEIMR); 52 } 53 } 54 55 void 56 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 57 { 58 if ((dev_priv->pipestat[pipe] & mask) != mask) { 59 u32 reg = PIPESTAT(pipe); 60 61 dev_priv->pipestat[pipe] |= mask; 62 /* Enable the interrupt, clear any pending status */ 63 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 64 POSTING_READ(reg); 65 } 66 } 67 68 void 69 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 70 { 71 if ((dev_priv->pipestat[pipe] & mask) != 0) { 72 u32 reg = PIPESTAT(pipe); 73 74 dev_priv->pipestat[pipe] &= ~mask; 75 I915_WRITE(reg, dev_priv->pipestat[pipe]); 76 POSTING_READ(reg); 77 } 78 } 79 80 /** 81 * intel_enable_asle - enable ASLE interrupt for OpRegion 82 */ 83 void intel_enable_asle(struct drm_device *dev) 84 { 85 drm_i915_private_t *dev_priv = dev->dev_private; 86 87 /* FIXME: opregion/asle for VLV */ 88 if (IS_VALLEYVIEW(dev)) 89 return; 90 91 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 92 93 if (HAS_PCH_SPLIT(dev)) 94 ironlake_enable_display_irq(dev_priv, DE_GSE); 95 else { 96 i915_enable_pipestat(dev_priv, 1, 97 PIPE_LEGACY_BLC_EVENT_ENABLE); 98 if (INTEL_INFO(dev)->gen >= 4) 99 i915_enable_pipestat(dev_priv, 0, 100 PIPE_LEGACY_BLC_EVENT_ENABLE); 101 } 102 103 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 104 } 105 106 /** 107 * i915_pipe_enabled - check if a pipe is enabled 108 * @dev: DRM device 109 * @pipe: pipe to check 110 * 111 * Reading certain registers when the pipe is disabled can hang the chip. 
112 * Use this routine to make sure the PLL is running and the pipe is active 113 * before reading such registers if unsure. 114 */ 115 static int 116 i915_pipe_enabled(struct drm_device *dev, int pipe) 117 { 118 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 119 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 120 pipe); 121 122 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; 123 } 124 125 /* Called from drm generic code, passed a 'crtc', which 126 * we use as a pipe index 127 */ 128 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 129 { 130 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 131 unsigned long high_frame; 132 unsigned long low_frame; 133 u32 high1, high2, low; 134 135 if (!i915_pipe_enabled(dev, pipe)) { 136 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 137 "pipe %c\n", pipe_name(pipe)); 138 return 0; 139 } 140 141 high_frame = PIPEFRAME(pipe); 142 low_frame = PIPEFRAMEPIXEL(pipe); 143 144 /* 145 * High & low register fields aren't synchronized, so make sure 146 * we get a low value that's stable across two reads of the high 147 * register. 148 */ 149 do { 150 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 151 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 152 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 153 } while (high1 != high2); 154 155 high1 >>= PIPE_FRAME_HIGH_SHIFT; 156 low >>= PIPE_FRAME_LOW_SHIFT; 157 return (high1 << 8) | low; 158 } 159 160 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 161 { 162 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 163 int reg = PIPE_FRMCOUNT_GM45(pipe); 164 165 if (!i915_pipe_enabled(dev, pipe)) { 166 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 167 "pipe %c\n", pipe_name(pipe)); 168 return 0; 169 } 170 171 return I915_READ(reg); 172 } 173 174 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 175 int *vpos, int *hpos) 176 { 177 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 178 u32 vbl = 0, position = 0; 179 int vbl_start, vbl_end, htotal, vtotal; 180 bool in_vbl = true; 181 int ret = 0; 182 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 183 pipe); 184 185 if (!i915_pipe_enabled(dev, pipe)) { 186 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 187 "pipe %c\n", pipe_name(pipe)); 188 return 0; 189 } 190 191 /* Get vtotal. */ 192 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 193 194 if (INTEL_INFO(dev)->gen >= 4) { 195 /* No obvious pixelcount register. Only query vertical 196 * scanout position from Display scan line register. 197 */ 198 position = I915_READ(PIPEDSL(pipe)); 199 200 /* Decode into vertical scanout position. Don't have 201 * horizontal scanout position. 202 */ 203 *vpos = position & 0x1fff; 204 *hpos = 0; 205 } else { 206 /* Have access to pixelcount since start of frame. 207 * We can split this into vertical and horizontal 208 * scanout position. 209 */ 210 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 211 212 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 213 *vpos = position / htotal; 214 *hpos = position - (*vpos * htotal); 215 } 216 217 /* Query vblank area. */ 218 vbl = I915_READ(VBLANK(cpu_transcoder)); 219 220 /* Test position against vblank region. 
*/ 221 vbl_start = vbl & 0x1fff; 222 vbl_end = (vbl >> 16) & 0x1fff; 223 224 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 225 in_vbl = false; 226 227 /* Inside "upper part" of vblank area? Apply corrective offset: */ 228 if (in_vbl && (*vpos >= vbl_start)) 229 *vpos = *vpos - vtotal; 230 231 /* Readouts valid? */ 232 if (vbl > 0) 233 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 234 235 /* In vblank? */ 236 if (in_vbl) 237 ret |= DRM_SCANOUTPOS_INVBL; 238 239 return ret; 240 } 241 242 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 243 int *max_error, 244 struct timeval *vblank_time, 245 unsigned flags) 246 { 247 struct drm_i915_private *dev_priv = dev->dev_private; 248 struct drm_crtc *crtc; 249 250 if (pipe < 0 || pipe >= dev_priv->num_pipe) { 251 DRM_ERROR("Invalid crtc %d\n", pipe); 252 return -EINVAL; 253 } 254 255 /* Get drm_crtc to timestamp: */ 256 crtc = intel_get_crtc_for_pipe(dev, pipe); 257 if (crtc == NULL) { 258 DRM_ERROR("Invalid crtc %d\n", pipe); 259 return -EINVAL; 260 } 261 262 if (!crtc->enabled) { 263 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 264 return -EBUSY; 265 } 266 267 /* Helper routine in DRM core does all the work: */ 268 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 269 vblank_time, flags, 270 crtc); 271 } 272 273 /* 274 * Handle hotplug events outside the interrupt handler proper. 275 */ 276 static void i915_hotplug_work_func(struct work_struct *work) 277 { 278 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 279 hotplug_work); 280 struct drm_device *dev = dev_priv->dev; 281 struct drm_mode_config *mode_config = &dev->mode_config; 282 struct intel_encoder *encoder; 283 284 /* HPD irq before everything is fully set up. */ 285 if (!dev_priv->enable_hotplug_processing) 286 return; 287 288 mutex_lock(&mode_config->mutex); 289 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 290 291 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 292 if (encoder->hot_plug) 293 encoder->hot_plug(encoder); 294 295 mutex_unlock(&mode_config->mutex); 296 297 /* Just fire off a uevent and let userspace tell us what to do */ 298 drm_helper_hpd_irq_event(dev); 299 } 300 301 static void ironlake_handle_rps_change(struct drm_device *dev) 302 { 303 drm_i915_private_t *dev_priv = dev->dev_private; 304 u32 busy_up, busy_down, max_avg, min_avg; 305 u8 new_delay; 306 307 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 308 309 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 310 311 new_delay = dev_priv->ips.cur_delay; 312 313 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 314 busy_up = I915_READ(RCPREVBSYTUPAVG); 315 busy_down = I915_READ(RCPREVBSYTDNAVG); 316 max_avg = I915_READ(RCBMAXAVG); 317 min_avg = I915_READ(RCBMINAVG); 318 319 /* Handle RCS change request from hw */ 320 if (busy_up > max_avg) { 321 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 322 new_delay = dev_priv->ips.cur_delay - 1; 323 if (new_delay < dev_priv->ips.max_delay) 324 new_delay = dev_priv->ips.max_delay; 325 } else if (busy_down < min_avg) { 326 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 327 new_delay = dev_priv->ips.cur_delay + 1; 328 if (new_delay > dev_priv->ips.min_delay) 329 new_delay = dev_priv->ips.min_delay; 330 } 331 332 if (ironlake_set_drps(dev, new_delay)) 333 dev_priv->ips.cur_delay = new_delay; 334 335 lockmgr(&mchdev_lock, LK_RELEASE); 336 337 return; 338 } 339 340 static void notify_ring(struct drm_device *dev, 341 struct intel_ring_buffer *ring) 342 { 343 struct drm_i915_private *dev_priv = 
		dev->dev_private;

	if (ring->obj == NULL)
		return;

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		dev_priv->gpu_error.hangcheck_count = 0;
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
412 */ 413 mutex_lock(&dev_priv->dev->struct_mutex); 414 415 misccpctl = I915_READ(GEN7_MISCCPCTL); 416 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 417 POSTING_READ(GEN7_MISCCPCTL); 418 419 error_status = I915_READ(GEN7_L3CDERRST1); 420 row = GEN7_PARITY_ERROR_ROW(error_status); 421 bank = GEN7_PARITY_ERROR_BANK(error_status); 422 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 423 424 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 425 GEN7_L3CDERRST1_ENABLE); 426 POSTING_READ(GEN7_L3CDERRST1); 427 428 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 429 430 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 431 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 432 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 433 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 434 435 mutex_unlock(&dev_priv->dev->struct_mutex); 436 437 parity_event[0] = "L3_PARITY_ERROR=1"; 438 parity_event[4] = NULL; 439 440 DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", 441 row, bank, subbank); 442 } 443 444 static void ivybridge_handle_parity_error(struct drm_device *dev) 445 { 446 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 447 448 if (!HAS_L3_GPU_CACHE(dev)) 449 return; 450 451 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 452 dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 453 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 454 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 455 456 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 457 } 458 459 static void snb_gt_irq_handler(struct drm_device *dev, 460 struct drm_i915_private *dev_priv, 461 u32 gt_iir) 462 { 463 464 if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | 465 GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) 466 notify_ring(dev, &dev_priv->ring[RCS]); 467 if (gt_iir & GEN6_BSD_USER_INTERRUPT) 468 notify_ring(dev, &dev_priv->ring[VCS]); 469 if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) 470 notify_ring(dev, &dev_priv->ring[BCS]); 471 472 if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | 473 GT_GEN6_BSD_CS_ERROR_INTERRUPT | 474 GT_RENDER_CS_ERROR_INTERRUPT)) { 475 DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); 476 i915_handle_error(dev, false); 477 } 478 479 if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) 480 ivybridge_handle_parity_error(dev); 481 } 482 483 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, 484 u32 pm_iir) 485 { 486 487 /* 488 * IIR bits should never already be set because IMR should 489 * prevent an interrupt from being shown in IIR. The warning 490 * displays a case where we've unsafely cleared 491 * dev_priv->rps.pm_iir. Although missing an interrupt of the same 492 * type is not a problem, it displays a problem in the logic. 493 * 494 * The mask bit in IMR is cleared by dev_priv->rps.work. 
495 */ 496 497 spin_lock(&dev_priv->rps.lock); 498 dev_priv->rps.pm_iir |= pm_iir; 499 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 500 POSTING_READ(GEN6_PMIMR); 501 spin_unlock(&dev_priv->rps.lock); 502 503 queue_work(dev_priv->wq, &dev_priv->rps.work); 504 } 505 506 static void gmbus_irq_handler(struct drm_device *dev) 507 { 508 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 509 510 wake_up_all(&dev_priv->gmbus_wait_queue); 511 } 512 513 static void dp_aux_irq_handler(struct drm_device *dev) 514 { 515 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; 516 517 wake_up_all(&dev_priv->gmbus_wait_queue); 518 } 519 520 static irqreturn_t valleyview_irq_handler(void *arg) 521 { 522 struct drm_device *dev = (struct drm_device *) arg; 523 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 524 u32 iir, gt_iir, pm_iir; 525 int pipe; 526 u32 pipe_stats[I915_MAX_PIPES]; 527 528 atomic_inc(&dev_priv->irq_received); 529 530 while (true) { 531 iir = I915_READ(VLV_IIR); 532 gt_iir = I915_READ(GTIIR); 533 pm_iir = I915_READ(GEN6_PMIIR); 534 535 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 536 goto out; 537 538 snb_gt_irq_handler(dev, dev_priv, gt_iir); 539 540 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 541 for_each_pipe(pipe) { 542 int reg = PIPESTAT(pipe); 543 pipe_stats[pipe] = I915_READ(reg); 544 545 /* 546 * Clear the PIPE*STAT regs before the IIR 547 */ 548 if (pipe_stats[pipe] & 0x8000ffff) { 549 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 550 DRM_DEBUG_DRIVER("pipe %c underrun\n", 551 pipe_name(pipe)); 552 I915_WRITE(reg, pipe_stats[pipe]); 553 } 554 } 555 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 556 557 for_each_pipe(pipe) { 558 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 559 drm_handle_vblank(dev, pipe); 560 561 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 562 intel_prepare_page_flip(dev, pipe); 563 intel_finish_page_flip(dev, pipe); 564 } 565 } 566 567 /* Consume port. 
Then clear IIR or we'll miss events */ 568 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 569 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 570 571 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 572 hotplug_status); 573 if (hotplug_status & dev_priv->hotplug_supported_mask) 574 queue_work(dev_priv->wq, 575 &dev_priv->hotplug_work); 576 577 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 578 I915_READ(PORT_HOTPLUG_STAT); 579 } 580 581 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 582 gmbus_irq_handler(dev); 583 584 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 585 gen6_queue_rps_work(dev_priv, pm_iir); 586 587 I915_WRITE(GTIIR, gt_iir); 588 I915_WRITE(GEN6_PMIIR, pm_iir); 589 I915_WRITE(VLV_IIR, iir); 590 } 591 592 out: 593 return; 594 } 595 596 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 597 { 598 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 599 int pipe; 600 601 if (pch_iir & SDE_HOTPLUG_MASK) 602 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 603 604 if (pch_iir & SDE_AUDIO_POWER_MASK) 605 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 606 (pch_iir & SDE_AUDIO_POWER_MASK) >> 607 SDE_AUDIO_POWER_SHIFT); 608 609 if (pch_iir & SDE_AUX_MASK) 610 dp_aux_irq_handler(dev); 611 612 if (pch_iir & SDE_GMBUS) 613 gmbus_irq_handler(dev); 614 615 if (pch_iir & SDE_AUDIO_HDCP_MASK) 616 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 617 618 if (pch_iir & SDE_AUDIO_TRANS_MASK) 619 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 620 621 if (pch_iir & SDE_POISON) 622 DRM_ERROR("PCH poison interrupt\n"); 623 624 if (pch_iir & SDE_FDI_MASK) 625 for_each_pipe(pipe) 626 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 627 pipe_name(pipe), 628 I915_READ(FDI_RX_IIR(pipe))); 629 630 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 631 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 632 633 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 634 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 635 636 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 637 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); 638 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 639 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 640 } 641 642 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 643 { 644 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 645 int pipe; 646 647 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 648 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 649 650 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 651 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 652 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 653 SDE_AUDIO_POWER_SHIFT_CPT); 654 655 if (pch_iir & SDE_AUX_MASK_CPT) 656 dp_aux_irq_handler(dev); 657 658 if (pch_iir & SDE_GMBUS_CPT) 659 gmbus_irq_handler(dev); 660 661 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 662 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 663 664 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 665 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 666 667 if (pch_iir & SDE_FDI_MASK_CPT) 668 for_each_pipe(pipe) 669 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 670 pipe_name(pipe), 671 I915_READ(FDI_RX_IIR(pipe))); 672 } 673 674 static irqreturn_t ivybridge_irq_handler(void *arg) 675 { 676 struct drm_device *dev = (struct drm_device *) arg; 677 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 678 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; 679 int i; 680 681 atomic_inc(&dev_priv->irq_received); 682 683 /* disable master interrupt before clearing iir */ 684 de_ier 
= I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_gse_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (de_iir & DE_PCH_EVENT_IVB) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clearing CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue).
*/ 773 sde_ier = I915_READ(SDEIER); 774 I915_WRITE(SDEIER, 0); 775 POSTING_READ(SDEIER); 776 777 de_iir = I915_READ(DEIIR); 778 gt_iir = I915_READ(GTIIR); 779 pm_iir = I915_READ(GEN6_PMIIR); 780 781 if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) 782 goto done; 783 784 if (IS_GEN5(dev)) 785 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 786 else 787 snb_gt_irq_handler(dev, dev_priv, gt_iir); 788 789 if (de_iir & DE_AUX_CHANNEL_A) 790 dp_aux_irq_handler(dev); 791 792 if (de_iir & DE_GSE) 793 intel_opregion_gse_intr(dev); 794 795 if (de_iir & DE_PIPEA_VBLANK) 796 drm_handle_vblank(dev, 0); 797 798 if (de_iir & DE_PIPEB_VBLANK) 799 drm_handle_vblank(dev, 1); 800 801 if (de_iir & DE_PLANEA_FLIP_DONE) { 802 intel_prepare_page_flip(dev, 0); 803 intel_finish_page_flip_plane(dev, 0); 804 } 805 806 if (de_iir & DE_PLANEB_FLIP_DONE) { 807 intel_prepare_page_flip(dev, 1); 808 intel_finish_page_flip_plane(dev, 1); 809 } 810 811 /* check event from PCH */ 812 if (de_iir & DE_PCH_EVENT) { 813 u32 pch_iir = I915_READ(SDEIIR); 814 815 if (HAS_PCH_CPT(dev)) 816 cpt_irq_handler(dev, pch_iir); 817 else 818 ibx_irq_handler(dev, pch_iir); 819 820 /* should clear PCH hotplug event before clear CPU irq */ 821 I915_WRITE(SDEIIR, pch_iir); 822 } 823 824 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 825 ironlake_handle_rps_change(dev); 826 827 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 828 gen6_queue_rps_work(dev_priv, pm_iir); 829 830 I915_WRITE(GTIIR, gt_iir); 831 I915_WRITE(DEIIR, de_iir); 832 I915_WRITE(GEN6_PMIIR, pm_iir); 833 834 done: 835 I915_WRITE(DEIER, de_ier); 836 POSTING_READ(DEIER); 837 I915_WRITE(SDEIER, sde_ier); 838 POSTING_READ(SDEIER); 839 } 840 841 /** 842 * i915_error_work_func - do process context error handling work 843 * @work: work struct 844 * 845 * Fire an error uevent so userspace can see that a hang or error 846 * was detected. 847 */ 848 static void i915_error_work_func(struct work_struct *work) 849 { 850 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 851 work); 852 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, 853 gpu_error); 854 struct drm_device *dev = dev_priv->dev; 855 struct intel_ring_buffer *ring; 856 #if 0 857 char *error_event[] = { "ERROR=1", NULL }; 858 char *reset_event[] = { "RESET=1", NULL }; 859 char *reset_done_event[] = { "ERROR=0", NULL }; 860 #endif 861 int i, ret; 862 863 /* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */ 864 865 /* 866 * Note that there's only one work item which does gpu resets, so we 867 * need not worry about concurrent gpu resets potentially incrementing 868 * error->reset_counter twice. We only need to take care of another 869 * racing irq/hangcheck declaring the gpu dead for a second time. A 870 * quick check for that is good enough: schedule_work ensures the 871 * correct ordering between hang detection and this work item, and since 872 * the reset in-progress bit is only ever set by code outside of this 873 * work we don't need to worry about any other races. 874 */ 875 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 876 DRM_DEBUG_DRIVER("resetting chip\n"); 877 #if 0 878 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, 879 reset_event); 880 #endif 881 882 ret = i915_reset(dev); 883 884 if (ret == 0) { 885 /* 886 * After all the gem state is reset, increment the reset 887 * counter and wake up everyone waiting for the reset to 888 * complete. 
889 * 890 * Since unlock operations are a one-sided barrier only, 891 * we need to insert a barrier here to order any seqno 892 * updates before 893 * the counter increment. 894 */ 895 cpu_sfence(); 896 atomic_inc(&dev_priv->gpu_error.reset_counter); 897 898 #if 0 899 kobject_uevent_env(&dev->primary->kdev.kobj, 900 KOBJ_CHANGE, reset_done_event); 901 #endif 902 } else { 903 atomic_set(&error->reset_counter, I915_WEDGED); 904 } 905 906 for_each_ring(ring, dev_priv, i) 907 wake_up_all(&ring->irq_queue); 908 909 wake_up_all(&dev_priv->gpu_error.reset_queue); 910 } 911 } 912 913 /* NB: please notice the memset */ 914 static void i915_get_extra_instdone(struct drm_device *dev, 915 uint32_t *instdone) 916 { 917 struct drm_i915_private *dev_priv = dev->dev_private; 918 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 919 920 switch(INTEL_INFO(dev)->gen) { 921 case 2: 922 case 3: 923 instdone[0] = I915_READ(INSTDONE); 924 break; 925 case 4: 926 case 5: 927 case 6: 928 instdone[0] = I915_READ(INSTDONE_I965); 929 instdone[1] = I915_READ(INSTDONE1); 930 break; 931 default: 932 #if 0 933 WARN_ONCE(1, "Unsupported platform\n"); 934 #endif 935 case 7: 936 instdone[0] = I915_READ(GEN7_INSTDONE_1); 937 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 938 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 939 instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 940 break; 941 } 942 } 943 944 #if 0 /* CONFIG_DEBUG_FS */ 945 static struct drm_i915_error_object * 946 i915_error_object_create(struct drm_i915_private *dev_priv, 947 struct drm_i915_gem_object *src) 948 { 949 struct drm_i915_error_object *dst; 950 int i, count; 951 u32 reloc_offset; 952 953 if (src == NULL || src->pages == NULL) 954 return NULL; 955 956 count = src->base.size / PAGE_SIZE; 957 958 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); 959 if (dst == NULL) 960 return NULL; 961 962 reloc_offset = src->gtt_offset; 963 for (i = 0; i < count; i++) { 964 unsigned long flags; 965 void *d; 966 967 d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 968 if (d == NULL) 969 goto unwind; 970 971 local_irq_save(flags); 972 if (reloc_offset < dev_priv->gtt.mappable_end && 973 src->has_global_gtt_mapping) { 974 void __iomem *s; 975 976 /* Simply ignore tiling or any overlapping fence. 977 * It's part of the error state, and this hopefully 978 * captures what the GPU read. 
979 */ 980 981 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, 982 reloc_offset); 983 memcpy_fromio(d, s, PAGE_SIZE); 984 io_mapping_unmap_atomic(s); 985 } else if (src->stolen) { 986 unsigned long offset; 987 988 offset = dev_priv->mm.stolen_base; 989 offset += src->stolen->start; 990 offset += i << PAGE_SHIFT; 991 992 memcpy_fromio(d, (void *)offset, PAGE_SIZE); 993 } else { 994 struct page *page; 995 void *s; 996 997 page = i915_gem_object_get_page(src, i); 998 999 drm_clflush_pages(&page, 1); 1000 1001 s = kmap_atomic(page); 1002 memcpy(d, s, PAGE_SIZE); 1003 kunmap_atomic(s); 1004 1005 drm_clflush_pages(&page, 1); 1006 } 1007 local_irq_restore(flags); 1008 1009 dst->pages[i] = d; 1010 1011 reloc_offset += PAGE_SIZE; 1012 } 1013 dst->page_count = count; 1014 dst->gtt_offset = src->gtt_offset; 1015 1016 return dst; 1017 1018 unwind: 1019 while (i--) 1020 kfree(dst->pages[i]); 1021 kfree(dst); 1022 return NULL; 1023 } 1024 1025 static void 1026 i915_error_object_free(struct drm_i915_error_object *obj) 1027 { 1028 int page; 1029 1030 if (obj == NULL) 1031 return; 1032 1033 for (page = 0; page < obj->page_count; page++) 1034 kfree(obj->pages[page]); 1035 1036 kfree(obj); 1037 } 1038 1039 void 1040 i915_error_state_free(struct drm_device *dev, 1041 struct drm_i915_error_state *error) 1042 { 1043 struct drm_i915_error_state *error = container_of(error_ref, 1044 typeof(*error), ref); 1045 int i; 1046 1047 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 1048 i915_error_object_free(error->ring[i].batchbuffer); 1049 i915_error_object_free(error->ring[i].ringbuffer); 1050 kfree(error->ring[i].requests); 1051 } 1052 1053 kfree(error->active_bo); 1054 kfree(error->overlay); 1055 kfree(error); 1056 } 1057 static void capture_bo(struct drm_i915_error_buffer *err, 1058 struct drm_i915_gem_object *obj) 1059 { 1060 err->size = obj->base.size; 1061 err->name = obj->base.name; 1062 err->rseqno = obj->last_read_seqno; 1063 err->wseqno = obj->last_write_seqno; 1064 err->gtt_offset = obj->gtt_offset; 1065 err->read_domains = obj->base.read_domains; 1066 err->write_domain = obj->base.write_domain; 1067 err->fence_reg = obj->fence_reg; 1068 err->pinned = 0; 1069 if (obj->pin_count > 0) 1070 err->pinned = 1; 1071 if (obj->user_pin_count > 0) 1072 err->pinned = -1; 1073 err->tiling = obj->tiling_mode; 1074 err->dirty = obj->dirty; 1075 err->purgeable = obj->madv != I915_MADV_WILLNEED; 1076 err->ring = obj->ring ? 
obj->ring->id : -1; 1077 err->cache_level = obj->cache_level; 1078 } 1079 1080 static u32 capture_active_bo(struct drm_i915_error_buffer *err, 1081 int count, struct list_head *head) 1082 { 1083 struct drm_i915_gem_object *obj; 1084 int i = 0; 1085 1086 list_for_each_entry(obj, head, mm_list) { 1087 capture_bo(err++, obj); 1088 if (++i == count) 1089 break; 1090 } 1091 1092 return i; 1093 } 1094 1095 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 1096 int count, struct list_head *head) 1097 { 1098 struct drm_i915_gem_object *obj; 1099 int i = 0; 1100 1101 list_for_each_entry(obj, head, gtt_list) { 1102 if (obj->pin_count == 0) 1103 continue; 1104 1105 capture_bo(err++, obj); 1106 if (++i == count) 1107 break; 1108 } 1109 1110 return i; 1111 } 1112 1113 static void i915_gem_record_fences(struct drm_device *dev, 1114 struct drm_i915_error_state *error) 1115 { 1116 struct drm_i915_private *dev_priv = dev->dev_private; 1117 int i; 1118 1119 /* Fences */ 1120 switch (INTEL_INFO(dev)->gen) { 1121 case 7: 1122 case 6: 1123 for (i = 0; i < 16; i++) 1124 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1125 break; 1126 case 5: 1127 case 4: 1128 for (i = 0; i < 16; i++) 1129 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 1130 break; 1131 case 3: 1132 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 1133 for (i = 0; i < 8; i++) 1134 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 1135 case 2: 1136 for (i = 0; i < 8; i++) 1137 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1138 break; 1139 1140 default: 1141 BUG(); 1142 } 1143 } 1144 1145 static struct drm_i915_error_object * 1146 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 1147 struct intel_ring_buffer *ring) 1148 { 1149 struct drm_i915_gem_object *obj; 1150 u32 seqno; 1151 1152 if (!ring->get_seqno) 1153 return NULL; 1154 1155 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { 1156 u32 acthd = I915_READ(ACTHD); 1157 1158 if (WARN_ON(ring->id != RCS)) 1159 return NULL; 1160 1161 obj = ring->private; 1162 if (acthd >= obj->gtt_offset && 1163 acthd < obj->gtt_offset + obj->base.size) 1164 return i915_error_object_create(dev_priv, obj); 1165 } 1166 1167 seqno = ring->get_seqno(ring, false); 1168 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1169 if (obj->ring != ring) 1170 continue; 1171 1172 if (i915_seqno_passed(seqno, obj->last_read_seqno)) 1173 continue; 1174 1175 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1176 continue; 1177 1178 /* We need to copy these to an anonymous buffer as the simplest 1179 * method to avoid being overwritten by userspace. 
1180 */ 1181 return i915_error_object_create(dev_priv, obj); 1182 } 1183 1184 return NULL; 1185 } 1186 1187 static void i915_record_ring_state(struct drm_device *dev, 1188 struct drm_i915_error_state *error, 1189 struct intel_ring_buffer *ring) 1190 { 1191 struct drm_i915_private *dev_priv = dev->dev_private; 1192 1193 if (INTEL_INFO(dev)->gen >= 6) { 1194 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 1195 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 1196 error->semaphore_mboxes[ring->id][0] 1197 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1198 error->semaphore_mboxes[ring->id][1] 1199 = I915_READ(RING_SYNC_1(ring->mmio_base)); 1200 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 1201 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 1202 } 1203 1204 if (INTEL_INFO(dev)->gen >= 4) { 1205 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 1206 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1207 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1208 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1209 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1210 if (ring->id == RCS) 1211 error->bbaddr = I915_READ64(BB_ADDR); 1212 } else { 1213 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1214 error->ipeir[ring->id] = I915_READ(IPEIR); 1215 error->ipehr[ring->id] = I915_READ(IPEHR); 1216 error->instdone[ring->id] = I915_READ(INSTDONE); 1217 } 1218 1219 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1220 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1221 error->seqno[ring->id] = ring->get_seqno(ring, false); 1222 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1223 error->head[ring->id] = I915_READ_HEAD(ring); 1224 error->tail[ring->id] = I915_READ_TAIL(ring); 1225 error->ctl[ring->id] = I915_READ_CTL(ring); 1226 1227 error->cpu_ring_head[ring->id] = ring->head; 1228 error->cpu_ring_tail[ring->id] = ring->tail; 1229 } 1230 1231 static void i915_gem_record_rings(struct drm_device *dev, 1232 struct drm_i915_error_state *error) 1233 { 1234 struct drm_i915_private *dev_priv = dev->dev_private; 1235 struct intel_ring_buffer *ring; 1236 struct drm_i915_gem_request *request; 1237 int i, count; 1238 1239 for_each_ring(ring, dev_priv, i) { 1240 i915_record_ring_state(dev, error, ring); 1241 1242 error->ring[i].batchbuffer = 1243 i915_error_first_batchbuffer(dev_priv, ring); 1244 1245 error->ring[i].ringbuffer = 1246 i915_error_object_create(dev_priv, ring->obj); 1247 1248 count = 0; 1249 list_for_each_entry(request, &ring->request_list, list) 1250 count++; 1251 1252 error->ring[i].num_requests = count; 1253 error->ring[i].requests = 1254 kmalloc(count*sizeof(struct drm_i915_error_request), 1255 GFP_ATOMIC); 1256 if (error->ring[i].requests == NULL) { 1257 error->ring[i].num_requests = 0; 1258 continue; 1259 } 1260 1261 count = 0; 1262 list_for_each_entry(request, &ring->request_list, list) { 1263 struct drm_i915_error_request *erq; 1264 1265 erq = &error->ring[i].requests[count++]; 1266 erq->seqno = request->seqno; 1267 erq->jiffies = request->emitted_jiffies; 1268 erq->tail = request->tail; 1269 } 1270 } 1271 } 1272 1273 /** 1274 * i915_capture_error_state - capture an error record for later analysis 1275 * @dev: drm device 1276 * 1277 * Should be called when an error is detected (either a hang or an error 1278 * interrupt) to capture error state from the time of the error. 
Fills 1279 * out a structure which becomes available in debugfs for user level tools 1280 * to pick up. 1281 */ 1282 static void i915_capture_error_state(struct drm_device *dev) 1283 { 1284 struct drm_i915_private *dev_priv = dev->dev_private; 1285 struct drm_i915_gem_object *obj; 1286 struct drm_i915_error_state *error; 1287 unsigned long flags; 1288 int i, pipe; 1289 1290 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1291 error = dev_priv->gpu_error.first_error; 1292 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1293 if (error) 1294 return; 1295 1296 /* Account for pipe specific data like PIPE*STAT */ 1297 error = kmalloc(sizeof(*error), M_DRM, M_WAITOK | M_NULLOK | M_ZERO); 1298 if (!error) { 1299 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1300 return; 1301 } 1302 1303 DRM_INFO("capturing error event; look for more information in" 1304 "/sys/kernel/debug/dri/%d/i915_error_state\n", 1305 dev->primary->index); 1306 1307 kref_init(&error->ref); 1308 error->eir = I915_READ(EIR); 1309 error->pgtbl_er = I915_READ(PGTBL_ER); 1310 error->ccid = I915_READ(CCID); 1311 1312 if (HAS_PCH_SPLIT(dev)) 1313 error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1314 else if (IS_VALLEYVIEW(dev)) 1315 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1316 else if (IS_GEN2(dev)) 1317 error->ier = I915_READ16(IER); 1318 else 1319 error->ier = I915_READ(IER); 1320 1321 if (INTEL_INFO(dev)->gen >= 6) 1322 error->derrmr = I915_READ(DERRMR); 1323 1324 if (IS_VALLEYVIEW(dev)) 1325 error->forcewake = I915_READ(FORCEWAKE_VLV); 1326 else if (INTEL_INFO(dev)->gen >= 7) 1327 error->forcewake = I915_READ(FORCEWAKE_MT); 1328 else if (INTEL_INFO(dev)->gen == 6) 1329 error->forcewake = I915_READ(FORCEWAKE); 1330 1331 for_each_pipe(pipe) 1332 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1333 1334 if (INTEL_INFO(dev)->gen >= 6) { 1335 error->error = I915_READ(ERROR_GEN6); 1336 error->done_reg = I915_READ(DONE_REG); 1337 } 1338 1339 if (INTEL_INFO(dev)->gen == 7) 1340 error->err_int = I915_READ(GEN7_ERR_INT); 1341 1342 i915_get_extra_instdone(dev, error->extra_instdone); 1343 1344 i915_gem_record_fences(dev, error); 1345 i915_gem_record_rings(dev, error); 1346 1347 /* Record buffers on the active and pinned lists. 
*/ 1348 error->active_bo = NULL; 1349 error->pinned_bo = NULL; 1350 1351 i = 0; 1352 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1353 i++; 1354 error->active_bo_count = i; 1355 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 1356 if (obj->pin_count) 1357 i++; 1358 error->pinned_bo_count = i - error->active_bo_count; 1359 1360 error->active_bo = NULL; 1361 error->pinned_bo = NULL; 1362 if (i) { 1363 error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 1364 GFP_ATOMIC); 1365 if (error->active_bo) 1366 error->pinned_bo = 1367 error->active_bo + error->active_bo_count; 1368 } 1369 1370 if (error->active_bo) 1371 error->active_bo_count = 1372 capture_active_bo(error->active_bo, 1373 error->active_bo_count, 1374 &dev_priv->mm.active_list); 1375 1376 if (error->pinned_bo) 1377 error->pinned_bo_count = 1378 capture_pinned_bo(error->pinned_bo, 1379 error->pinned_bo_count, 1380 &dev_priv->mm.bound_list); 1381 1382 do_gettimeofday(&error->time); 1383 1384 error->overlay = intel_overlay_capture_error_state(dev); 1385 error->display = intel_display_capture_error_state(dev); 1386 1387 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1388 if (dev_priv->gpu_error.first_error == NULL) { 1389 dev_priv->gpu_error.first_error = error; 1390 error = NULL; 1391 } 1392 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1393 1394 if (error) 1395 i915_error_state_free(&error->ref); 1396 } 1397 1398 void i915_destroy_error_state(struct drm_device *dev) 1399 { 1400 struct drm_i915_private *dev_priv = dev->dev_private; 1401 struct drm_i915_error_state *error; 1402 1403 lockmgr(&dev_priv->gpu_error.lock, LK_EXCLUSIVE); 1404 error = dev_priv->gpu_error.first_error; 1405 dev_priv->gpu_error.first_error = NULL; 1406 lockmgr(&dev_priv->gpu_error.lock, LK_RELEASE); 1407 1408 if (error) 1409 i915_error_state_free(dev, error); 1410 } 1411 #else 1412 #define i915_capture_error_state(x) 1413 #endif 1414 1415 static void i915_report_and_clear_eir(struct drm_device *dev) 1416 { 1417 struct drm_i915_private *dev_priv = dev->dev_private; 1418 uint32_t instdone[I915_NUM_INSTDONE_REG]; 1419 u32 eir = I915_READ(EIR); 1420 int pipe, i; 1421 1422 if (!eir) 1423 return; 1424 1425 pr_err("render error detected, EIR: 0x%08x\n", eir); 1426 1427 i915_get_extra_instdone(dev, instdone); 1428 1429 if (IS_G4X(dev)) { 1430 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1431 u32 ipeir = I915_READ(IPEIR_I965); 1432 1433 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1434 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1435 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1436 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1437 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1438 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1439 I915_WRITE(IPEIR_I965, ipeir); 1440 POSTING_READ(IPEIR_I965); 1441 } 1442 if (eir & GM45_ERROR_PAGE_TABLE) { 1443 u32 pgtbl_err = I915_READ(PGTBL_ER); 1444 pr_err("page table error\n"); 1445 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1446 I915_WRITE(PGTBL_ER, pgtbl_err); 1447 POSTING_READ(PGTBL_ER); 1448 } 1449 } 1450 1451 if (!IS_GEN2(dev)) { 1452 if (eir & I915_ERROR_PAGE_TABLE) { 1453 u32 pgtbl_err = I915_READ(PGTBL_ER); 1454 pr_err("page table error\n"); 1455 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1456 I915_WRITE(PGTBL_ER, pgtbl_err); 1457 POSTING_READ(PGTBL_ER); 1458 } 1459 } 1460 1461 if (eir & I915_ERROR_MEMORY_REFRESH) { 1462 pr_err("memory refresh error:\n"); 1463 for_each_pipe(pipe) 1464 pr_err("pipe %c stat: 0x%08x\n", 1465 pipe_name(pipe), 
I915_READ(PIPESTAT(pipe))); 1466 /* pipestat has already been acked */ 1467 } 1468 if (eir & I915_ERROR_INSTRUCTION) { 1469 pr_err("instruction error\n"); 1470 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1471 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1472 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1473 if (INTEL_INFO(dev)->gen < 4) { 1474 u32 ipeir = I915_READ(IPEIR); 1475 1476 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1477 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1478 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1479 I915_WRITE(IPEIR, ipeir); 1480 POSTING_READ(IPEIR); 1481 } else { 1482 u32 ipeir = I915_READ(IPEIR_I965); 1483 1484 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1485 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1486 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1487 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1488 I915_WRITE(IPEIR_I965, ipeir); 1489 POSTING_READ(IPEIR_I965); 1490 } 1491 } 1492 1493 I915_WRITE(EIR, eir); 1494 POSTING_READ(EIR); 1495 eir = I915_READ(EIR); 1496 if (eir) { 1497 /* 1498 * some errors might have become stuck, 1499 * mask them. 1500 */ 1501 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 1502 I915_WRITE(EMR, I915_READ(EMR) | eir); 1503 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 1504 } 1505 } 1506 1507 /** 1508 * i915_handle_error - handle an error interrupt 1509 * @dev: drm device 1510 * 1511 * Do some basic checking of regsiter state at error interrupt time and 1512 * dump it to the syslog. Also call i915_capture_error_state() to make 1513 * sure we get a record and make it available in debugfs. Fire a uevent 1514 * so userspace knows something bad happened (should trigger collection 1515 * of a ring dump etc.). 1516 */ 1517 void i915_handle_error(struct drm_device *dev, bool wedged) 1518 { 1519 struct drm_i915_private *dev_priv = dev->dev_private; 1520 struct intel_ring_buffer *ring; 1521 int i; 1522 1523 i915_capture_error_state(dev); 1524 i915_report_and_clear_eir(dev); 1525 1526 if (wedged) { 1527 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 1528 &dev_priv->gpu_error.reset_counter); 1529 1530 /* 1531 * Wakeup waiting processes so that the reset work item 1532 * doesn't deadlock trying to grab various locks. 1533 */ 1534 for_each_ring(ring, dev_priv, i) 1535 wake_up_all(&ring->irq_queue); 1536 } 1537 1538 queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 1539 } 1540 1541 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1542 { 1543 drm_i915_private_t *dev_priv = dev->dev_private; 1544 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1545 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1546 struct drm_i915_gem_object *obj; 1547 struct intel_unpin_work *work; 1548 bool stall_detected; 1549 1550 /* Ignore early vblank irqs */ 1551 if (intel_crtc == NULL) 1552 return; 1553 1554 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 1555 work = intel_crtc->unpin_work; 1556 1557 if (work == NULL || 1558 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1559 !work->enable_stall_check) { 1560 /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ 1561 lockmgr(&dev->event_lock, LK_RELEASE); 1562 return; 1563 } 1564 1565 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1566 obj = work->pending_flip_obj; 1567 if (INTEL_INFO(dev)->gen >= 4) { 1568 int dspsurf = DSPSURF(intel_crtc->plane); 1569 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1570 obj->gtt_offset; 1571 } else { 1572 int dspaddr = DSPADDR(intel_crtc->plane); 1573 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1574 crtc->y * crtc->fb->pitches[0] + 1575 crtc->x * crtc->fb->bits_per_pixel/8); 1576 } 1577 1578 lockmgr(&dev->event_lock, LK_RELEASE); 1579 1580 if (stall_detected) { 1581 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 1582 intel_prepare_page_flip(dev, intel_crtc->plane); 1583 } 1584 } 1585 1586 /* Called from drm generic code, passed 'crtc' which 1587 * we use as a pipe index 1588 */ 1589 static int i915_enable_vblank(struct drm_device *dev, int pipe) 1590 { 1591 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1592 1593 if (!i915_pipe_enabled(dev, pipe)) 1594 return -EINVAL; 1595 1596 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1597 if (INTEL_INFO(dev)->gen >= 4) 1598 i915_enable_pipestat(dev_priv, pipe, 1599 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1600 else 1601 i915_enable_pipestat(dev_priv, pipe, 1602 PIPE_VBLANK_INTERRUPT_ENABLE); 1603 1604 /* maintain vblank delivery even in deep C-states */ 1605 if (dev_priv->info->gen == 3) 1606 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 1607 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1608 1609 return 0; 1610 } 1611 1612 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1613 { 1614 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1615 1616 if (!i915_pipe_enabled(dev, pipe)) 1617 return -EINVAL; 1618 1619 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1620 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
1621 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1622 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1623 1624 return 0; 1625 } 1626 1627 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 1628 { 1629 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1630 1631 if (!i915_pipe_enabled(dev, pipe)) 1632 return -EINVAL; 1633 1634 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1635 ironlake_enable_display_irq(dev_priv, 1636 DE_PIPEA_VBLANK_IVB << (5 * pipe)); 1637 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1638 1639 return 0; 1640 } 1641 1642 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 1643 { 1644 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1645 u32 imr; 1646 1647 if (!i915_pipe_enabled(dev, pipe)) 1648 return -EINVAL; 1649 1650 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1651 imr = I915_READ(VLV_IMR); 1652 if (pipe == 0) 1653 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1654 else 1655 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1656 I915_WRITE(VLV_IMR, imr); 1657 i915_enable_pipestat(dev_priv, pipe, 1658 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1659 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1660 1661 return 0; 1662 } 1663 1664 /* Called from drm generic code, passed 'crtc' which 1665 * we use as a pipe index 1666 */ 1667 static void i915_disable_vblank(struct drm_device *dev, int pipe) 1668 { 1669 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1670 1671 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1672 if (dev_priv->info->gen == 3) 1673 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 1674 1675 i915_disable_pipestat(dev_priv, pipe, 1676 PIPE_VBLANK_INTERRUPT_ENABLE | 1677 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1678 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1679 } 1680 1681 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1682 { 1683 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1684 1685 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1686 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1687 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1688 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1689 } 1690 1691 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 1692 { 1693 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1694 1695 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1696 ironlake_disable_display_irq(dev_priv, 1697 DE_PIPEA_VBLANK_IVB << (pipe * 5)); 1698 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1699 } 1700 1701 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 1702 { 1703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1704 u32 imr; 1705 1706 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1707 i915_disable_pipestat(dev_priv, pipe, 1708 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1709 imr = I915_READ(VLV_IMR); 1710 if (pipe == 0) 1711 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1712 else 1713 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1714 I915_WRITE(VLV_IMR, imr); 1715 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1716 } 1717 1718 static u32 1719 ring_last_seqno(struct intel_ring_buffer *ring) 1720 { 1721 return list_entry(ring->request_list.prev, 1722 struct drm_i915_gem_request, list)->seqno; 1723 } 1724 1725 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1726 { 1727 if (list_empty(&ring->request_list) || 1728 i915_seqno_passed(ring->get_seqno(ring, false), 1729 ring_last_seqno(ring))) { 1730 /* Issue a wake-up to catch stuck h/w. 
*/ 1731 #if 0 /* XXX From OpenBSD */ 1732 if (waitqueue_active(&ring->irq_queue)) { 1733 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1734 ring->name); 1735 wake_up_all(&ring->irq_queue); 1736 *err = true; 1737 } 1738 #else 1739 wake_up_all(&ring->irq_queue); 1740 #endif 1741 return true; 1742 } 1743 return false; 1744 } 1745 1746 static bool kick_ring(struct intel_ring_buffer *ring) 1747 { 1748 struct drm_device *dev = ring->dev; 1749 struct drm_i915_private *dev_priv = dev->dev_private; 1750 u32 tmp = I915_READ_CTL(ring); 1751 if (tmp & RING_WAIT) { 1752 DRM_ERROR("Kicking stuck wait on %s\n", 1753 ring->name); 1754 I915_WRITE_CTL(ring, tmp); 1755 return true; 1756 } 1757 return false; 1758 } 1759 1760 static bool i915_hangcheck_hung(struct drm_device *dev) 1761 { 1762 drm_i915_private_t *dev_priv = dev->dev_private; 1763 1764 if (dev_priv->gpu_error.hangcheck_count++ > 1) { 1765 bool hung = true; 1766 1767 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1768 i915_handle_error(dev, true); 1769 1770 if (!IS_GEN2(dev)) { 1771 struct intel_ring_buffer *ring; 1772 int i; 1773 1774 /* Is the chip hanging on a WAIT_FOR_EVENT? 1775 * If so we can simply poke the RB_WAIT bit 1776 * and break the hang. This should work on 1777 * all but the second generation chipsets. 1778 */ 1779 for_each_ring(ring, dev_priv, i) 1780 hung &= !kick_ring(ring); 1781 } 1782 1783 return hung; 1784 } 1785 1786 return false; 1787 } 1788 1789 /** 1790 * This is called when the chip hasn't reported back with completed 1791 * batchbuffers in a long time. The first time this is called we simply record 1792 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 1793 * again, we assume the chip is wedged and try to fix it. 1794 */ 1795 void i915_hangcheck_elapsed(unsigned long data) 1796 { 1797 struct drm_device *dev = (struct drm_device *)data; 1798 drm_i915_private_t *dev_priv = dev->dev_private; 1799 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; 1800 struct intel_ring_buffer *ring; 1801 bool err = false, idle; 1802 int i; 1803 1804 if (!i915_enable_hangcheck) 1805 return; 1806 1807 memset(acthd, 0, sizeof(acthd)); 1808 idle = true; 1809 for_each_ring(ring, dev_priv, i) { 1810 idle &= i915_hangcheck_ring_idle(ring, &err); 1811 acthd[i] = intel_ring_get_active_head(ring); 1812 } 1813 1814 /* If all work is done then ACTHD clearly hasn't advanced. 
*/ 1815 if (idle) { 1816 if (err) { 1817 if (i915_hangcheck_hung(dev)) 1818 return; 1819 1820 goto repeat; 1821 } 1822 1823 dev_priv->gpu_error.hangcheck_count = 0; 1824 return; 1825 } 1826 1827 i915_get_extra_instdone(dev, instdone); 1828 if (memcmp(dev_priv->gpu_error.last_acthd, acthd, 1829 sizeof(acthd)) == 0 && 1830 memcmp(dev_priv->gpu_error.prev_instdone, instdone, 1831 sizeof(instdone)) == 0) { 1832 if (i915_hangcheck_hung(dev)) 1833 return; 1834 } else { 1835 dev_priv->gpu_error.hangcheck_count = 0; 1836 1837 memcpy(dev_priv->gpu_error.last_acthd, acthd, 1838 sizeof(acthd)); 1839 memcpy(dev_priv->gpu_error.prev_instdone, instdone, 1840 sizeof(instdone)); 1841 } 1842 1843 repeat: 1844 /* Reset timer case chip hangs without another request being added */ 1845 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 1846 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1847 } 1848 1849 /* drm_dma.h hooks 1850 */ 1851 static void ironlake_irq_preinstall(struct drm_device *dev) 1852 { 1853 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1854 1855 atomic_set(&dev_priv->irq_received, 0); 1856 1857 I915_WRITE(HWSTAM, 0xeffe); 1858 1859 /* XXX hotplug from PCH */ 1860 1861 I915_WRITE(DEIMR, 0xffffffff); 1862 I915_WRITE(DEIER, 0x0); 1863 POSTING_READ(DEIER); 1864 1865 /* and GT */ 1866 I915_WRITE(GTIMR, 0xffffffff); 1867 I915_WRITE(GTIER, 0x0); 1868 POSTING_READ(GTIER); 1869 1870 /* south display irq */ 1871 I915_WRITE(SDEIMR, 0xffffffff); 1872 I915_WRITE(SDEIER, 0x0); 1873 POSTING_READ(SDEIER); 1874 } 1875 1876 static void valleyview_irq_preinstall(struct drm_device *dev) 1877 { 1878 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1879 int pipe; 1880 1881 atomic_set(&dev_priv->irq_received, 0); 1882 1883 /* VLV magic */ 1884 I915_WRITE(VLV_IMR, 0); 1885 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 1886 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 1887 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 1888 1889 /* and GT */ 1890 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1891 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1892 I915_WRITE(GTIMR, 0xffffffff); 1893 I915_WRITE(GTIER, 0x0); 1894 POSTING_READ(GTIER); 1895 1896 I915_WRITE(DPINVGTT, 0xff); 1897 1898 I915_WRITE(PORT_HOTPLUG_EN, 0); 1899 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1900 for_each_pipe(pipe) 1901 I915_WRITE(PIPESTAT(pipe), 0xffff); 1902 I915_WRITE(VLV_IIR, 0xffffffff); 1903 I915_WRITE(VLV_IMR, 0xffffffff); 1904 I915_WRITE(VLV_IER, 0x0); 1905 POSTING_READ(VLV_IER); 1906 } 1907 1908 /* 1909 * Enable digital hotplug on the PCH, and configure the DP short pulse 1910 * duration to 2ms (which is the minimum in the Display Port spec) 1911 * 1912 * This register is the same on all known PCH chips. 
1913 */ 1914 1915 static void ibx_enable_hotplug(struct drm_device *dev) 1916 { 1917 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1918 u32 hotplug; 1919 1920 hotplug = I915_READ(PCH_PORT_HOTPLUG); 1921 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 1922 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 1923 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 1924 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 1925 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 1926 } 1927 1928 static void ibx_irq_postinstall(struct drm_device *dev) 1929 { 1930 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1931 u32 mask; 1932 1933 if (HAS_PCH_IBX(dev)) 1934 mask = SDE_HOTPLUG_MASK | 1935 SDE_GMBUS | 1936 SDE_AUX_MASK; 1937 else 1938 mask = SDE_HOTPLUG_MASK_CPT | 1939 SDE_GMBUS_CPT | 1940 SDE_AUX_MASK_CPT; 1941 1942 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1943 I915_WRITE(SDEIMR, ~mask); 1944 I915_WRITE(SDEIER, mask); 1945 POSTING_READ(SDEIER); 1946 1947 ibx_enable_hotplug(dev); 1948 } 1949 1950 static int ironlake_irq_postinstall(struct drm_device *dev) 1951 { 1952 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1953 /* enable kind of interrupts always enabled */ 1954 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1955 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 1956 DE_AUX_CHANNEL_A; 1957 u32 render_irqs; 1958 1959 dev_priv->irq_mask = ~display_mask; 1960 1961 /* should always can generate irq */ 1962 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1963 I915_WRITE(DEIMR, dev_priv->irq_mask); 1964 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 1965 POSTING_READ(DEIER); 1966 1967 dev_priv->gt_irq_mask = ~0; 1968 1969 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1970 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1971 1972 if (IS_GEN6(dev)) 1973 render_irqs = 1974 GT_USER_INTERRUPT | 1975 GEN6_BSD_USER_INTERRUPT | 1976 GEN6_BLITTER_USER_INTERRUPT; 1977 else 1978 render_irqs = 1979 GT_USER_INTERRUPT | 1980 GT_PIPE_NOTIFY | 1981 GT_BSD_USER_INTERRUPT; 1982 I915_WRITE(GTIER, render_irqs); 1983 POSTING_READ(GTIER); 1984 1985 ibx_irq_postinstall(dev); 1986 1987 if (IS_IRONLAKE_M(dev)) { 1988 /* Clear & enable PCU event interrupts */ 1989 I915_WRITE(DEIIR, DE_PCU_EVENT); 1990 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 1991 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 1992 } 1993 1994 return 0; 1995 } 1996 1997 static int ivybridge_irq_postinstall(struct drm_device *dev) 1998 { 1999 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2000 /* enable kind of interrupts always enabled */ 2001 u32 display_mask = 2002 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 2003 DE_PLANEC_FLIP_DONE_IVB | 2004 DE_PLANEB_FLIP_DONE_IVB | 2005 DE_PLANEA_FLIP_DONE_IVB | 2006 DE_AUX_CHANNEL_A_IVB; 2007 u32 render_irqs; 2008 2009 dev_priv->irq_mask = ~display_mask; 2010 2011 /* should always can generate irq */ 2012 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2013 I915_WRITE(DEIMR, dev_priv->irq_mask); 2014 I915_WRITE(DEIER, 2015 display_mask | 2016 DE_PIPEC_VBLANK_IVB | 2017 DE_PIPEB_VBLANK_IVB | 2018 DE_PIPEA_VBLANK_IVB); 2019 POSTING_READ(DEIER); 2020 2021 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 2022 2023 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2024 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2025 2026 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2027 GEN6_BLITTER_USER_INTERRUPT | 
GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 2028 I915_WRITE(GTIER, render_irqs); 2029 POSTING_READ(GTIER); 2030 2031 ibx_irq_postinstall(dev); 2032 2033 return 0; 2034 } 2035 2036 static int valleyview_irq_postinstall(struct drm_device *dev) 2037 { 2038 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2039 u32 enable_mask; 2040 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2041 u32 render_irqs; 2042 u16 msid; 2043 2044 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2045 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2046 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2047 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2048 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2049 2050 /* 2051 *Leave vblank interrupts masked initially. enable/disable will 2052 * toggle them based on usage. 2053 */ 2054 dev_priv->irq_mask = (~enable_mask) | 2055 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2056 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2057 2058 dev_priv->pipestat[0] = 0; 2059 dev_priv->pipestat[1] = 0; 2060 2061 /* Hack for broken MSIs on VLV */ 2062 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); 2063 pci_read_config_word(dev->pdev, 0x98, &msid); 2064 msid &= 0xff; /* mask out delivery bits */ 2065 msid |= (1<<14); 2066 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); 2067 2068 I915_WRITE(PORT_HOTPLUG_EN, 0); 2069 POSTING_READ(PORT_HOTPLUG_EN); 2070 2071 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2072 I915_WRITE(VLV_IER, enable_mask); 2073 I915_WRITE(VLV_IIR, 0xffffffff); 2074 I915_WRITE(PIPESTAT(0), 0xffff); 2075 I915_WRITE(PIPESTAT(1), 0xffff); 2076 POSTING_READ(VLV_IER); 2077 2078 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2079 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2080 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2081 2082 I915_WRITE(VLV_IIR, 0xffffffff); 2083 I915_WRITE(VLV_IIR, 0xffffffff); 2084 2085 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2086 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2087 2088 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2089 GEN6_BLITTER_USER_INTERRUPT; 2090 I915_WRITE(GTIER, render_irqs); 2091 POSTING_READ(GTIER); 2092 2093 /* ack & enable invalid PTE error interrupts */ 2094 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2095 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2096 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2097 #endif 2098 2099 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2100 2101 return 0; 2102 } 2103 2104 static void valleyview_hpd_irq_setup(struct drm_device *dev) 2105 { 2106 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2107 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2108 2109 /* Note HDMI and DP share bits */ 2110 if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) 2111 hotplug_en |= PORTB_HOTPLUG_INT_EN; 2112 if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) 2113 hotplug_en |= PORTC_HOTPLUG_INT_EN; 2114 if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) 2115 hotplug_en |= PORTD_HOTPLUG_INT_EN; 2116 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2117 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2118 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2119 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2120 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2121 hotplug_en |= CRT_HOTPLUG_INT_EN; 2122 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2123 } 2124 2125 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2126 } 2127 2128 static void valleyview_irq_uninstall(struct 
drm_device *dev) 2129 { 2130 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2131 int pipe; 2132 2133 if (!dev_priv) 2134 return; 2135 2136 for_each_pipe(pipe) 2137 I915_WRITE(PIPESTAT(pipe), 0xffff); 2138 2139 I915_WRITE(HWSTAM, 0xffffffff); 2140 I915_WRITE(PORT_HOTPLUG_EN, 0); 2141 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2142 for_each_pipe(pipe) 2143 I915_WRITE(PIPESTAT(pipe), 0xffff); 2144 I915_WRITE(VLV_IIR, 0xffffffff); 2145 I915_WRITE(VLV_IMR, 0xffffffff); 2146 I915_WRITE(VLV_IER, 0x0); 2147 POSTING_READ(VLV_IER); 2148 } 2149 2150 static void ironlake_irq_uninstall(struct drm_device *dev) 2151 { 2152 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2153 2154 if (!dev_priv) 2155 return; 2156 2157 I915_WRITE(HWSTAM, 0xffffffff); 2158 2159 I915_WRITE(DEIMR, 0xffffffff); 2160 I915_WRITE(DEIER, 0x0); 2161 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2162 2163 I915_WRITE(GTIMR, 0xffffffff); 2164 I915_WRITE(GTIER, 0x0); 2165 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2166 2167 I915_WRITE(SDEIMR, 0xffffffff); 2168 I915_WRITE(SDEIER, 0x0); 2169 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2170 } 2171 2172 static void i8xx_irq_preinstall(struct drm_device * dev) 2173 { 2174 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2175 int pipe; 2176 2177 atomic_set(&dev_priv->irq_received, 0); 2178 2179 for_each_pipe(pipe) 2180 I915_WRITE(PIPESTAT(pipe), 0); 2181 I915_WRITE16(IMR, 0xffff); 2182 I915_WRITE16(IER, 0x0); 2183 POSTING_READ16(IER); 2184 } 2185 2186 static int i8xx_irq_postinstall(struct drm_device *dev) 2187 { 2188 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2189 2190 dev_priv->pipestat[0] = 0; 2191 dev_priv->pipestat[1] = 0; 2192 2193 I915_WRITE16(EMR, 2194 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2195 2196 /* Unmask the interrupts that we always want on. */ 2197 dev_priv->irq_mask = 2198 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2199 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2200 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2201 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2202 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2203 I915_WRITE16(IMR, dev_priv->irq_mask); 2204 2205 I915_WRITE16(IER, 2206 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2207 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2208 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2209 I915_USER_INTERRUPT); 2210 POSTING_READ16(IER); 2211 2212 return 0; 2213 } 2214 2215 static irqreturn_t i8xx_irq_handler(void *arg) 2216 { 2217 struct drm_device *dev = (struct drm_device *) arg; 2218 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2219 u16 iir, new_iir; 2220 u32 pipe_stats[2]; 2221 int irq_received; 2222 int pipe; 2223 u16 flip_mask = 2224 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2225 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2226 2227 atomic_inc(&dev_priv->irq_received); 2228 2229 iir = I915_READ16(IIR); 2230 if (iir == 0) 2231 return; 2232 2233 while (iir & ~flip_mask) { 2234 /* Can't rely on pipestat interrupt bit in iir as it might 2235 * have been cleared after the pipestat interrupt was received. 2236 * It doesn't set the bit in iir again, but it still produces 2237 * interrupts (for non-MSI). 
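 * PIPESTAT is therefore sampled and cleared under the irq lock below,
 * before the IIR bits are acknowledged.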
2238 */ 2239 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2240 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2241 i915_handle_error(dev, false); 2242 2243 for_each_pipe(pipe) { 2244 int reg = PIPESTAT(pipe); 2245 pipe_stats[pipe] = I915_READ(reg); 2246 2247 /* 2248 * Clear the PIPE*STAT regs before the IIR 2249 */ 2250 if (pipe_stats[pipe] & 0x8000ffff) { 2251 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2252 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2253 pipe_name(pipe)); 2254 I915_WRITE(reg, pipe_stats[pipe]); 2255 irq_received = 1; 2256 } 2257 } 2258 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2259 2260 I915_WRITE16(IIR, iir & ~flip_mask); 2261 new_iir = I915_READ16(IIR); /* Flush posted writes */ 2262 2263 i915_update_dri1_breadcrumb(dev); 2264 2265 if (iir & I915_USER_INTERRUPT) 2266 notify_ring(dev, &dev_priv->ring[RCS]); 2267 2268 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2269 drm_handle_vblank(dev, 0)) { 2270 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 2271 intel_prepare_page_flip(dev, 0); 2272 intel_finish_page_flip(dev, 0); 2273 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; 2274 } 2275 } 2276 2277 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2278 drm_handle_vblank(dev, 1)) { 2279 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { 2280 intel_prepare_page_flip(dev, 1); 2281 intel_finish_page_flip(dev, 1); 2282 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2283 } 2284 } 2285 2286 iir = new_iir; 2287 } 2288 2289 return; 2290 } 2291 2292 static void i8xx_irq_uninstall(struct drm_device * dev) 2293 { 2294 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2295 int pipe; 2296 2297 for_each_pipe(pipe) { 2298 /* Clear enable bits; then clear status bits */ 2299 I915_WRITE(PIPESTAT(pipe), 0); 2300 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2301 } 2302 I915_WRITE16(IMR, 0xffff); 2303 I915_WRITE16(IER, 0x0); 2304 I915_WRITE16(IIR, I915_READ16(IIR)); 2305 } 2306 2307 static void i915_irq_preinstall(struct drm_device * dev) 2308 { 2309 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2310 int pipe; 2311 2312 atomic_set(&dev_priv->irq_received, 0); 2313 2314 if (I915_HAS_HOTPLUG(dev)) { 2315 I915_WRITE(PORT_HOTPLUG_EN, 0); 2316 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2317 } 2318 2319 I915_WRITE16(HWSTAM, 0xeffe); 2320 for_each_pipe(pipe) 2321 I915_WRITE(PIPESTAT(pipe), 0); 2322 I915_WRITE(IMR, 0xffffffff); 2323 I915_WRITE(IER, 0x0); 2324 POSTING_READ(IER); 2325 } 2326 2327 static int i915_irq_postinstall(struct drm_device *dev) 2328 { 2329 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2330 u32 enable_mask; 2331 2332 dev_priv->pipestat[0] = 0; 2333 dev_priv->pipestat[1] = 0; 2334 2335 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2336 2337 /* Unmask the interrupts that we always want on. 
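 * A bit left set in IMR masks the interrupt even if the corresponding
 * IER bit is set, so the hotplug path below both adds the display port
 * interrupt to enable_mask (IER) and clears it from irq_mask (IMR).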
*/ 2338 dev_priv->irq_mask = 2339 ~(I915_ASLE_INTERRUPT | 2340 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2341 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2342 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2343 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2344 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2345 2346 enable_mask = 2347 I915_ASLE_INTERRUPT | 2348 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2349 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2350 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2351 I915_USER_INTERRUPT; 2352 2353 if (I915_HAS_HOTPLUG(dev)) { 2354 I915_WRITE(PORT_HOTPLUG_EN, 0); 2355 POSTING_READ(PORT_HOTPLUG_EN); 2356 2357 /* Enable in IER... */ 2358 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2359 /* and unmask in IMR */ 2360 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2361 } 2362 2363 I915_WRITE(IMR, dev_priv->irq_mask); 2364 I915_WRITE(IER, enable_mask); 2365 POSTING_READ(IER); 2366 2367 intel_opregion_enable_asle(dev); 2368 2369 return 0; 2370 } 2371 2372 static void i915_hpd_irq_setup(struct drm_device *dev) 2373 { 2374 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2375 u32 hotplug_en; 2376 2377 if (I915_HAS_HOTPLUG(dev)) { 2378 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2379 2380 if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) 2381 hotplug_en |= PORTB_HOTPLUG_INT_EN; 2382 if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) 2383 hotplug_en |= PORTC_HOTPLUG_INT_EN; 2384 if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) 2385 hotplug_en |= PORTD_HOTPLUG_INT_EN; 2386 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2387 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2388 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2389 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2390 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2391 hotplug_en |= CRT_HOTPLUG_INT_EN; 2392 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2393 } 2394 2395 /* Ignore TV since it's buggy */ 2396 2397 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2398 } 2399 } 2400 2401 static irqreturn_t i915_irq_handler(void *arg) 2402 { 2403 struct drm_device *dev = (struct drm_device *) arg; 2404 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2405 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2406 u32 flip_mask = 2407 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2408 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2409 u32 flip[2] = { 2410 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, 2411 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT 2412 }; 2413 int pipe; 2414 2415 atomic_inc(&dev_priv->irq_received); 2416 2417 iir = I915_READ(IIR); 2418 do { 2419 bool irq_received = (iir & ~flip_mask) != 0; 2420 bool blc_event = false; 2421 2422 /* Can't rely on pipestat interrupt bit in iir as it might 2423 * have been cleared after the pipestat interrupt was received. 2424 * It doesn't set the bit in iir again, but it still produces 2425 * interrupts (for non-MSI). 
2426 */ 2427 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2428 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2429 i915_handle_error(dev, false); 2430 2431 for_each_pipe(pipe) { 2432 int reg = PIPESTAT(pipe); 2433 pipe_stats[pipe] = I915_READ(reg); 2434 2435 /* Clear the PIPE*STAT regs before the IIR */ 2436 if (pipe_stats[pipe] & 0x8000ffff) { 2437 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2438 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2439 pipe_name(pipe)); 2440 I915_WRITE(reg, pipe_stats[pipe]); 2441 irq_received = true; 2442 } 2443 } 2444 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2445 2446 if (!irq_received) 2447 break; 2448 2449 /* Consume port. Then clear IIR or we'll miss events */ 2450 if ((I915_HAS_HOTPLUG(dev)) && 2451 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2452 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2453 2454 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2455 hotplug_status); 2456 if (hotplug_status & dev_priv->hotplug_supported_mask) 2457 queue_work(dev_priv->wq, 2458 &dev_priv->hotplug_work); 2459 2460 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2461 POSTING_READ(PORT_HOTPLUG_STAT); 2462 } 2463 2464 I915_WRITE(IIR, iir & ~flip_mask); 2465 new_iir = I915_READ(IIR); /* Flush posted writes */ 2466 2467 if (iir & I915_USER_INTERRUPT) 2468 notify_ring(dev, &dev_priv->ring[RCS]); 2469 2470 for_each_pipe(pipe) { 2471 int plane = pipe; 2472 if (IS_MOBILE(dev)) 2473 plane = !plane; 2474 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 2475 drm_handle_vblank(dev, pipe)) { 2476 if (iir & flip[plane]) { 2477 intel_prepare_page_flip(dev, plane); 2478 intel_finish_page_flip(dev, pipe); 2479 flip_mask &= ~flip[plane]; 2480 } 2481 } 2482 2483 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2484 blc_event = true; 2485 } 2486 2487 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2488 intel_opregion_asle_intr(dev); 2489 2490 /* With MSI, interrupts are only generated when iir 2491 * transitions from zero to nonzero. If another bit got 2492 * set while we were handling the existing iir bits, then 2493 * we would never get another interrupt. 2494 * 2495 * This is fine on non-MSI as well, as if we hit this path 2496 * we avoid exiting the interrupt handler only to generate 2497 * another one. 2498 * 2499 * Note that for MSI this could cause a stray interrupt report 2500 * if an interrupt landed in the time between writing IIR and 2501 * the posting read. This should be rare enough to never 2502 * trigger the 99% of 100,000 interrupts test for disabling 2503 * stray interrupts. 
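 * That is why IIR is re-read into new_iir right after being acknowledged
 * and the loop repeats until no bits outside flip_mask remain.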
2504 */ 2505 iir = new_iir; 2506 } while (iir & ~flip_mask); 2507 2508 i915_update_dri1_breadcrumb(dev); 2509 } 2510 2511 static void i915_irq_uninstall(struct drm_device * dev) 2512 { 2513 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2514 int pipe; 2515 2516 if (I915_HAS_HOTPLUG(dev)) { 2517 I915_WRITE(PORT_HOTPLUG_EN, 0); 2518 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2519 } 2520 2521 I915_WRITE16(HWSTAM, 0xffff); 2522 for_each_pipe(pipe) { 2523 /* Clear enable bits; then clear status bits */ 2524 I915_WRITE(PIPESTAT(pipe), 0); 2525 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2526 } 2527 I915_WRITE(IMR, 0xffffffff); 2528 I915_WRITE(IER, 0x0); 2529 2530 I915_WRITE(IIR, I915_READ(IIR)); 2531 } 2532 2533 static void i965_irq_preinstall(struct drm_device * dev) 2534 { 2535 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2536 int pipe; 2537 2538 atomic_set(&dev_priv->irq_received, 0); 2539 2540 I915_WRITE(PORT_HOTPLUG_EN, 0); 2541 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2542 2543 I915_WRITE(HWSTAM, 0xeffe); 2544 for_each_pipe(pipe) 2545 I915_WRITE(PIPESTAT(pipe), 0); 2546 I915_WRITE(IMR, 0xffffffff); 2547 I915_WRITE(IER, 0x0); 2548 POSTING_READ(IER); 2549 } 2550 2551 static int i965_irq_postinstall(struct drm_device *dev) 2552 { 2553 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2554 u32 enable_mask; 2555 u32 error_mask; 2556 2557 /* Unmask the interrupts that we always want on. */ 2558 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2559 I915_DISPLAY_PORT_INTERRUPT | 2560 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2561 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2562 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2563 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2564 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2565 2566 enable_mask = ~dev_priv->irq_mask; 2567 enable_mask |= I915_USER_INTERRUPT; 2568 2569 if (IS_G4X(dev)) 2570 enable_mask |= I915_BSD_USER_INTERRUPT; 2571 2572 dev_priv->pipestat[0] = 0; 2573 dev_priv->pipestat[1] = 0; 2574 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); 2575 2576 /* 2577 * Enable some error detection, note the instruction error mask 2578 * bit is reserved, so we leave it masked. 
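 *
 * On G4X the page table, memory privilege and command parser privilege
 * errors are unmasked in addition to memory refresh; other chips only
 * unmask page table and memory refresh errors.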
2579 */ 2580 if (IS_G4X(dev)) { 2581 error_mask = ~(GM45_ERROR_PAGE_TABLE | 2582 GM45_ERROR_MEM_PRIV | 2583 GM45_ERROR_CP_PRIV | 2584 I915_ERROR_MEMORY_REFRESH); 2585 } else { 2586 error_mask = ~(I915_ERROR_PAGE_TABLE | 2587 I915_ERROR_MEMORY_REFRESH); 2588 } 2589 I915_WRITE(EMR, error_mask); 2590 2591 I915_WRITE(IMR, dev_priv->irq_mask); 2592 I915_WRITE(IER, enable_mask); 2593 POSTING_READ(IER); 2594 2595 I915_WRITE(PORT_HOTPLUG_EN, 0); 2596 POSTING_READ(PORT_HOTPLUG_EN); 2597 2598 intel_opregion_enable_asle(dev); 2599 2600 return 0; 2601 } 2602 2603 static void i965_hpd_irq_setup(struct drm_device *dev) 2604 { 2605 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2606 u32 hotplug_en; 2607 2608 /* Note HDMI and DP share hotplug bits */ 2609 hotplug_en = 0; 2610 if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) 2611 hotplug_en |= PORTB_HOTPLUG_INT_EN; 2612 if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) 2613 hotplug_en |= PORTC_HOTPLUG_INT_EN; 2614 if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) 2615 hotplug_en |= PORTD_HOTPLUG_INT_EN; 2616 if (IS_G4X(dev)) { 2617 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) 2618 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2619 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) 2620 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2621 } else { 2622 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) 2623 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2624 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) 2625 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2626 } 2627 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2628 hotplug_en |= CRT_HOTPLUG_INT_EN; 2629 2630 /* Programming the CRT detection parameters tends 2631 to generate a spurious hotplug event about three 2632 seconds later. So just do it once. 2633 */ 2634 if (IS_G4X(dev)) 2635 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 2636 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2637 } 2638 2639 /* Ignore TV since it's buggy */ 2640 2641 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2642 } 2643 2644 static irqreturn_t i965_irq_handler(void *arg) 2645 { 2646 struct drm_device *dev = (struct drm_device *) arg; 2647 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2648 u32 iir, new_iir; 2649 u32 pipe_stats[I915_MAX_PIPES]; 2650 int irq_received; 2651 int pipe; 2652 2653 atomic_inc(&dev_priv->irq_received); 2654 2655 iir = I915_READ(IIR); 2656 2657 for (;;) { 2658 bool blc_event = false; 2659 2660 irq_received = iir != 0; 2661 2662 /* Can't rely on pipestat interrupt bit in iir as it might 2663 * have been cleared after the pipestat interrupt was received. 2664 * It doesn't set the bit in iir again, but it still produces 2665 * interrupts (for non-MSI). 2666 */ 2667 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2668 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2669 i915_handle_error(dev, false); 2670 2671 for_each_pipe(pipe) { 2672 int reg = PIPESTAT(pipe); 2673 pipe_stats[pipe] = I915_READ(reg); 2674 2675 /* 2676 * Clear the PIPE*STAT regs before the IIR 2677 */ 2678 if (pipe_stats[pipe] & 0x8000ffff) { 2679 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2680 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2681 pipe_name(pipe)); 2682 I915_WRITE(reg, pipe_stats[pipe]); 2683 irq_received = 1; 2684 } 2685 } 2686 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2687 2688 if (!irq_received) 2689 break; 2690 2691 /* Consume port. 
Then clear IIR or we'll miss events */ 2692 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2693 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2694 2695 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2696 hotplug_status); 2697 if (hotplug_status & dev_priv->hotplug_supported_mask) 2698 queue_work(dev_priv->wq, 2699 &dev_priv->hotplug_work); 2700 2701 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2702 I915_READ(PORT_HOTPLUG_STAT); 2703 } 2704 2705 I915_WRITE(IIR, iir); 2706 new_iir = I915_READ(IIR); /* Flush posted writes */ 2707 2708 if (iir & I915_USER_INTERRUPT) 2709 notify_ring(dev, &dev_priv->ring[RCS]); 2710 if (iir & I915_BSD_USER_INTERRUPT) 2711 notify_ring(dev, &dev_priv->ring[VCS]); 2712 2713 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 2714 intel_prepare_page_flip(dev, 0); 2715 2716 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) 2717 intel_prepare_page_flip(dev, 1); 2718 2719 for_each_pipe(pipe) { 2720 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 2721 drm_handle_vblank(dev, pipe)) { 2722 i915_pageflip_stall_check(dev, pipe); 2723 intel_finish_page_flip(dev, pipe); 2724 } 2725 2726 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2727 blc_event = true; 2728 } 2729 2730 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2731 intel_opregion_asle_intr(dev); 2732 2733 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 2734 gmbus_irq_handler(dev); 2735 2736 /* With MSI, interrupts are only generated when iir 2737 * transitions from zero to nonzero. If another bit got 2738 * set while we were handling the existing iir bits, then 2739 * we would never get another interrupt. 2740 * 2741 * This is fine on non-MSI as well, as if we hit this path 2742 * we avoid exiting the interrupt handler only to generate 2743 * another one. 2744 * 2745 * Note that for MSI this could cause a stray interrupt report 2746 * if an interrupt landed in the time between writing IIR and 2747 * the posting read. This should be rare enough to never 2748 * trigger the 99% of 100,000 interrupts test for disabling 2749 * stray interrupts. 
2750 */ 2751 iir = new_iir; 2752 } 2753 2754 i915_update_dri1_breadcrumb(dev); 2755 } 2756 2757 static void i965_irq_uninstall(struct drm_device * dev) 2758 { 2759 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2760 int pipe; 2761 2762 if (!dev_priv) 2763 return; 2764 2765 I915_WRITE(PORT_HOTPLUG_EN, 0); 2766 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2767 2768 I915_WRITE(HWSTAM, 0xffffffff); 2769 for_each_pipe(pipe) 2770 I915_WRITE(PIPESTAT(pipe), 0); 2771 I915_WRITE(IMR, 0xffffffff); 2772 I915_WRITE(IER, 0x0); 2773 2774 for_each_pipe(pipe) 2775 I915_WRITE(PIPESTAT(pipe), 2776 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 2777 I915_WRITE(IIR, I915_READ(IIR)); 2778 } 2779 2780 void intel_irq_init(struct drm_device *dev) 2781 { 2782 struct drm_i915_private *dev_priv = dev->dev_private; 2783 2784 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2785 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 2786 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2787 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 2788 2789 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 2790 i915_hangcheck_elapsed, 2791 (unsigned long) dev); 2792 2793 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 2794 2795 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2796 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2797 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 2798 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2799 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2800 } 2801 2802 if (drm_core_check_feature(dev, DRIVER_MODESET)) 2803 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 2804 else 2805 dev->driver->get_vblank_timestamp = NULL; 2806 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2807 2808 if (IS_VALLEYVIEW(dev)) { 2809 dev->driver->irq_handler = valleyview_irq_handler; 2810 dev->driver->irq_preinstall = valleyview_irq_preinstall; 2811 dev->driver->irq_postinstall = valleyview_irq_postinstall; 2812 dev->driver->irq_uninstall = valleyview_irq_uninstall; 2813 dev->driver->enable_vblank = valleyview_enable_vblank; 2814 dev->driver->disable_vblank = valleyview_disable_vblank; 2815 dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup; 2816 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 2817 /* Share pre & uninstall handlers with ILK/SNB */ 2818 dev->driver->irq_handler = ivybridge_irq_handler; 2819 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2820 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2821 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2822 dev->driver->enable_vblank = ivybridge_enable_vblank; 2823 dev->driver->disable_vblank = ivybridge_disable_vblank; 2824 } else if (HAS_PCH_SPLIT(dev)) { 2825 dev->driver->irq_handler = ironlake_irq_handler; 2826 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2827 dev->driver->irq_postinstall = ironlake_irq_postinstall; 2828 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2829 dev->driver->enable_vblank = ironlake_enable_vblank; 2830 dev->driver->disable_vblank = ironlake_disable_vblank; 2831 } else { 2832 if (INTEL_INFO(dev)->gen == 2) { 2833 dev->driver->irq_preinstall = i8xx_irq_preinstall; 2834 dev->driver->irq_postinstall = i8xx_irq_postinstall; 2835 dev->driver->irq_handler = i8xx_irq_handler; 2836 dev->driver->irq_uninstall = i8xx_irq_uninstall; 2837 } else if (INTEL_INFO(dev)->gen == 3) { 2838 
dev->driver->irq_preinstall = i915_irq_preinstall; 2839 dev->driver->irq_postinstall = i915_irq_postinstall; 2840 dev->driver->irq_uninstall = i915_irq_uninstall; 2841 dev->driver->irq_handler = i915_irq_handler; 2842 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 2843 } else { 2844 dev->driver->irq_preinstall = i965_irq_preinstall; 2845 dev->driver->irq_postinstall = i965_irq_postinstall; 2846 dev->driver->irq_uninstall = i965_irq_uninstall; 2847 dev->driver->irq_handler = i965_irq_handler; 2848 dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup; 2849 } 2850 dev->driver->enable_vblank = i915_enable_vblank; 2851 dev->driver->disable_vblank = i915_disable_vblank; 2852 } 2853 } 2854 2855 void intel_hpd_init(struct drm_device *dev) 2856 { 2857 struct drm_i915_private *dev_priv = dev->dev_private; 2858 2859 if (dev_priv->display.hpd_irq_setup) 2860 dev_priv->display.hpd_irq_setup(dev); 2861 } 2862
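
/*
 * Illustrative sketch only, not part of the driver: roughly how the two
 * entry points above are expected to slot into device setup.  The helper
 * name, call site and ordering here are assumptions for illustration,
 * not the actual load path.
 */
#if 0
static int example_irq_setup(struct drm_device *dev)
{
	int ret;

	/* Pick the per-generation preinstall/postinstall/handler hooks
	 * and the vblank counter callbacks before installing the irq.
	 */
	intel_irq_init(dev);

	/* drm_irq_install() runs the preinstall/postinstall hooks chosen
	 * above and wires up the selected handler.
	 */
	ret = drm_irq_install(dev);
	if (ret)
		return ret;

	/* Program hotplug detection once outputs are known; this is a
	 * no-op on platforms without a hpd_irq_setup callback.
	 */
	intel_hpd_init(dev);

	return 0;
}
#endif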