1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/printk.h> 32 #include <linux/sysrq.h> 33 #include <linux/slab.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /* For display hotplug interrupt */ 41 static void 42 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 43 { 44 if ((dev_priv->irq_mask & mask) != 0) { 45 dev_priv->irq_mask &= ~mask; 46 I915_WRITE(DEIMR, dev_priv->irq_mask); 47 POSTING_READ(DEIMR); 48 } 49 } 50 51 static inline void 52 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 53 { 54 if ((dev_priv->irq_mask & mask) != mask) { 55 dev_priv->irq_mask |= mask; 56 I915_WRITE(DEIMR, dev_priv->irq_mask); 57 POSTING_READ(DEIMR); 58 } 59 } 60 61 void 62 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 63 { 64 if ((dev_priv->pipestat[pipe] & mask) != mask) { 65 u32 reg = PIPESTAT(pipe); 66 67 dev_priv->pipestat[pipe] |= mask; 68 /* Enable the interrupt, clear any pending status */ 69 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 70 POSTING_READ(reg); 71 } 72 } 73 74 void 75 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) 76 { 77 if ((dev_priv->pipestat[pipe] & mask) != 0) { 78 u32 reg = PIPESTAT(pipe); 79 80 dev_priv->pipestat[pipe] &= ~mask; 81 I915_WRITE(reg, dev_priv->pipestat[pipe]); 82 POSTING_READ(reg); 83 } 84 } 85 86 /** 87 * intel_enable_asle - enable ASLE interrupt for OpRegion 88 */ 89 void intel_enable_asle(struct drm_device *dev) 90 { 91 drm_i915_private_t *dev_priv = dev->dev_private; 92 unsigned long irqflags; 93 94 /* FIXME: opregion/asle for VLV */ 95 if (IS_VALLEYVIEW(dev)) 96 return; 97 98 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 99 100 if (HAS_PCH_SPLIT(dev)) 101 ironlake_enable_display_irq(dev_priv, DE_GSE); 102 else { 103 i915_enable_pipestat(dev_priv, 1, 104 PIPE_LEGACY_BLC_EVENT_ENABLE); 105 if (INTEL_INFO(dev)->gen >= 4) 106 i915_enable_pipestat(dev_priv, 0, 107 PIPE_LEGACY_BLC_EVENT_ENABLE); 108 } 109 110 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 111 } 112 113 /** 114 * i915_pipe_enabled - check if a pipe is enabled 115 * @dev: 
DRM device 116 * @pipe: pipe to check 117 * 118 * Reading certain registers when the pipe is disabled can hang the chip. 119 * Use this routine to make sure the PLL is running and the pipe is active 120 * before reading such registers if unsure. 121 */ 122 static int 123 i915_pipe_enabled(struct drm_device *dev, int pipe) 124 { 125 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 126 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 127 pipe); 128 129 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; 130 } 131 132 /* Called from drm generic code, passed a 'crtc', which 133 * we use as a pipe index 134 */ 135 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) 136 { 137 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 138 unsigned long high_frame; 139 unsigned long low_frame; 140 u32 high1, high2, low; 141 142 if (!i915_pipe_enabled(dev, pipe)) { 143 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 144 "pipe %c\n", pipe_name(pipe)); 145 return 0; 146 } 147 148 high_frame = PIPEFRAME(pipe); 149 low_frame = PIPEFRAMEPIXEL(pipe); 150 151 /* 152 * High & low register fields aren't synchronized, so make sure 153 * we get a low value that's stable across two reads of the high 154 * register. 155 */ 156 do { 157 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 158 low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; 159 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 160 } while (high1 != high2); 161 162 high1 >>= PIPE_FRAME_HIGH_SHIFT; 163 low >>= PIPE_FRAME_LOW_SHIFT; 164 return (high1 << 8) | low; 165 } 166 167 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) 168 { 169 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 170 int reg = PIPE_FRMCOUNT_GM45(pipe); 171 172 if (!i915_pipe_enabled(dev, pipe)) { 173 DRM_DEBUG_DRIVER("trying to get vblank count for disabled " 174 "pipe %c\n", pipe_name(pipe)); 175 return 0; 176 } 177 178 return I915_READ(reg); 179 } 180 181 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, 182 int *vpos, int *hpos) 183 { 184 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 185 u32 vbl = 0, position = 0; 186 int vbl_start, vbl_end, htotal, vtotal; 187 bool in_vbl = true; 188 int ret = 0; 189 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 190 pipe); 191 192 if (!i915_pipe_enabled(dev, pipe)) { 193 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 194 "pipe %c\n", pipe_name(pipe)); 195 return 0; 196 } 197 198 /* Get vtotal. */ 199 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 200 201 if (INTEL_INFO(dev)->gen >= 4) { 202 /* No obvious pixelcount register. Only query vertical 203 * scanout position from Display scan line register. 204 */ 205 position = I915_READ(PIPEDSL(pipe)); 206 207 /* Decode into vertical scanout position. Don't have 208 * horizontal scanout position. 209 */ 210 *vpos = position & 0x1fff; 211 *hpos = 0; 212 } else { 213 /* Have access to pixelcount since start of frame. 214 * We can split this into vertical and horizontal 215 * scanout position. 216 */ 217 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 218 219 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); 220 *vpos = position / htotal; 221 *hpos = position - (*vpos * htotal); 222 } 223 224 /* Query vblank area. 
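	   The VBLANK register packs the vblank start scanline into bits 12:0
	   and the vblank end scanline into bits 28:16; they are decoded into
	   vbl_start and vbl_end just below.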
*/ 225 vbl = I915_READ(VBLANK(cpu_transcoder)); 226 227 /* Test position against vblank region. */ 228 vbl_start = vbl & 0x1fff; 229 vbl_end = (vbl >> 16) & 0x1fff; 230 231 if ((*vpos < vbl_start) || (*vpos > vbl_end)) 232 in_vbl = false; 233 234 /* Inside "upper part" of vblank area? Apply corrective offset: */ 235 if (in_vbl && (*vpos >= vbl_start)) 236 *vpos = *vpos - vtotal; 237 238 /* Readouts valid? */ 239 if (vbl > 0) 240 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 241 242 /* In vblank? */ 243 if (in_vbl) 244 ret |= DRM_SCANOUTPOS_INVBL; 245 246 return ret; 247 } 248 249 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, 250 int *max_error, 251 struct timeval *vblank_time, 252 unsigned flags) 253 { 254 struct drm_i915_private *dev_priv = dev->dev_private; 255 struct drm_crtc *crtc; 256 257 if (pipe < 0 || pipe >= dev_priv->num_pipe) { 258 DRM_ERROR("Invalid crtc %d\n", pipe); 259 return -EINVAL; 260 } 261 262 /* Get drm_crtc to timestamp: */ 263 crtc = intel_get_crtc_for_pipe(dev, pipe); 264 if (crtc == NULL) { 265 DRM_ERROR("Invalid crtc %d\n", pipe); 266 return -EINVAL; 267 } 268 269 if (!crtc->enabled) { 270 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); 271 return -EBUSY; 272 } 273 274 /* Helper routine in DRM core does all the work: */ 275 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 276 vblank_time, flags, 277 crtc); 278 } 279 280 /* 281 * Handle hotplug events outside the interrupt handler proper. 282 */ 283 static void i915_hotplug_work_func(struct work_struct *work) 284 { 285 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 286 hotplug_work); 287 struct drm_device *dev = dev_priv->dev; 288 struct drm_mode_config *mode_config = &dev->mode_config; 289 struct intel_encoder *encoder; 290 291 mutex_lock(&mode_config->mutex); 292 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 293 294 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 295 if (encoder->hot_plug) 296 encoder->hot_plug(encoder); 297 298 mutex_unlock(&mode_config->mutex); 299 300 /* Just fire off a uevent and let userspace tell us what to do */ 301 drm_helper_hpd_irq_event(dev); 302 } 303 304 /* defined intel_pm.c */ 305 extern spinlock_t mchdev_lock; 306 307 static void ironlake_handle_rps_change(struct drm_device *dev) 308 { 309 drm_i915_private_t *dev_priv = dev->dev_private; 310 u32 busy_up, busy_down, max_avg, min_avg; 311 u8 new_delay; 312 unsigned long flags; 313 314 spin_lock_irqsave(&mchdev_lock, flags); 315 316 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 317 318 new_delay = dev_priv->ips.cur_delay; 319 320 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 321 busy_up = I915_READ(RCPREVBSYTUPAVG); 322 busy_down = I915_READ(RCPREVBSYTDNAVG); 323 max_avg = I915_READ(RCBMAXAVG); 324 min_avg = I915_READ(RCBMINAVG); 325 326 /* Handle RCS change request from hw */ 327 if (busy_up > max_avg) { 328 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 329 new_delay = dev_priv->ips.cur_delay - 1; 330 if (new_delay < dev_priv->ips.max_delay) 331 new_delay = dev_priv->ips.max_delay; 332 } else if (busy_down < min_avg) { 333 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 334 new_delay = dev_priv->ips.cur_delay + 1; 335 if (new_delay > dev_priv->ips.min_delay) 336 new_delay = dev_priv->ips.min_delay; 337 } 338 339 if (ironlake_set_drps(dev, new_delay)) 340 dev_priv->ips.cur_delay = new_delay; 341 342 spin_unlock_irqrestore(&mchdev_lock, flags); 343 344 return; 345 } 346 347 static void notify_ring(struct drm_device 
*dev, 348 struct intel_ring_buffer *ring) 349 { 350 struct drm_i915_private *dev_priv = dev->dev_private; 351 352 if (ring->obj == NULL) 353 return; 354 355 trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); 356 357 #ifdef __NetBSD__ 358 { 359 unsigned long flags; 360 spin_lock_irqsave(&dev_priv->irq_lock, flags); 361 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, &dev_priv->irq_lock); 362 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 363 } 364 #else 365 wake_up_all(&ring->irq_queue); 366 #endif 367 if (i915_enable_hangcheck) { 368 dev_priv->hangcheck_count = 0; 369 mod_timer(&dev_priv->hangcheck_timer, 370 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 371 } 372 } 373 374 static void gen6_pm_rps_work(struct work_struct *work) 375 { 376 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 377 rps.work); 378 u32 pm_iir, pm_imr; 379 u8 new_delay; 380 381 spin_lock_irq(&dev_priv->rps.lock); 382 pm_iir = dev_priv->rps.pm_iir; 383 dev_priv->rps.pm_iir = 0; 384 pm_imr = I915_READ(GEN6_PMIMR); 385 I915_WRITE(GEN6_PMIMR, 0); 386 spin_unlock_irq(&dev_priv->rps.lock); 387 __USE(pm_imr); /* XXX reduce merge conflicts */ 388 389 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 390 return; 391 392 mutex_lock(&dev_priv->rps.hw_lock); 393 394 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 395 new_delay = dev_priv->rps.cur_delay + 1; 396 else 397 new_delay = dev_priv->rps.cur_delay - 1; 398 399 /* sysfs frequency interfaces may have snuck in while servicing the 400 * interrupt 401 */ 402 if (!(new_delay > dev_priv->rps.max_delay || 403 new_delay < dev_priv->rps.min_delay)) { 404 gen6_set_rps(dev_priv->dev, new_delay); 405 } 406 407 mutex_unlock(&dev_priv->rps.hw_lock); 408 } 409 410 411 /** 412 * ivybridge_parity_work - Workqueue called when a parity error interrupt 413 * occurred. 414 * @work: workqueue struct 415 * 416 * Doesn't actually do anything except notify userspace. As a consequence of 417 * this event, userspace should try to remap the bad rows since statistically 418 * it is likely the same row is more likely to go bad again. 419 */ 420 static void ivybridge_parity_work(struct work_struct *work) 421 { 422 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 423 l3_parity.error_work); 424 u32 error_status, row, bank, subbank; 425 #ifndef __NetBSD__ /* XXX kobject uevent...? */ 426 char *parity_event[5]; 427 #endif 428 uint32_t misccpctl; 429 unsigned long flags; 430 431 /* We must turn off DOP level clock gating to access the L3 registers. 432 * In order to prevent a get/put style interface, acquire struct mutex 433 * any time we access those registers. 434 */ 435 mutex_lock(&dev_priv->dev->struct_mutex); 436 437 misccpctl = I915_READ(GEN7_MISCCPCTL); 438 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 439 POSTING_READ(GEN7_MISCCPCTL); 440 441 error_status = I915_READ(GEN7_L3CDERRST1); 442 row = GEN7_PARITY_ERROR_ROW(error_status); 443 bank = GEN7_PARITY_ERROR_BANK(error_status); 444 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 445 446 I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | 447 GEN7_L3CDERRST1_ENABLE); 448 POSTING_READ(GEN7_L3CDERRST1); 449 450 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 451 452 spin_lock_irqsave(&dev_priv->irq_lock, flags); 453 dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 454 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 455 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 456 457 mutex_unlock(&dev_priv->dev->struct_mutex); 458 459 #ifndef __NetBSD__ /* XXX kobject uevent...? 
 */
	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);
#endif

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

#ifndef __NetBSD__ /* XXX kobject uevent...? */
	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
#endif
}

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * indicates a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it indicates a bug in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
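	 *
	 * The bits accumulated in dev_priv->rps.pm_iir here are consumed by
	 * gen6_pm_rps_work(), which reads and clears them (along with
	 * GEN6_PMIMR) under rps.lock before adjusting the RPS frequency.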
533 */ 534 535 spin_lock_irqsave(&dev_priv->rps.lock, flags); 536 dev_priv->rps.pm_iir |= pm_iir; 537 I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); 538 POSTING_READ(GEN6_PMIMR); 539 spin_unlock_irqrestore(&dev_priv->rps.lock, flags); 540 541 queue_work(dev_priv->wq, &dev_priv->rps.work); 542 } 543 544 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) 545 { 546 struct drm_device *dev = (struct drm_device *) arg; 547 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 548 u32 iir, gt_iir, pm_iir; 549 irqreturn_t ret = IRQ_NONE; 550 unsigned long irqflags; 551 int pipe; 552 u32 pipe_stats[I915_MAX_PIPES]; 553 bool blc_event __unused; 554 555 atomic_inc(&dev_priv->irq_received); 556 557 while (true) { 558 iir = I915_READ(VLV_IIR); 559 gt_iir = I915_READ(GTIIR); 560 pm_iir = I915_READ(GEN6_PMIIR); 561 562 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 563 goto out; 564 565 ret = IRQ_HANDLED; 566 567 snb_gt_irq_handler(dev, dev_priv, gt_iir); 568 569 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 570 for_each_pipe(pipe) { 571 int reg = PIPESTAT(pipe); 572 pipe_stats[pipe] = I915_READ(reg); 573 574 /* 575 * Clear the PIPE*STAT regs before the IIR 576 */ 577 if (pipe_stats[pipe] & 0x8000ffff) { 578 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 579 DRM_DEBUG_DRIVER("pipe %c underrun\n", 580 pipe_name(pipe)); 581 I915_WRITE(reg, pipe_stats[pipe]); 582 } 583 } 584 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 585 586 for_each_pipe(pipe) { 587 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 588 drm_handle_vblank(dev, pipe); 589 590 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 591 intel_prepare_page_flip(dev, pipe); 592 intel_finish_page_flip(dev, pipe); 593 } 594 } 595 596 /* Consume port. Then clear IIR or we'll miss events */ 597 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 598 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 599 600 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 601 hotplug_status); 602 if (hotplug_status & dev_priv->hotplug_supported_mask) 603 queue_work(dev_priv->wq, 604 &dev_priv->hotplug_work); 605 606 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 607 I915_READ(PORT_HOTPLUG_STAT); 608 } 609 610 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 611 blc_event = true; 612 613 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 614 gen6_queue_rps_work(dev_priv, pm_iir); 615 616 I915_WRITE(GTIIR, gt_iir); 617 I915_WRITE(GEN6_PMIIR, pm_iir); 618 I915_WRITE(VLV_IIR, iir); 619 } 620 621 out: 622 return ret; 623 } 624 625 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 626 { 627 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 628 int pipe; 629 630 if (pch_iir & SDE_HOTPLUG_MASK) 631 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 632 633 if (pch_iir & SDE_AUDIO_POWER_MASK) 634 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 635 (pch_iir & SDE_AUDIO_POWER_MASK) >> 636 SDE_AUDIO_POWER_SHIFT); 637 638 if (pch_iir & SDE_GMBUS) 639 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 640 641 if (pch_iir & SDE_AUDIO_HDCP_MASK) 642 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 643 644 if (pch_iir & SDE_AUDIO_TRANS_MASK) 645 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 646 647 if (pch_iir & SDE_POISON) 648 DRM_ERROR("PCH poison interrupt\n"); 649 650 if (pch_iir & SDE_FDI_MASK) 651 for_each_pipe(pipe) 652 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 653 pipe_name(pipe), 654 I915_READ(FDI_RX_IIR(pipe))); 655 656 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 657 
DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 658 659 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 660 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 661 662 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 663 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); 664 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 665 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 666 } 667 668 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 669 { 670 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 671 int pipe; 672 673 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 674 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 675 676 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 677 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 678 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 679 SDE_AUDIO_POWER_SHIFT_CPT); 680 681 if (pch_iir & SDE_AUX_MASK_CPT) 682 DRM_DEBUG_DRIVER("AUX channel interrupt\n"); 683 684 if (pch_iir & SDE_GMBUS_CPT) 685 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 686 687 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 688 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 689 690 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 691 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 692 693 if (pch_iir & SDE_FDI_MASK_CPT) 694 for_each_pipe(pipe) 695 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 696 pipe_name(pipe), 697 I915_READ(FDI_RX_IIR(pipe))); 698 } 699 700 static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) 701 { 702 struct drm_device *dev = (struct drm_device *) arg; 703 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 704 u32 de_iir, gt_iir, de_ier, pm_iir; 705 irqreturn_t ret = IRQ_NONE; 706 int i; 707 708 atomic_inc(&dev_priv->irq_received); 709 710 /* disable master interrupt before clearing iir */ 711 de_ier = I915_READ(DEIER); 712 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 713 714 gt_iir = I915_READ(GTIIR); 715 if (gt_iir) { 716 snb_gt_irq_handler(dev, dev_priv, gt_iir); 717 I915_WRITE(GTIIR, gt_iir); 718 ret = IRQ_HANDLED; 719 } 720 721 de_iir = I915_READ(DEIIR); 722 if (de_iir) { 723 if (de_iir & DE_GSE_IVB) 724 intel_opregion_gse_intr(dev); 725 726 for (i = 0; i < 3; i++) { 727 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 728 drm_handle_vblank(dev, i); 729 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 730 intel_prepare_page_flip(dev, i); 731 intel_finish_page_flip_plane(dev, i); 732 } 733 } 734 735 /* check event from PCH */ 736 if (de_iir & DE_PCH_EVENT_IVB) { 737 u32 pch_iir = I915_READ(SDEIIR); 738 739 cpt_irq_handler(dev, pch_iir); 740 741 /* clear PCH hotplug event before clear CPU irq */ 742 I915_WRITE(SDEIIR, pch_iir); 743 } 744 745 I915_WRITE(DEIIR, de_iir); 746 ret = IRQ_HANDLED; 747 } 748 749 pm_iir = I915_READ(GEN6_PMIIR); 750 if (pm_iir) { 751 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 752 gen6_queue_rps_work(dev_priv, pm_iir); 753 I915_WRITE(GEN6_PMIIR, pm_iir); 754 ret = IRQ_HANDLED; 755 } 756 757 I915_WRITE(DEIER, de_ier); 758 POSTING_READ(DEIER); 759 760 return ret; 761 } 762 763 static void ilk_gt_irq_handler(struct drm_device *dev, 764 struct drm_i915_private *dev_priv, 765 u32 gt_iir) 766 { 767 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 768 notify_ring(dev, &dev_priv->ring[RCS]); 769 if (gt_iir & GT_BSD_USER_INTERRUPT) 770 notify_ring(dev, &dev_priv->ring[VCS]); 771 } 772 773 static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) 774 { 775 struct drm_device *dev = (struct drm_device *) arg; 776 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 777 int ret = IRQ_NONE; 
778 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 779 780 atomic_inc(&dev_priv->irq_received); 781 782 /* disable master interrupt before clearing iir */ 783 de_ier = I915_READ(DEIER); 784 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 785 POSTING_READ(DEIER); 786 787 de_iir = I915_READ(DEIIR); 788 gt_iir = I915_READ(GTIIR); 789 pch_iir = I915_READ(SDEIIR); 790 pm_iir = I915_READ(GEN6_PMIIR); 791 792 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && 793 (!IS_GEN6(dev) || pm_iir == 0)) 794 goto done; 795 796 ret = IRQ_HANDLED; 797 798 if (IS_GEN5(dev)) 799 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 800 else 801 snb_gt_irq_handler(dev, dev_priv, gt_iir); 802 803 if (de_iir & DE_GSE) 804 intel_opregion_gse_intr(dev); 805 806 if (de_iir & DE_PIPEA_VBLANK) 807 drm_handle_vblank(dev, 0); 808 809 if (de_iir & DE_PIPEB_VBLANK) 810 drm_handle_vblank(dev, 1); 811 812 if (de_iir & DE_PLANEA_FLIP_DONE) { 813 intel_prepare_page_flip(dev, 0); 814 intel_finish_page_flip_plane(dev, 0); 815 } 816 817 if (de_iir & DE_PLANEB_FLIP_DONE) { 818 intel_prepare_page_flip(dev, 1); 819 intel_finish_page_flip_plane(dev, 1); 820 } 821 822 /* check event from PCH */ 823 if (de_iir & DE_PCH_EVENT) { 824 if (HAS_PCH_CPT(dev)) 825 cpt_irq_handler(dev, pch_iir); 826 else 827 ibx_irq_handler(dev, pch_iir); 828 } 829 830 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 831 ironlake_handle_rps_change(dev); 832 833 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 834 gen6_queue_rps_work(dev_priv, pm_iir); 835 836 /* should clear PCH hotplug event before clear CPU irq */ 837 I915_WRITE(SDEIIR, pch_iir); 838 I915_WRITE(GTIIR, gt_iir); 839 I915_WRITE(DEIIR, de_iir); 840 I915_WRITE(GEN6_PMIIR, pm_iir); 841 842 done: 843 I915_WRITE(DEIER, de_ier); 844 POSTING_READ(DEIER); 845 846 return ret; 847 } 848 849 /** 850 * i915_error_work_func - do process context error handling work 851 * @work: work struct 852 * 853 * Fire an error uevent so userspace can see that a hang or error 854 * was detected. 855 */ 856 static void i915_error_work_func(struct work_struct *work) 857 { 858 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 859 error_work); 860 struct drm_device *dev = dev_priv->dev; 861 #ifndef __NetBSD__ /* XXX kobject uevent...? */ 862 char *error_event[] = { "ERROR=1", NULL }; 863 char *reset_event[] = { "RESET=1", NULL }; 864 char *reset_done_event[] = { "ERROR=0", NULL }; 865 866 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 867 #endif 868 869 if (atomic_read(&dev_priv->mm.wedged)) { 870 DRM_DEBUG_DRIVER("resetting chip\n"); 871 #ifndef __NetBSD__ /* XXX kobject uevent...? */ 872 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 873 #endif 874 if (!i915_reset(dev)) { 875 atomic_set(&dev_priv->mm.wedged, 0); 876 #ifndef __NetBSD__ /* XXX kobject uevent...? 
*/ 877 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 878 #endif 879 } 880 complete_all(&dev_priv->error_completion); 881 } 882 } 883 884 /* NB: please notice the memset */ 885 static void i915_get_extra_instdone(struct drm_device *dev, 886 uint32_t *instdone) 887 { 888 struct drm_i915_private *dev_priv = dev->dev_private; 889 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 890 891 switch(INTEL_INFO(dev)->gen) { 892 case 2: 893 case 3: 894 instdone[0] = I915_READ(INSTDONE); 895 break; 896 case 4: 897 case 5: 898 case 6: 899 instdone[0] = I915_READ(INSTDONE_I965); 900 instdone[1] = I915_READ(INSTDONE1); 901 break; 902 default: 903 WARN_ONCE(1, "Unsupported platform\n"); 904 case 7: 905 instdone[0] = I915_READ(GEN7_INSTDONE_1); 906 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 907 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 908 instdone[3] = I915_READ(GEN7_ROW_INSTDONE); 909 break; 910 } 911 } 912 913 #ifdef CONFIG_DEBUG_FS 914 static struct drm_i915_error_object * 915 i915_error_object_create(struct drm_i915_private *dev_priv, 916 struct drm_i915_gem_object *src) 917 { 918 struct drm_i915_error_object *dst; 919 int i, count; 920 u32 reloc_offset; 921 922 if (src == NULL || src->pages == NULL) 923 return NULL; 924 925 count = src->base.size / PAGE_SIZE; 926 927 dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); 928 if (dst == NULL) 929 return NULL; 930 931 reloc_offset = src->gtt_offset; 932 for (i = 0; i < count; i++) { 933 unsigned long flags; 934 void *d; 935 936 d = kmalloc(PAGE_SIZE, GFP_ATOMIC); 937 if (d == NULL) 938 goto unwind; 939 940 local_irq_save(flags); 941 if (reloc_offset < dev_priv->mm.gtt_mappable_end && 942 src->has_global_gtt_mapping) { 943 void __iomem *s; 944 945 /* Simply ignore tiling or any overlapping fence. 946 * It's part of the error state, and this hopefully 947 * captures what the GPU read. 
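			 *
			 * When the object is not GTT-mapped or lies beyond the
			 * mappable aperture, the fallback branch below copies
			 * through the CPU page instead, with clflushes on
			 * either side of the memcpy.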
948 */ 949 950 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 951 reloc_offset); 952 memcpy_fromio(d, s, PAGE_SIZE); 953 io_mapping_unmap_atomic(s); 954 } else { 955 struct page *page; 956 void *s; 957 958 page = i915_gem_object_get_page(src, i); 959 960 drm_clflush_pages(&page, 1); 961 962 s = kmap_atomic(page); 963 memcpy(d, s, PAGE_SIZE); 964 kunmap_atomic(s); 965 966 drm_clflush_pages(&page, 1); 967 } 968 local_irq_restore(flags); 969 970 dst->pages[i] = d; 971 972 reloc_offset += PAGE_SIZE; 973 } 974 dst->page_count = count; 975 dst->gtt_offset = src->gtt_offset; 976 977 return dst; 978 979 unwind: 980 while (i--) 981 kfree(dst->pages[i]); 982 kfree(dst); 983 return NULL; 984 } 985 986 static void 987 i915_error_object_free(struct drm_i915_error_object *obj) 988 { 989 int page; 990 991 if (obj == NULL) 992 return; 993 994 for (page = 0; page < obj->page_count; page++) 995 kfree(obj->pages[page]); 996 997 kfree(obj); 998 } 999 1000 void 1001 i915_error_state_free(struct kref *error_ref) 1002 { 1003 struct drm_i915_error_state *error = container_of(error_ref, 1004 typeof(*error), ref); 1005 int i; 1006 1007 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 1008 i915_error_object_free(error->ring[i].batchbuffer); 1009 i915_error_object_free(error->ring[i].ringbuffer); 1010 kfree(error->ring[i].requests); 1011 } 1012 1013 kfree(error->active_bo); 1014 kfree(error->overlay); 1015 kfree(error); 1016 } 1017 static void capture_bo(struct drm_i915_error_buffer *err, 1018 struct drm_i915_gem_object *obj) 1019 { 1020 err->size = obj->base.size; 1021 err->name = obj->base.name; 1022 err->rseqno = obj->last_read_seqno; 1023 err->wseqno = obj->last_write_seqno; 1024 err->gtt_offset = obj->gtt_offset; 1025 err->read_domains = obj->base.read_domains; 1026 err->write_domain = obj->base.write_domain; 1027 err->fence_reg = obj->fence_reg; 1028 err->pinned = 0; 1029 if (obj->pin_count > 0) 1030 err->pinned = 1; 1031 if (obj->user_pin_count > 0) 1032 err->pinned = -1; 1033 err->tiling = obj->tiling_mode; 1034 err->dirty = obj->dirty; 1035 err->purgeable = obj->madv != I915_MADV_WILLNEED; 1036 err->ring = obj->ring ? 
obj->ring->id : -1; 1037 err->cache_level = obj->cache_level; 1038 } 1039 1040 static u32 capture_active_bo(struct drm_i915_error_buffer *err, 1041 int count, struct list_head *head) 1042 { 1043 struct drm_i915_gem_object *obj; 1044 int i = 0; 1045 1046 list_for_each_entry(obj, head, mm_list) { 1047 capture_bo(err++, obj); 1048 if (++i == count) 1049 break; 1050 } 1051 1052 return i; 1053 } 1054 1055 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, 1056 int count, struct list_head *head) 1057 { 1058 struct drm_i915_gem_object *obj; 1059 int i = 0; 1060 1061 list_for_each_entry(obj, head, gtt_list) { 1062 if (obj->pin_count == 0) 1063 continue; 1064 1065 capture_bo(err++, obj); 1066 if (++i == count) 1067 break; 1068 } 1069 1070 return i; 1071 } 1072 1073 static void i915_gem_record_fences(struct drm_device *dev, 1074 struct drm_i915_error_state *error) 1075 { 1076 struct drm_i915_private *dev_priv = dev->dev_private; 1077 int i; 1078 1079 /* Fences */ 1080 switch (INTEL_INFO(dev)->gen) { 1081 case 7: 1082 case 6: 1083 for (i = 0; i < 16; i++) 1084 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 1085 break; 1086 case 5: 1087 case 4: 1088 for (i = 0; i < 16; i++) 1089 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 1090 break; 1091 case 3: 1092 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 1093 for (i = 0; i < 8; i++) 1094 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 1095 case 2: 1096 for (i = 0; i < 8; i++) 1097 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1098 break; 1099 1100 } 1101 } 1102 1103 static struct drm_i915_error_object * 1104 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 1105 struct intel_ring_buffer *ring) 1106 { 1107 struct drm_i915_gem_object *obj; 1108 u32 seqno; 1109 1110 if (!ring->get_seqno) 1111 return NULL; 1112 1113 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { 1114 u32 acthd = I915_READ(ACTHD); 1115 1116 if (WARN_ON(ring->id != RCS)) 1117 return NULL; 1118 1119 obj = ring->private; 1120 if (acthd >= obj->gtt_offset && 1121 acthd < obj->gtt_offset + obj->base.size) 1122 return i915_error_object_create(dev_priv, obj); 1123 } 1124 1125 seqno = ring->get_seqno(ring, false); 1126 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1127 if (obj->ring != ring) 1128 continue; 1129 1130 if (i915_seqno_passed(seqno, obj->last_read_seqno)) 1131 continue; 1132 1133 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 1134 continue; 1135 1136 /* We need to copy these to an anonymous buffer as the simplest 1137 * method to avoid being overwritten by userspace. 
1138 */ 1139 return i915_error_object_create(dev_priv, obj); 1140 } 1141 1142 return NULL; 1143 } 1144 1145 static void i915_record_ring_state(struct drm_device *dev, 1146 struct drm_i915_error_state *error, 1147 struct intel_ring_buffer *ring) 1148 { 1149 struct drm_i915_private *dev_priv = dev->dev_private; 1150 1151 if (INTEL_INFO(dev)->gen >= 6) { 1152 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); 1153 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 1154 error->semaphore_mboxes[ring->id][0] 1155 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1156 error->semaphore_mboxes[ring->id][1] 1157 = I915_READ(RING_SYNC_1(ring->mmio_base)); 1158 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; 1159 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; 1160 } 1161 1162 if (INTEL_INFO(dev)->gen >= 4) { 1163 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 1164 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 1165 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 1166 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 1167 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 1168 if (ring->id == RCS) 1169 error->bbaddr = I915_READ64(BB_ADDR); 1170 } else { 1171 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); 1172 error->ipeir[ring->id] = I915_READ(IPEIR); 1173 error->ipehr[ring->id] = I915_READ(IPEHR); 1174 error->instdone[ring->id] = I915_READ(INSTDONE); 1175 } 1176 1177 #ifdef __NetBSD__ 1178 { 1179 unsigned long flags; 1180 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1181 error->waiting[ring->id] = DRM_SPIN_WAITERS_P(&ring->irq_queue, 1182 &dev_priv->irq_lock); 1183 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1184 } 1185 #else 1186 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); 1187 #endif 1188 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 1189 error->seqno[ring->id] = ring->get_seqno(ring, false); 1190 error->acthd[ring->id] = intel_ring_get_active_head(ring); 1191 error->head[ring->id] = I915_READ_HEAD(ring); 1192 error->tail[ring->id] = I915_READ_TAIL(ring); 1193 error->ctl[ring->id] = I915_READ_CTL(ring); 1194 1195 error->cpu_ring_head[ring->id] = ring->head; 1196 error->cpu_ring_tail[ring->id] = ring->tail; 1197 } 1198 1199 static void i915_gem_record_rings(struct drm_device *dev, 1200 struct drm_i915_error_state *error) 1201 { 1202 struct drm_i915_private *dev_priv = dev->dev_private; 1203 struct intel_ring_buffer *ring; 1204 struct drm_i915_gem_request *request; 1205 int i, count; 1206 1207 for_each_ring(ring, dev_priv, i) { 1208 i915_record_ring_state(dev, error, ring); 1209 1210 error->ring[i].batchbuffer = 1211 i915_error_first_batchbuffer(dev_priv, ring); 1212 1213 error->ring[i].ringbuffer = 1214 i915_error_object_create(dev_priv, ring->obj); 1215 1216 count = 0; 1217 list_for_each_entry(request, &ring->request_list, list) 1218 count++; 1219 1220 error->ring[i].num_requests = count; 1221 error->ring[i].requests = 1222 kmalloc(count*sizeof(struct drm_i915_error_request), 1223 GFP_ATOMIC); 1224 if (error->ring[i].requests == NULL) { 1225 error->ring[i].num_requests = 0; 1226 continue; 1227 } 1228 1229 count = 0; 1230 list_for_each_entry(request, &ring->request_list, list) { 1231 struct drm_i915_error_request *erq; 1232 1233 erq = &error->ring[i].requests[count++]; 1234 erq->seqno = request->seqno; 1235 erq->jiffies = request->emitted_jiffies; 1236 erq->tail = request->tail; 1237 } 1238 } 1239 } 1240 1241 /** 1242 
* i915_capture_error_state - capture an error record for later analysis 1243 * @dev: drm device 1244 * 1245 * Should be called when an error is detected (either a hang or an error 1246 * interrupt) to capture error state from the time of the error. Fills 1247 * out a structure which becomes available in debugfs for user level tools 1248 * to pick up. 1249 */ 1250 static void i915_capture_error_state(struct drm_device *dev) 1251 { 1252 struct drm_i915_private *dev_priv = dev->dev_private; 1253 struct drm_i915_gem_object *obj; 1254 struct drm_i915_error_state *error; 1255 unsigned long flags; 1256 int i, pipe; 1257 1258 spin_lock_irqsave(&dev_priv->error_lock, flags); 1259 error = dev_priv->first_error; 1260 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1261 if (error) 1262 return; 1263 1264 /* Account for pipe specific data like PIPE*STAT */ 1265 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1266 if (!error) { 1267 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1268 return; 1269 } 1270 1271 DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", 1272 dev->primary->index); 1273 1274 kref_init(&error->ref); 1275 error->eir = I915_READ(EIR); 1276 error->pgtbl_er = I915_READ(PGTBL_ER); 1277 error->ccid = I915_READ(CCID); 1278 1279 if (HAS_PCH_SPLIT(dev)) 1280 error->ier = I915_READ(DEIER) | I915_READ(GTIER); 1281 else if (IS_VALLEYVIEW(dev)) 1282 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); 1283 else if (IS_GEN2(dev)) 1284 error->ier = I915_READ16(IER); 1285 else 1286 error->ier = I915_READ(IER); 1287 1288 if (INTEL_INFO(dev)->gen >= 6) 1289 error->derrmr = I915_READ(DERRMR); 1290 1291 if (IS_VALLEYVIEW(dev)) 1292 error->forcewake = I915_READ(FORCEWAKE_VLV); 1293 else if (INTEL_INFO(dev)->gen >= 7) 1294 error->forcewake = I915_READ(FORCEWAKE_MT); 1295 else if (INTEL_INFO(dev)->gen == 6) 1296 error->forcewake = I915_READ(FORCEWAKE); 1297 1298 for_each_pipe(pipe) 1299 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); 1300 1301 if (INTEL_INFO(dev)->gen >= 6) { 1302 error->error = I915_READ(ERROR_GEN6); 1303 error->done_reg = I915_READ(DONE_REG); 1304 } 1305 1306 if (INTEL_INFO(dev)->gen == 7) 1307 error->err_int = I915_READ(GEN7_ERR_INT); 1308 1309 i915_get_extra_instdone(dev, error->extra_instdone); 1310 1311 i915_gem_record_fences(dev, error); 1312 i915_gem_record_rings(dev, error); 1313 1314 /* Record buffers on the active and pinned lists. 
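	 * Both lists are counted first; a single array is then allocated and
	 * the pinned entries are stored immediately after the active ones
	 * (error->pinned_bo points into the same allocation).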
*/ 1315 error->active_bo = NULL; 1316 error->pinned_bo = NULL; 1317 1318 i = 0; 1319 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 1320 i++; 1321 error->active_bo_count = i; 1322 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 1323 if (obj->pin_count) 1324 i++; 1325 error->pinned_bo_count = i - error->active_bo_count; 1326 1327 error->active_bo = NULL; 1328 error->pinned_bo = NULL; 1329 if (i) { 1330 error->active_bo = kmalloc(sizeof(*error->active_bo)*i, 1331 GFP_ATOMIC); 1332 if (error->active_bo) 1333 error->pinned_bo = 1334 error->active_bo + error->active_bo_count; 1335 } 1336 1337 if (error->active_bo) 1338 error->active_bo_count = 1339 capture_active_bo(error->active_bo, 1340 error->active_bo_count, 1341 &dev_priv->mm.active_list); 1342 1343 if (error->pinned_bo) 1344 error->pinned_bo_count = 1345 capture_pinned_bo(error->pinned_bo, 1346 error->pinned_bo_count, 1347 &dev_priv->mm.bound_list); 1348 1349 do_gettimeofday(&error->time); 1350 1351 error->overlay = intel_overlay_capture_error_state(dev); 1352 error->display = intel_display_capture_error_state(dev); 1353 1354 spin_lock_irqsave(&dev_priv->error_lock, flags); 1355 if (dev_priv->first_error == NULL) { 1356 dev_priv->first_error = error; 1357 error = NULL; 1358 } 1359 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1360 1361 if (error) 1362 i915_error_state_free(&error->ref); 1363 } 1364 1365 void i915_destroy_error_state(struct drm_device *dev) 1366 { 1367 struct drm_i915_private *dev_priv = dev->dev_private; 1368 struct drm_i915_error_state *error; 1369 unsigned long flags; 1370 1371 spin_lock_irqsave(&dev_priv->error_lock, flags); 1372 error = dev_priv->first_error; 1373 dev_priv->first_error = NULL; 1374 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1375 1376 if (error) 1377 kref_put(&error->ref, i915_error_state_free); 1378 } 1379 #else 1380 #define i915_capture_error_state(x) 1381 #endif 1382 1383 static void i915_report_and_clear_eir(struct drm_device *dev) 1384 { 1385 struct drm_i915_private *dev_priv = dev->dev_private; 1386 uint32_t instdone[I915_NUM_INSTDONE_REG]; 1387 u32 eir = I915_READ(EIR); 1388 int pipe, i; 1389 1390 if (!eir) 1391 return; 1392 1393 pr_err("render error detected, EIR: 0x%08x\n", eir); 1394 1395 i915_get_extra_instdone(dev, instdone); 1396 1397 if (IS_G4X(dev)) { 1398 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 1399 u32 ipeir = I915_READ(IPEIR_I965); 1400 1401 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1402 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1403 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1404 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1405 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1406 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1407 I915_WRITE(IPEIR_I965, ipeir); 1408 POSTING_READ(IPEIR_I965); 1409 } 1410 if (eir & GM45_ERROR_PAGE_TABLE) { 1411 u32 pgtbl_err = I915_READ(PGTBL_ER); 1412 pr_err("page table error\n"); 1413 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1414 I915_WRITE(PGTBL_ER, pgtbl_err); 1415 POSTING_READ(PGTBL_ER); 1416 } 1417 } 1418 1419 if (!IS_GEN2(dev)) { 1420 if (eir & I915_ERROR_PAGE_TABLE) { 1421 u32 pgtbl_err = I915_READ(PGTBL_ER); 1422 pr_err("page table error\n"); 1423 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 1424 I915_WRITE(PGTBL_ER, pgtbl_err); 1425 POSTING_READ(PGTBL_ER); 1426 } 1427 } 1428 1429 if (eir & I915_ERROR_MEMORY_REFRESH) { 1430 pr_err("memory refresh error:\n"); 1431 for_each_pipe(pipe) 1432 pr_err("pipe %c stat: 0x%08x\n", 1433 pipe_name(pipe), 
I915_READ(PIPESTAT(pipe))); 1434 /* pipestat has already been acked */ 1435 } 1436 if (eir & I915_ERROR_INSTRUCTION) { 1437 pr_err("instruction error\n"); 1438 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 1439 for (i = 0; i < ARRAY_SIZE(instdone); i++) 1440 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 1441 if (INTEL_INFO(dev)->gen < 4) { 1442 u32 ipeir = I915_READ(IPEIR); 1443 1444 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 1445 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 1446 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 1447 I915_WRITE(IPEIR, ipeir); 1448 POSTING_READ(IPEIR); 1449 } else { 1450 u32 ipeir = I915_READ(IPEIR_I965); 1451 1452 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 1453 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 1454 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 1455 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 1456 I915_WRITE(IPEIR_I965, ipeir); 1457 POSTING_READ(IPEIR_I965); 1458 } 1459 } 1460 1461 I915_WRITE(EIR, eir); 1462 POSTING_READ(EIR); 1463 eir = I915_READ(EIR); 1464 if (eir) { 1465 /* 1466 * some errors might have become stuck, 1467 * mask them. 1468 */ 1469 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 1470 I915_WRITE(EMR, I915_READ(EMR) | eir); 1471 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 1472 } 1473 } 1474 1475 /** 1476 * i915_handle_error - handle an error interrupt 1477 * @dev: drm device 1478 * 1479 * Do some basic checking of regsiter state at error interrupt time and 1480 * dump it to the syslog. Also call i915_capture_error_state() to make 1481 * sure we get a record and make it available in debugfs. Fire a uevent 1482 * so userspace knows something bad happened (should trigger collection 1483 * of a ring dump etc.). 1484 */ 1485 void i915_handle_error(struct drm_device *dev, bool wedged) 1486 { 1487 struct drm_i915_private *dev_priv = dev->dev_private; 1488 struct intel_ring_buffer *ring; 1489 int i; 1490 1491 i915_capture_error_state(dev); 1492 i915_report_and_clear_eir(dev); 1493 1494 if (wedged) { 1495 INIT_COMPLETION(dev_priv->error_completion); 1496 atomic_set(&dev_priv->mm.wedged, 1); 1497 1498 /* 1499 * Wakeup waiting processes so they don't hang 1500 */ 1501 for_each_ring(ring, dev_priv, i) 1502 #ifdef __NetBSD__ 1503 { 1504 unsigned long flags; 1505 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1506 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, 1507 &dev_priv->irq_lock); 1508 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1509 } 1510 #else 1511 wake_up_all(&ring->irq_queue); 1512 #endif 1513 } 1514 1515 queue_work(dev_priv->wq, &dev_priv->error_work); 1516 } 1517 1518 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1519 { 1520 drm_i915_private_t *dev_priv = dev->dev_private; 1521 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1522 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1523 struct drm_i915_gem_object *obj; 1524 struct intel_unpin_work *work; 1525 unsigned long flags; 1526 bool stall_detected; 1527 1528 /* Ignore early vblank irqs */ 1529 if (intel_crtc == NULL) 1530 return; 1531 1532 spin_lock_irqsave(&dev->event_lock, flags); 1533 work = intel_crtc->unpin_work; 1534 1535 if (work == NULL || 1536 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 1537 !work->enable_stall_check) { 1538 /* Either the pending flip IRQ arrived, or we're too early. 
Don't check */ 1539 spin_unlock_irqrestore(&dev->event_lock, flags); 1540 return; 1541 } 1542 1543 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1544 obj = work->pending_flip_obj; 1545 if (INTEL_INFO(dev)->gen >= 4) { 1546 int dspsurf = DSPSURF(intel_crtc->plane); 1547 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 1548 obj->gtt_offset; 1549 } else { 1550 int dspaddr = DSPADDR(intel_crtc->plane); 1551 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + 1552 crtc->y * crtc->fb->pitches[0] + 1553 crtc->x * crtc->fb->bits_per_pixel/8); 1554 } 1555 1556 spin_unlock_irqrestore(&dev->event_lock, flags); 1557 1558 if (stall_detected) { 1559 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 1560 intel_prepare_page_flip(dev, intel_crtc->plane); 1561 } 1562 } 1563 1564 /* Called from drm generic code, passed 'crtc' which 1565 * we use as a pipe index 1566 */ 1567 static int i915_enable_vblank(struct drm_device *dev, int pipe) 1568 { 1569 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1570 unsigned long irqflags; 1571 1572 if (!i915_pipe_enabled(dev, pipe)) 1573 return -EINVAL; 1574 1575 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1576 if (INTEL_INFO(dev)->gen >= 4) 1577 i915_enable_pipestat(dev_priv, pipe, 1578 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1579 else 1580 i915_enable_pipestat(dev_priv, pipe, 1581 PIPE_VBLANK_INTERRUPT_ENABLE); 1582 1583 /* maintain vblank delivery even in deep C-states */ 1584 if (dev_priv->info->gen == 3) 1585 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 1586 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1587 1588 return 0; 1589 } 1590 1591 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 1592 { 1593 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1594 unsigned long irqflags; 1595 1596 if (!i915_pipe_enabled(dev, pipe)) 1597 return -EINVAL; 1598 1599 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1600 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
1601 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1602 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1603 1604 return 0; 1605 } 1606 1607 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) 1608 { 1609 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1610 unsigned long irqflags; 1611 1612 if (!i915_pipe_enabled(dev, pipe)) 1613 return -EINVAL; 1614 1615 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1616 ironlake_enable_display_irq(dev_priv, 1617 DE_PIPEA_VBLANK_IVB << (5 * pipe)); 1618 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1619 1620 return 0; 1621 } 1622 1623 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 1624 { 1625 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1626 unsigned long irqflags; 1627 u32 imr; 1628 1629 if (!i915_pipe_enabled(dev, pipe)) 1630 return -EINVAL; 1631 1632 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1633 imr = I915_READ(VLV_IMR); 1634 if (pipe == 0) 1635 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1636 else 1637 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1638 I915_WRITE(VLV_IMR, imr); 1639 i915_enable_pipestat(dev_priv, pipe, 1640 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1641 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1642 1643 return 0; 1644 } 1645 1646 /* Called from drm generic code, passed 'crtc' which 1647 * we use as a pipe index 1648 */ 1649 static void i915_disable_vblank(struct drm_device *dev, int pipe) 1650 { 1651 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1652 unsigned long irqflags; 1653 1654 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1655 if (dev_priv->info->gen == 3) 1656 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 1657 1658 i915_disable_pipestat(dev_priv, pipe, 1659 PIPE_VBLANK_INTERRUPT_ENABLE | 1660 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1661 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1662 } 1663 1664 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 1665 { 1666 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1667 unsigned long irqflags; 1668 1669 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1670 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
1671 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1672 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1673 } 1674 1675 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) 1676 { 1677 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1678 unsigned long irqflags; 1679 1680 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1681 ironlake_disable_display_irq(dev_priv, 1682 DE_PIPEA_VBLANK_IVB << (pipe * 5)); 1683 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1684 } 1685 1686 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 1687 { 1688 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1689 unsigned long irqflags; 1690 u32 imr; 1691 1692 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1693 i915_disable_pipestat(dev_priv, pipe, 1694 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1695 imr = I915_READ(VLV_IMR); 1696 if (pipe == 0) 1697 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 1698 else 1699 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 1700 I915_WRITE(VLV_IMR, imr); 1701 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1702 } 1703 1704 static u32 1705 ring_last_seqno(struct intel_ring_buffer *ring) 1706 { 1707 return list_entry(ring->request_list.prev, 1708 struct drm_i915_gem_request, list)->seqno; 1709 } 1710 1711 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1712 { 1713 if (list_empty(&ring->request_list) || 1714 i915_seqno_passed(ring->get_seqno(ring, false), 1715 ring_last_seqno(ring))) { 1716 /* Issue a wake-up to catch stuck h/w. */ 1717 #ifdef __NetBSD__ 1718 /* 1719 * XXX What invariants is the irq_queue relying on? 1720 */ 1721 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1722 unsigned long flags; 1723 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1724 if (DRM_SPIN_WAITERS_P(&ring->irq_queue, 1725 &dev_priv->irq_lock)) { 1726 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1727 ring->name); 1728 DRM_SPIN_WAKEUP_ALL(&ring->irq_queue, 1729 &dev_priv->irq_lock); 1730 *err = true; 1731 } 1732 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1733 #else 1734 if (waitqueue_active(&ring->irq_queue)) { 1735 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 1736 ring->name); 1737 wake_up_all(&ring->irq_queue); 1738 *err = true; 1739 } 1740 #endif 1741 return true; 1742 } 1743 return false; 1744 } 1745 1746 static bool kick_ring(struct intel_ring_buffer *ring) 1747 { 1748 struct drm_device *dev = ring->dev; 1749 struct drm_i915_private *dev_priv = dev->dev_private; 1750 u32 tmp = I915_READ_CTL(ring); 1751 if (tmp & RING_WAIT) { 1752 DRM_ERROR("Kicking stuck wait on %s\n", 1753 ring->name); 1754 I915_WRITE_CTL(ring, tmp); 1755 return true; 1756 } 1757 return false; 1758 } 1759 1760 static bool i915_hangcheck_hung(struct drm_device *dev) 1761 { 1762 drm_i915_private_t *dev_priv = dev->dev_private; 1763 1764 if (dev_priv->hangcheck_count++ > 1) { 1765 bool hung = true; 1766 1767 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1768 i915_handle_error(dev, true); 1769 1770 if (!IS_GEN2(dev)) { 1771 struct intel_ring_buffer *ring; 1772 int i; 1773 1774 /* Is the chip hanging on a WAIT_FOR_EVENT? 1775 * If so we can simply poke the RB_WAIT bit 1776 * and break the hang. This should work on 1777 * all but the second generation chipsets. 
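			 * (kick_ring() implements the poke: when RING_WAIT is
			 * set in a ring's CTL register it writes the value
			 * back, which kicks the ring out of the wait.)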
1778 */ 1779 for_each_ring(ring, dev_priv, i) 1780 hung &= !kick_ring(ring); 1781 } 1782 1783 return hung; 1784 } 1785 1786 return false; 1787 } 1788 1789 /** 1790 * This is called when the chip hasn't reported back with completed 1791 * batchbuffers in a long time. The first time this is called we simply record 1792 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 1793 * again, we assume the chip is wedged and try to fix it. 1794 */ 1795 void i915_hangcheck_elapsed(unsigned long data) 1796 { 1797 struct drm_device *dev = (struct drm_device *)data; 1798 drm_i915_private_t *dev_priv = dev->dev_private; 1799 uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; 1800 struct intel_ring_buffer *ring; 1801 bool err = false, idle; 1802 int i; 1803 1804 if (!i915_enable_hangcheck) 1805 return; 1806 1807 memset(acthd, 0, sizeof(acthd)); 1808 idle = true; 1809 for_each_ring(ring, dev_priv, i) { 1810 idle &= i915_hangcheck_ring_idle(ring, &err); 1811 acthd[i] = intel_ring_get_active_head(ring); 1812 } 1813 1814 /* If all work is done then ACTHD clearly hasn't advanced. */ 1815 if (idle) { 1816 if (err) { 1817 if (i915_hangcheck_hung(dev)) 1818 return; 1819 1820 goto repeat; 1821 } 1822 1823 dev_priv->hangcheck_count = 0; 1824 return; 1825 } 1826 1827 i915_get_extra_instdone(dev, instdone); 1828 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1829 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { 1830 if (i915_hangcheck_hung(dev)) 1831 return; 1832 } else { 1833 dev_priv->hangcheck_count = 0; 1834 1835 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1836 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); 1837 } 1838 1839 repeat: 1840 /* Reset timer case chip hangs without another request being added */ 1841 mod_timer(&dev_priv->hangcheck_timer, 1842 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1843 } 1844 1845 /* drm_dma.h hooks 1846 */ 1847 static void ironlake_irq_preinstall(struct drm_device *dev) 1848 { 1849 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1850 1851 atomic_set(&dev_priv->irq_received, 0); 1852 1853 I915_WRITE(HWSTAM, 0xeffe); 1854 1855 /* XXX hotplug from PCH */ 1856 1857 I915_WRITE(DEIMR, 0xffffffff); 1858 I915_WRITE(DEIER, 0x0); 1859 POSTING_READ(DEIER); 1860 1861 /* and GT */ 1862 I915_WRITE(GTIMR, 0xffffffff); 1863 I915_WRITE(GTIER, 0x0); 1864 POSTING_READ(GTIER); 1865 1866 /* south display irq */ 1867 I915_WRITE(SDEIMR, 0xffffffff); 1868 I915_WRITE(SDEIER, 0x0); 1869 POSTING_READ(SDEIER); 1870 } 1871 1872 static void valleyview_irq_preinstall(struct drm_device *dev) 1873 { 1874 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1875 int pipe; 1876 1877 atomic_set(&dev_priv->irq_received, 0); 1878 1879 /* VLV magic */ 1880 I915_WRITE(VLV_IMR, 0); 1881 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 1882 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 1883 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 1884 1885 /* and GT */ 1886 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1887 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1888 I915_WRITE(GTIMR, 0xffffffff); 1889 I915_WRITE(GTIER, 0x0); 1890 POSTING_READ(GTIER); 1891 1892 I915_WRITE(DPINVGTT, 0xff); 1893 1894 I915_WRITE(PORT_HOTPLUG_EN, 0); 1895 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1896 for_each_pipe(pipe) 1897 I915_WRITE(PIPESTAT(pipe), 0xffff); 1898 I915_WRITE(VLV_IIR, 0xffffffff); 1899 I915_WRITE(VLV_IMR, 0xffffffff); 1900 I915_WRITE(VLV_IER, 0x0); 1901 POSTING_READ(VLV_IER); 1902 } 1903 1904 /* 
1905  * Enable digital hotplug on the PCH, and configure the DP short pulse
1906  * duration to 2ms (which is the minimum in the Display Port spec)
1907  *
1908  * This register is the same on all known PCH chips.
1909  */
1910
1911 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1912 {
1913 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1914 u32 hotplug;
1915
1916 hotplug = I915_READ(PCH_PORT_HOTPLUG);
1917 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1918 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1919 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1920 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1921 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1922 }
1923
1924 static int ironlake_irq_postinstall(struct drm_device *dev)
1925 {
1926 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1927 /* display interrupts that are always enabled */
1928 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1929 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1930 u32 render_irqs;
1931 u32 hotplug_mask;
1932
1933 dev_priv->irq_mask = ~display_mask;
1934
1935 /* these should always be able to generate an irq */
1936 I915_WRITE(DEIIR, I915_READ(DEIIR));
1937 I915_WRITE(DEIMR, dev_priv->irq_mask);
1938 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1939 POSTING_READ(DEIER);
1940
1941 dev_priv->gt_irq_mask = ~0;
1942
1943 I915_WRITE(GTIIR, I915_READ(GTIIR));
1944 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1945
1946 if (IS_GEN6(dev))
1947 render_irqs =
1948 GT_USER_INTERRUPT |
1949 GEN6_BSD_USER_INTERRUPT |
1950 GEN6_BLITTER_USER_INTERRUPT;
1951 else
1952 render_irqs =
1953 GT_USER_INTERRUPT |
1954 GT_PIPE_NOTIFY |
1955 GT_BSD_USER_INTERRUPT;
1956 I915_WRITE(GTIER, render_irqs);
1957 POSTING_READ(GTIER);
1958
1959 if (HAS_PCH_CPT(dev)) {
1960 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1961 SDE_PORTB_HOTPLUG_CPT |
1962 SDE_PORTC_HOTPLUG_CPT |
1963 SDE_PORTD_HOTPLUG_CPT);
1964 } else {
1965 hotplug_mask = (SDE_CRT_HOTPLUG |
1966 SDE_PORTB_HOTPLUG |
1967 SDE_PORTC_HOTPLUG |
1968 SDE_PORTD_HOTPLUG |
1969 SDE_AUX_MASK);
1970 }
1971
1972 dev_priv->pch_irq_mask = ~hotplug_mask;
1973
1974 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1975 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1976 I915_WRITE(SDEIER, hotplug_mask);
1977 POSTING_READ(SDEIER);
1978
1979 ironlake_enable_pch_hotplug(dev);
1980
1981 if (IS_IRONLAKE_M(dev)) {
1982 /* Clear & enable PCU event interrupts */
1983 I915_WRITE(DEIIR, DE_PCU_EVENT);
1984 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1985 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1986 }
1987
1988 return 0;
1989 }
1990
1991 static int ivybridge_irq_postinstall(struct drm_device *dev)
1992 {
1993 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1994 /* display interrupts that are always enabled */
1995 u32 display_mask =
1996 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1997 DE_PLANEC_FLIP_DONE_IVB |
1998 DE_PLANEB_FLIP_DONE_IVB |
1999 DE_PLANEA_FLIP_DONE_IVB;
2000 u32 render_irqs;
2001 u32 hotplug_mask;
2002
2003 dev_priv->irq_mask = ~display_mask;
2004
2005 /* these should always be able to generate an irq */
2006 I915_WRITE(DEIIR, I915_READ(DEIIR));
2007 I915_WRITE(DEIMR, dev_priv->irq_mask);
2008 I915_WRITE(DEIER,
2009 display_mask |
2010 DE_PIPEC_VBLANK_IVB |
2011 DE_PIPEB_VBLANK_IVB |
2012 DE_PIPEA_VBLANK_IVB);
2013 POSTING_READ(DEIER);
2014
2015 dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
2016
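/*
 * Only the GEN7 L3 parity error is left unmasked in GTIMR up front;
 * the per-ring user interrupts are expected to be unmasked on demand
 * by the ring irq_get/irq_put paths once someone actually waits on a
 * seqno, while GTIER below enables them at the top level so that the
 * on-demand unmasking takes effect immediately.
 */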
2017 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2018 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2019 2020 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2021 GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; 2022 I915_WRITE(GTIER, render_irqs); 2023 POSTING_READ(GTIER); 2024 2025 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 2026 SDE_PORTB_HOTPLUG_CPT | 2027 SDE_PORTC_HOTPLUG_CPT | 2028 SDE_PORTD_HOTPLUG_CPT); 2029 dev_priv->pch_irq_mask = ~hotplug_mask; 2030 2031 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2032 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 2033 I915_WRITE(SDEIER, hotplug_mask); 2034 POSTING_READ(SDEIER); 2035 2036 ironlake_enable_pch_hotplug(dev); 2037 2038 return 0; 2039 } 2040 2041 static int valleyview_irq_postinstall(struct drm_device *dev) 2042 { 2043 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2044 u32 enable_mask; 2045 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2046 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2047 u32 render_irqs; 2048 u16 msid; 2049 2050 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2051 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2052 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2053 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2054 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2055 2056 /* 2057 *Leave vblank interrupts masked initially. enable/disable will 2058 * toggle them based on usage. 2059 */ 2060 dev_priv->irq_mask = (~enable_mask) | 2061 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2062 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2063 2064 dev_priv->pipestat[0] = 0; 2065 dev_priv->pipestat[1] = 0; 2066 2067 /* Hack for broken MSIs on VLV */ 2068 pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); 2069 pci_read_config_word(dev->pdev, 0x98, &msid); 2070 msid &= 0xff; /* mask out delivery bits */ 2071 msid |= (1<<14); 2072 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); 2073 2074 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2075 I915_WRITE(VLV_IER, enable_mask); 2076 I915_WRITE(VLV_IIR, 0xffffffff); 2077 I915_WRITE(PIPESTAT(0), 0xffff); 2078 I915_WRITE(PIPESTAT(1), 0xffff); 2079 POSTING_READ(VLV_IER); 2080 2081 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2082 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2083 2084 I915_WRITE(VLV_IIR, 0xffffffff); 2085 I915_WRITE(VLV_IIR, 0xffffffff); 2086 2087 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2088 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2089 2090 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | 2091 GEN6_BLITTER_USER_INTERRUPT; 2092 I915_WRITE(GTIER, render_irqs); 2093 POSTING_READ(GTIER); 2094 2095 /* ack & enable invalid PTE error interrupts */ 2096 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2097 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2098 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2099 #endif 2100 2101 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2102 /* Note HDMI and DP share bits */ 2103 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2104 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2105 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2106 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2107 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2108 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2109 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2110 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2111 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2112 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2113 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 
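/* The 50% voltage-compare level below matches what the i915/i965
 * postinstall paths later in this file program for CRT detection. */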
2114 hotplug_en |= CRT_HOTPLUG_INT_EN; 2115 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2116 } 2117 2118 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2119 2120 return 0; 2121 } 2122 2123 static void valleyview_irq_uninstall(struct drm_device *dev) 2124 { 2125 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2126 int pipe; 2127 2128 if (!dev_priv) 2129 return; 2130 2131 for_each_pipe(pipe) 2132 I915_WRITE(PIPESTAT(pipe), 0xffff); 2133 2134 I915_WRITE(HWSTAM, 0xffffffff); 2135 I915_WRITE(PORT_HOTPLUG_EN, 0); 2136 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2137 for_each_pipe(pipe) 2138 I915_WRITE(PIPESTAT(pipe), 0xffff); 2139 I915_WRITE(VLV_IIR, 0xffffffff); 2140 I915_WRITE(VLV_IMR, 0xffffffff); 2141 I915_WRITE(VLV_IER, 0x0); 2142 POSTING_READ(VLV_IER); 2143 } 2144 2145 static void ironlake_irq_uninstall(struct drm_device *dev) 2146 { 2147 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2148 2149 if (!dev_priv) 2150 return; 2151 2152 I915_WRITE(HWSTAM, 0xffffffff); 2153 2154 I915_WRITE(DEIMR, 0xffffffff); 2155 I915_WRITE(DEIER, 0x0); 2156 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2157 2158 I915_WRITE(GTIMR, 0xffffffff); 2159 I915_WRITE(GTIER, 0x0); 2160 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2161 2162 I915_WRITE(SDEIMR, 0xffffffff); 2163 I915_WRITE(SDEIER, 0x0); 2164 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2165 } 2166 2167 static void i8xx_irq_preinstall(struct drm_device * dev) 2168 { 2169 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2170 int pipe; 2171 2172 atomic_set(&dev_priv->irq_received, 0); 2173 2174 for_each_pipe(pipe) 2175 I915_WRITE(PIPESTAT(pipe), 0); 2176 I915_WRITE16(IMR, 0xffff); 2177 I915_WRITE16(IER, 0x0); 2178 POSTING_READ16(IER); 2179 } 2180 2181 static int i8xx_irq_postinstall(struct drm_device *dev) 2182 { 2183 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2184 2185 dev_priv->pipestat[0] = 0; 2186 dev_priv->pipestat[1] = 0; 2187 2188 I915_WRITE16(EMR, 2189 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2190 2191 /* Unmask the interrupts that we always want on. */ 2192 dev_priv->irq_mask = 2193 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2194 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2195 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2196 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2197 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2198 I915_WRITE16(IMR, dev_priv->irq_mask); 2199 2200 I915_WRITE16(IER, 2201 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2202 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2203 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2204 I915_USER_INTERRUPT); 2205 POSTING_READ16(IER); 2206 2207 return 0; 2208 } 2209 2210 static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS) 2211 { 2212 struct drm_device *dev = (struct drm_device *) arg; 2213 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2214 u16 iir, new_iir; 2215 u32 pipe_stats[2]; 2216 unsigned long irqflags; 2217 int pipe; 2218 u16 flip_mask = 2219 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2220 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2221 2222 atomic_inc(&dev_priv->irq_received); 2223 2224 iir = I915_READ16(IIR); 2225 if (iir == 0) 2226 return IRQ_NONE; 2227 2228 while (iir & ~flip_mask) { 2229 /* Can't rely on pipestat interrupt bit in iir as it might 2230 * have been cleared after the pipestat interrupt was received. 2231 * It doesn't set the bit in iir again, but it still produces 2232 * interrupts (for non-MSI). 
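 * The PIPESTAT read/write-back below therefore clears the latched
 * status bits under irq_lock before IIR itself is acked.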
2233 */ 2234 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2235 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2236 i915_handle_error(dev, false); 2237 2238 for_each_pipe(pipe) { 2239 int reg = PIPESTAT(pipe); 2240 pipe_stats[pipe] = I915_READ(reg); 2241 2242 /* 2243 * Clear the PIPE*STAT regs before the IIR 2244 */ 2245 if (pipe_stats[pipe] & 0x8000ffff) { 2246 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2247 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2248 pipe_name(pipe)); 2249 I915_WRITE(reg, pipe_stats[pipe]); 2250 } 2251 } 2252 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2253 2254 I915_WRITE16(IIR, iir & ~flip_mask); 2255 new_iir = I915_READ16(IIR); /* Flush posted writes */ 2256 2257 i915_update_dri1_breadcrumb(dev); 2258 2259 if (iir & I915_USER_INTERRUPT) 2260 notify_ring(dev, &dev_priv->ring[RCS]); 2261 2262 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && 2263 drm_handle_vblank(dev, 0)) { 2264 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 2265 intel_prepare_page_flip(dev, 0); 2266 intel_finish_page_flip(dev, 0); 2267 flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; 2268 } 2269 } 2270 2271 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && 2272 drm_handle_vblank(dev, 1)) { 2273 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { 2274 intel_prepare_page_flip(dev, 1); 2275 intel_finish_page_flip(dev, 1); 2276 flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2277 } 2278 } 2279 2280 iir = new_iir; 2281 } 2282 2283 return IRQ_HANDLED; 2284 } 2285 2286 static void i8xx_irq_uninstall(struct drm_device * dev) 2287 { 2288 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2289 int pipe; 2290 2291 for_each_pipe(pipe) { 2292 /* Clear enable bits; then clear status bits */ 2293 I915_WRITE(PIPESTAT(pipe), 0); 2294 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2295 } 2296 I915_WRITE16(IMR, 0xffff); 2297 I915_WRITE16(IER, 0x0); 2298 I915_WRITE16(IIR, I915_READ16(IIR)); 2299 } 2300 2301 static void i915_irq_preinstall(struct drm_device * dev) 2302 { 2303 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2304 int pipe; 2305 2306 atomic_set(&dev_priv->irq_received, 0); 2307 2308 if (I915_HAS_HOTPLUG(dev)) { 2309 I915_WRITE(PORT_HOTPLUG_EN, 0); 2310 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2311 } 2312 2313 I915_WRITE16(HWSTAM, 0xeffe); 2314 for_each_pipe(pipe) 2315 I915_WRITE(PIPESTAT(pipe), 0); 2316 I915_WRITE(IMR, 0xffffffff); 2317 I915_WRITE(IER, 0x0); 2318 POSTING_READ(IER); 2319 } 2320 2321 static int i915_irq_postinstall(struct drm_device *dev) 2322 { 2323 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2324 u32 enable_mask; 2325 2326 dev_priv->pipestat[0] = 0; 2327 dev_priv->pipestat[1] = 0; 2328 2329 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 2330 2331 /* Unmask the interrupts that we always want on. */ 2332 dev_priv->irq_mask = 2333 ~(I915_ASLE_INTERRUPT | 2334 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2335 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2336 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2337 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2338 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2339 2340 enable_mask = 2341 I915_ASLE_INTERRUPT | 2342 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2343 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2344 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 2345 I915_USER_INTERRUPT; 2346 2347 if (I915_HAS_HOTPLUG(dev)) { 2348 /* Enable in IER... 
*/ 2349 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2350 /* and unmask in IMR */ 2351 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 2352 } 2353 2354 I915_WRITE(IMR, dev_priv->irq_mask); 2355 I915_WRITE(IER, enable_mask); 2356 POSTING_READ(IER); 2357 2358 if (I915_HAS_HOTPLUG(dev)) { 2359 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2360 2361 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2362 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2363 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2364 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2365 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2366 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2367 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2368 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2369 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2370 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2371 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2372 hotplug_en |= CRT_HOTPLUG_INT_EN; 2373 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2374 } 2375 2376 /* Ignore TV since it's buggy */ 2377 2378 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2379 } 2380 2381 intel_opregion_enable_asle(dev); 2382 2383 return 0; 2384 } 2385 2386 static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) 2387 { 2388 struct drm_device *dev = (struct drm_device *) arg; 2389 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2390 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 2391 unsigned long irqflags; 2392 u32 flip_mask = 2393 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2394 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 2395 u32 flip[2] = { 2396 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, 2397 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT 2398 }; 2399 int pipe, ret = IRQ_NONE; 2400 2401 atomic_inc(&dev_priv->irq_received); 2402 2403 iir = I915_READ(IIR); 2404 do { 2405 bool irq_received = (iir & ~flip_mask) != 0; 2406 bool blc_event = false; 2407 2408 /* Can't rely on pipestat interrupt bit in iir as it might 2409 * have been cleared after the pipestat interrupt was received. 2410 * It doesn't set the bit in iir again, but it still produces 2411 * interrupts (for non-MSI). 2412 */ 2413 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2414 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2415 i915_handle_error(dev, false); 2416 2417 for_each_pipe(pipe) { 2418 int reg = PIPESTAT(pipe); 2419 pipe_stats[pipe] = I915_READ(reg); 2420 2421 /* Clear the PIPE*STAT regs before the IIR */ 2422 if (pipe_stats[pipe] & 0x8000ffff) { 2423 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2424 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2425 pipe_name(pipe)); 2426 I915_WRITE(reg, pipe_stats[pipe]); 2427 irq_received = true; 2428 } 2429 } 2430 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2431 2432 if (!irq_received) 2433 break; 2434 2435 /* Consume port. 
Then clear IIR or we'll miss events */ 2436 if ((I915_HAS_HOTPLUG(dev)) && 2437 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 2438 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2439 2440 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2441 hotplug_status); 2442 if (hotplug_status & dev_priv->hotplug_supported_mask) 2443 queue_work(dev_priv->wq, 2444 &dev_priv->hotplug_work); 2445 2446 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2447 POSTING_READ(PORT_HOTPLUG_STAT); 2448 } 2449 2450 I915_WRITE(IIR, iir & ~flip_mask); 2451 new_iir = I915_READ(IIR); /* Flush posted writes */ 2452 2453 if (iir & I915_USER_INTERRUPT) 2454 notify_ring(dev, &dev_priv->ring[RCS]); 2455 2456 for_each_pipe(pipe) { 2457 int plane = pipe; 2458 if (IS_MOBILE(dev)) 2459 plane = !plane; 2460 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 2461 drm_handle_vblank(dev, pipe)) { 2462 if (iir & flip[plane]) { 2463 intel_prepare_page_flip(dev, plane); 2464 intel_finish_page_flip(dev, pipe); 2465 flip_mask &= ~flip[plane]; 2466 } 2467 } 2468 2469 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2470 blc_event = true; 2471 } 2472 2473 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2474 intel_opregion_asle_intr(dev); 2475 2476 /* With MSI, interrupts are only generated when iir 2477 * transitions from zero to nonzero. If another bit got 2478 * set while we were handling the existing iir bits, then 2479 * we would never get another interrupt. 2480 * 2481 * This is fine on non-MSI as well, as if we hit this path 2482 * we avoid exiting the interrupt handler only to generate 2483 * another one. 2484 * 2485 * Note that for MSI this could cause a stray interrupt report 2486 * if an interrupt landed in the time between writing IIR and 2487 * the posting read. This should be rare enough to never 2488 * trigger the 99% of 100,000 interrupts test for disabling 2489 * stray interrupts. 2490 */ 2491 ret = IRQ_HANDLED; 2492 iir = new_iir; 2493 } while (iir & ~flip_mask); 2494 2495 i915_update_dri1_breadcrumb(dev); 2496 2497 return ret; 2498 } 2499 2500 static void i915_irq_uninstall(struct drm_device * dev) 2501 { 2502 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2503 int pipe; 2504 2505 if (I915_HAS_HOTPLUG(dev)) { 2506 I915_WRITE(PORT_HOTPLUG_EN, 0); 2507 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2508 } 2509 2510 I915_WRITE16(HWSTAM, 0xffff); 2511 for_each_pipe(pipe) { 2512 /* Clear enable bits; then clear status bits */ 2513 I915_WRITE(PIPESTAT(pipe), 0); 2514 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 2515 } 2516 I915_WRITE(IMR, 0xffffffff); 2517 I915_WRITE(IER, 0x0); 2518 2519 I915_WRITE(IIR, I915_READ(IIR)); 2520 } 2521 2522 static void i965_irq_preinstall(struct drm_device * dev) 2523 { 2524 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2525 int pipe; 2526 2527 atomic_set(&dev_priv->irq_received, 0); 2528 2529 I915_WRITE(PORT_HOTPLUG_EN, 0); 2530 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2531 2532 I915_WRITE(HWSTAM, 0xeffe); 2533 for_each_pipe(pipe) 2534 I915_WRITE(PIPESTAT(pipe), 0); 2535 I915_WRITE(IMR, 0xffffffff); 2536 I915_WRITE(IER, 0x0); 2537 POSTING_READ(IER); 2538 } 2539 2540 static int i965_irq_postinstall(struct drm_device *dev) 2541 { 2542 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2543 u32 hotplug_en; 2544 u32 enable_mask; 2545 u32 error_mask; 2546 2547 /* Unmask the interrupts that we always want on. 
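 * Rough division of labour: IMR masks individual sources from raising
 * the interrupt, IER enables them, and EMR below selects which hardware
 * error conditions get reported through the error interrupt.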
*/ 2548 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 2549 I915_DISPLAY_PORT_INTERRUPT | 2550 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2551 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2552 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 2553 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 2554 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2555 2556 enable_mask = ~dev_priv->irq_mask; 2557 enable_mask |= I915_USER_INTERRUPT; 2558 2559 if (IS_G4X(dev)) 2560 enable_mask |= I915_BSD_USER_INTERRUPT; 2561 2562 dev_priv->pipestat[0] = 0; 2563 dev_priv->pipestat[1] = 0; 2564 2565 /* 2566 * Enable some error detection, note the instruction error mask 2567 * bit is reserved, so we leave it masked. 2568 */ 2569 if (IS_G4X(dev)) { 2570 error_mask = ~(GM45_ERROR_PAGE_TABLE | 2571 GM45_ERROR_MEM_PRIV | 2572 GM45_ERROR_CP_PRIV | 2573 I915_ERROR_MEMORY_REFRESH); 2574 } else { 2575 error_mask = ~(I915_ERROR_PAGE_TABLE | 2576 I915_ERROR_MEMORY_REFRESH); 2577 } 2578 I915_WRITE(EMR, error_mask); 2579 2580 I915_WRITE(IMR, dev_priv->irq_mask); 2581 I915_WRITE(IER, enable_mask); 2582 POSTING_READ(IER); 2583 2584 /* Note HDMI and DP share hotplug bits */ 2585 hotplug_en = 0; 2586 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2587 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2588 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2589 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2590 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2591 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2592 if (IS_G4X(dev)) { 2593 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) 2594 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2595 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) 2596 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2597 } else { 2598 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) 2599 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2600 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) 2601 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2602 } 2603 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2604 hotplug_en |= CRT_HOTPLUG_INT_EN; 2605 2606 /* Programming the CRT detection parameters tends 2607 to generate a spurious hotplug event about three 2608 seconds later. So just do it once. 2609 */ 2610 if (IS_G4X(dev)) 2611 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 2612 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2613 } 2614 2615 /* Ignore TV since it's buggy */ 2616 2617 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2618 2619 intel_opregion_enable_asle(dev); 2620 2621 return 0; 2622 } 2623 2624 static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) 2625 { 2626 struct drm_device *dev = (struct drm_device *) arg; 2627 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2628 u32 iir, new_iir; 2629 u32 pipe_stats[I915_MAX_PIPES]; 2630 unsigned long irqflags; 2631 int irq_received; 2632 int ret = IRQ_NONE, pipe; 2633 2634 atomic_inc(&dev_priv->irq_received); 2635 2636 iir = I915_READ(IIR); 2637 2638 for (;;) { 2639 bool blc_event = false; 2640 2641 irq_received = iir != 0; 2642 2643 /* Can't rely on pipestat interrupt bit in iir as it might 2644 * have been cleared after the pipestat interrupt was received. 2645 * It doesn't set the bit in iir again, but it still produces 2646 * interrupts (for non-MSI). 
2647 */ 2648 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2649 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 2650 i915_handle_error(dev, false); 2651 2652 for_each_pipe(pipe) { 2653 int reg = PIPESTAT(pipe); 2654 pipe_stats[pipe] = I915_READ(reg); 2655 2656 /* 2657 * Clear the PIPE*STAT regs before the IIR 2658 */ 2659 if (pipe_stats[pipe] & 0x8000ffff) { 2660 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 2661 DRM_DEBUG_DRIVER("pipe %c underrun\n", 2662 pipe_name(pipe)); 2663 I915_WRITE(reg, pipe_stats[pipe]); 2664 irq_received = 1; 2665 } 2666 } 2667 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2668 2669 if (!irq_received) 2670 break; 2671 2672 ret = IRQ_HANDLED; 2673 2674 /* Consume port. Then clear IIR or we'll miss events */ 2675 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 2676 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 2677 2678 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 2679 hotplug_status); 2680 if (hotplug_status & dev_priv->hotplug_supported_mask) 2681 queue_work(dev_priv->wq, 2682 &dev_priv->hotplug_work); 2683 2684 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 2685 I915_READ(PORT_HOTPLUG_STAT); 2686 } 2687 2688 I915_WRITE(IIR, iir); 2689 new_iir = I915_READ(IIR); /* Flush posted writes */ 2690 2691 if (iir & I915_USER_INTERRUPT) 2692 notify_ring(dev, &dev_priv->ring[RCS]); 2693 if (iir & I915_BSD_USER_INTERRUPT) 2694 notify_ring(dev, &dev_priv->ring[VCS]); 2695 2696 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) 2697 intel_prepare_page_flip(dev, 0); 2698 2699 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) 2700 intel_prepare_page_flip(dev, 1); 2701 2702 for_each_pipe(pipe) { 2703 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 2704 drm_handle_vblank(dev, pipe)) { 2705 i915_pageflip_stall_check(dev, pipe); 2706 intel_finish_page_flip(dev, pipe); 2707 } 2708 2709 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 2710 blc_event = true; 2711 } 2712 2713 2714 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2715 intel_opregion_asle_intr(dev); 2716 2717 /* With MSI, interrupts are only generated when iir 2718 * transitions from zero to nonzero. If another bit got 2719 * set while we were handling the existing iir bits, then 2720 * we would never get another interrupt. 2721 * 2722 * This is fine on non-MSI as well, as if we hit this path 2723 * we avoid exiting the interrupt handler only to generate 2724 * another one. 2725 * 2726 * Note that for MSI this could cause a stray interrupt report 2727 * if an interrupt landed in the time between writing IIR and 2728 * the posting read. This should be rare enough to never 2729 * trigger the 99% of 100,000 interrupts test for disabling 2730 * stray interrupts. 
2731 */ 2732 iir = new_iir; 2733 } 2734 2735 i915_update_dri1_breadcrumb(dev); 2736 2737 return ret; 2738 } 2739 2740 static void i965_irq_uninstall(struct drm_device * dev) 2741 { 2742 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2743 int pipe; 2744 2745 if (!dev_priv) 2746 return; 2747 2748 I915_WRITE(PORT_HOTPLUG_EN, 0); 2749 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2750 2751 I915_WRITE(HWSTAM, 0xffffffff); 2752 for_each_pipe(pipe) 2753 I915_WRITE(PIPESTAT(pipe), 0); 2754 I915_WRITE(IMR, 0xffffffff); 2755 I915_WRITE(IER, 0x0); 2756 2757 for_each_pipe(pipe) 2758 I915_WRITE(PIPESTAT(pipe), 2759 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 2760 I915_WRITE(IIR, I915_READ(IIR)); 2761 } 2762 2763 void intel_irq_init(struct drm_device *dev) 2764 { 2765 struct drm_i915_private *dev_priv = dev->dev_private; 2766 2767 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2768 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2769 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2770 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 2771 2772 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2773 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2774 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 2775 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 2776 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 2777 } 2778 2779 if (drm_core_check_feature(dev, DRIVER_MODESET)) 2780 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 2781 else 2782 dev->driver->get_vblank_timestamp = NULL; 2783 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 2784 2785 if (IS_VALLEYVIEW(dev)) { 2786 dev->driver->irq_handler = valleyview_irq_handler; 2787 dev->driver->irq_preinstall = valleyview_irq_preinstall; 2788 dev->driver->irq_postinstall = valleyview_irq_postinstall; 2789 dev->driver->irq_uninstall = valleyview_irq_uninstall; 2790 dev->driver->enable_vblank = valleyview_enable_vblank; 2791 dev->driver->disable_vblank = valleyview_disable_vblank; 2792 } else if (IS_IVYBRIDGE(dev)) { 2793 /* Share pre & uninstall handlers with ILK/SNB */ 2794 dev->driver->irq_handler = ivybridge_irq_handler; 2795 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2796 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2797 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2798 dev->driver->enable_vblank = ivybridge_enable_vblank; 2799 dev->driver->disable_vblank = ivybridge_disable_vblank; 2800 } else if (IS_HASWELL(dev)) { 2801 /* Share interrupts handling with IVB */ 2802 dev->driver->irq_handler = ivybridge_irq_handler; 2803 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2804 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 2805 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2806 dev->driver->enable_vblank = ivybridge_enable_vblank; 2807 dev->driver->disable_vblank = ivybridge_disable_vblank; 2808 } else if (HAS_PCH_SPLIT(dev)) { 2809 dev->driver->irq_handler = ironlake_irq_handler; 2810 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2811 dev->driver->irq_postinstall = ironlake_irq_postinstall; 2812 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2813 dev->driver->enable_vblank = ironlake_enable_vblank; 2814 dev->driver->disable_vblank = ironlake_disable_vblank; 2815 } else { 2816 if (INTEL_INFO(dev)->gen == 2) { 2817 dev->driver->irq_preinstall = i8xx_irq_preinstall; 2818 dev->driver->irq_postinstall = i8xx_irq_postinstall; 2819 
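/* gen2 parts expose 16-bit IIR/IMR/IER, hence the dedicated i8xx_*
 * hooks above that use the I915_WRITE16/I915_READ16 accessors. */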
dev->driver->irq_handler = i8xx_irq_handler; 2820 dev->driver->irq_uninstall = i8xx_irq_uninstall; 2821 } else if (INTEL_INFO(dev)->gen == 3) { 2822 dev->driver->irq_preinstall = i915_irq_preinstall; 2823 dev->driver->irq_postinstall = i915_irq_postinstall; 2824 dev->driver->irq_uninstall = i915_irq_uninstall; 2825 dev->driver->irq_handler = i915_irq_handler; 2826 } else { 2827 dev->driver->irq_preinstall = i965_irq_preinstall; 2828 dev->driver->irq_postinstall = i965_irq_postinstall; 2829 dev->driver->irq_uninstall = i965_irq_uninstall; 2830 dev->driver->irq_handler = i965_irq_handler; 2831 } 2832 dev->driver->enable_vblank = i915_enable_vblank; 2833 dev->driver->disable_vblank = i915_disable_vblank; 2834 } 2835 } 2836
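/*
 * Usage sketch (illustrative only, not part of this file): the hooks
 * chosen by intel_irq_init() are consumed by the DRM core.  A driver
 * load path would typically do something along these lines, with
 * drm_irq_install() invoking irq_preinstall, registering
 * dev->driver->irq_handler via request_irq(), and then calling
 * irq_postinstall:
 *
 *	intel_irq_init(dev);
 *	ret = drm_irq_install(dev);
 *	if (ret)
 *		goto cleanup;
 *	...
 *	drm_irq_uninstall(dev);	(runs the irq_uninstall hook, then free_irq)
 */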