1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*- 2 */ 3 /* 4 * 5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 6 * All Rights Reserved. 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the 10 * "Software"), to deal in the Software without restriction, including 11 * without limitation the rights to use, copy, modify, merge, publish, 12 * distribute, sub license, and/or sell copies of the Software, and to 13 * permit persons to whom the Software is furnished to do so, subject to 14 * the following conditions: 15 * 16 * The above copyright notice and this permission notice (including the 17 * next paragraph) shall be included in all copies or substantial portions 18 * of the Software. 19 * 20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 27 * 28 */ 29 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 #include <uapi_drm/i915_drm.h> 34 35 #include "i915_reg.h" 36 #include "intel_bios.h" 37 #include "intel_ringbuffer.h" 38 #include <linux/io-mapping.h> 39 #include <linux/i2c.h> 40 #include <drm/intel-gtt.h> 41 #include <linux/kref.h> 42 #include <linux/pm_qos.h> 43 44 #define CONFIG_ACPI 1 45 46 /* General customization: 47 */ 48 49 #define DRIVER_AUTHOR "Tungsten Graphics, Inc." 
50 51 #define DRIVER_NAME "i915" 52 #define DRIVER_DESC "Intel Graphics" 53 #define DRIVER_DATE "20080730" 54 55 enum i915_pipe { 56 PIPE_A = 0, 57 PIPE_B, 58 PIPE_C, 59 I915_MAX_PIPES 60 }; 61 #define pipe_name(p) ((p) + 'A') 62 63 enum transcoder { 64 TRANSCODER_A = 0, 65 TRANSCODER_B, 66 TRANSCODER_C, 67 TRANSCODER_EDP = 0xF, 68 }; 69 #define transcoder_name(t) ((t) + 'A') 70 71 enum plane { 72 PLANE_A = 0, 73 PLANE_B, 74 PLANE_C, 75 }; 76 #define plane_name(p) ((p) + 'A') 77 78 #define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A') 79 80 enum port { 81 PORT_A = 0, 82 PORT_B, 83 PORT_C, 84 PORT_D, 85 PORT_E, 86 I915_MAX_PORTS 87 }; 88 #define port_name(p) ((p) + 'A') 89 90 enum intel_display_power_domain { 91 POWER_DOMAIN_PIPE_A, 92 POWER_DOMAIN_PIPE_B, 93 POWER_DOMAIN_PIPE_C, 94 POWER_DOMAIN_PIPE_A_PANEL_FITTER, 95 POWER_DOMAIN_PIPE_B_PANEL_FITTER, 96 POWER_DOMAIN_PIPE_C_PANEL_FITTER, 97 POWER_DOMAIN_TRANSCODER_A, 98 POWER_DOMAIN_TRANSCODER_B, 99 POWER_DOMAIN_TRANSCODER_C, 100 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF, 101 }; 102 103 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) 104 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \ 105 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) 106 #define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A) 107 108 enum hpd_pin { 109 HPD_NONE = 0, 110 HPD_PORT_A = HPD_NONE, /* PORT_A is internal */ 111 HPD_TV = HPD_NONE, /* TV is known to be unreliable */ 112 HPD_CRT, 113 HPD_SDVO_B, 114 HPD_SDVO_C, 115 HPD_PORT_B, 116 HPD_PORT_C, 117 HPD_PORT_D, 118 HPD_NUM_PINS 119 }; 120 121 #define I915_GEM_GPU_DOMAINS \ 122 (I915_GEM_DOMAIN_RENDER | \ 123 I915_GEM_DOMAIN_SAMPLER | \ 124 I915_GEM_DOMAIN_COMMAND | \ 125 I915_GEM_DOMAIN_INSTRUCTION | \ 126 I915_GEM_DOMAIN_VERTEX) 127 128 #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) 129 130 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 131 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ 132 if ((intel_encoder)->base.crtc == (__crtc)) 133 134 struct drm_i915_private; 135 136 enum intel_dpll_id { 137 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ 138 /* real shared dpll ids must be >= 0 */ 139 DPLL_ID_PCH_PLL_A, 140 DPLL_ID_PCH_PLL_B, 141 }; 142 #define I915_NUM_PLLS 2 143 144 struct intel_dpll_hw_state { 145 uint32_t dpll; 146 uint32_t fp0; 147 uint32_t fp1; 148 }; 149 150 struct intel_shared_dpll { 151 int refcount; /* count of number of CRTCs sharing this PLL */ 152 int active; /* count of number of active CRTCs (i.e. DPMS on) */ 153 bool on; /* is the PLL actually active? 
Disabled during modeset */ 154 const char *name; 155 /* should match the index in the dev_priv->shared_dplls array */ 156 enum intel_dpll_id id; 157 struct intel_dpll_hw_state hw_state; 158 void (*enable)(struct drm_i915_private *dev_priv, 159 struct intel_shared_dpll *pll); 160 void (*disable)(struct drm_i915_private *dev_priv, 161 struct intel_shared_dpll *pll); 162 bool (*get_hw_state)(struct drm_i915_private *dev_priv, 163 struct intel_shared_dpll *pll, 164 struct intel_dpll_hw_state *hw_state); 165 }; 166 167 /* Used by dp and fdi links */ 168 struct intel_link_m_n { 169 uint32_t tu; 170 uint32_t gmch_m; 171 uint32_t gmch_n; 172 uint32_t link_m; 173 uint32_t link_n; 174 }; 175 176 void intel_link_compute_m_n(int bpp, int nlanes, 177 int pixel_clock, int link_clock, 178 struct intel_link_m_n *m_n); 179 180 struct intel_ddi_plls { 181 int spll_refcount; 182 int wrpll1_refcount; 183 int wrpll2_refcount; 184 }; 185 186 /* Interface history: 187 * 188 * 1.1: Original. 189 * 1.2: Add Power Management 190 * 1.3: Add vblank support 191 * 1.4: Fix cmdbuffer path, add heap destroy 192 * 1.5: Add vblank pipe configuration 193 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank 194 * - Support vertical blank on secondary display pipe 195 */ 196 #define DRIVER_MAJOR 1 197 #define DRIVER_MINOR 6 198 #define DRIVER_PATCHLEVEL 0 199 200 #define WATCH_COHERENCY 0 201 #define WATCH_LISTS 0 202 #define WATCH_GTT 0 203 204 #define I915_GEM_PHYS_CURSOR_0 1 205 #define I915_GEM_PHYS_CURSOR_1 2 206 #define I915_GEM_PHYS_OVERLAY_REGS 3 207 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) 208 209 struct drm_i915_gem_phys_object { 210 int id; 211 struct page **page_list; 212 drm_dma_handle_t *handle; 213 struct drm_i915_gem_object *cur_obj; 214 }; 215 216 struct opregion_header; 217 struct opregion_acpi; 218 struct opregion_swsci; 219 struct opregion_asle; 220 221 struct intel_opregion { 222 struct opregion_header __iomem *header; 223 struct opregion_acpi __iomem *acpi; 224 struct opregion_swsci __iomem *swsci; 225 struct opregion_asle __iomem *asle; 226 void __iomem *vbt; 227 u32 __iomem *lid_state; 228 }; 229 #define OPREGION_SIZE (8*1024) 230 231 struct intel_overlay; 232 struct intel_overlay_error_state; 233 234 struct drm_i915_master_private { 235 drm_local_map_t *sarea; 236 struct _drm_i915_sarea *sarea_priv; 237 }; 238 #define I915_FENCE_REG_NONE -1 239 #define I915_MAX_NUM_FENCES 32 240 /* 32 fences + sign bit for FENCE_REG_NONE */ 241 #define I915_MAX_NUM_FENCE_BITS 6 242 243 struct drm_i915_fence_reg { 244 struct list_head lru_list; 245 struct drm_i915_gem_object *obj; 246 int pin_count; 247 }; 248 249 struct sdvo_device_mapping { 250 u8 initialized; 251 u8 dvo_port; 252 u8 slave_addr; 253 u8 dvo_wiring; 254 u8 i2c_pin; 255 u8 ddc_pin; 256 }; 257 258 struct intel_display_error_state; 259 260 struct drm_i915_error_state { 261 struct kref ref; 262 u32 eir; 263 u32 pgtbl_er; 264 u32 ier; 265 u32 ccid; 266 u32 derrmr; 267 u32 forcewake; 268 bool waiting[I915_NUM_RINGS]; 269 u32 pipestat[I915_MAX_PIPES]; 270 u32 tail[I915_NUM_RINGS]; 271 u32 head[I915_NUM_RINGS]; 272 u32 ctl[I915_NUM_RINGS]; 273 u32 ipeir[I915_NUM_RINGS]; 274 u32 ipehr[I915_NUM_RINGS]; 275 u32 instdone[I915_NUM_RINGS]; 276 u32 acthd[I915_NUM_RINGS]; 277 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; 278 u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1]; 279 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ 280 /* our own tracking of ring head and tail */ 281 u32 cpu_ring_head[I915_NUM_RINGS]; 282 u32 
cpu_ring_tail[I915_NUM_RINGS]; 283 u32 error; /* gen6+ */ 284 u32 err_int; /* gen7 */ 285 u32 instpm[I915_NUM_RINGS]; 286 u32 instps[I915_NUM_RINGS]; 287 u32 extra_instdone[I915_NUM_INSTDONE_REG]; 288 u32 seqno[I915_NUM_RINGS]; 289 u64 bbaddr; 290 u32 fault_reg[I915_NUM_RINGS]; 291 u32 done_reg; 292 u32 faddr[I915_NUM_RINGS]; 293 u64 fence[I915_MAX_NUM_FENCES]; 294 struct timeval time; 295 struct drm_i915_error_ring { 296 struct drm_i915_error_object { 297 int page_count; 298 u32 gtt_offset; 299 u32 *pages[0]; 300 } *ringbuffer, *batchbuffer, *ctx; 301 struct drm_i915_error_request { 302 long jiffies; 303 u32 seqno; 304 u32 tail; 305 } *requests; 306 int num_requests; 307 } ring[I915_NUM_RINGS]; 308 struct drm_i915_error_buffer { 309 u32 size; 310 u32 name; 311 u32 rseqno, wseqno; 312 u32 gtt_offset; 313 u32 read_domains; 314 u32 write_domain; 315 s32 fence_reg:I915_MAX_NUM_FENCE_BITS; 316 s32 pinned:2; 317 u32 tiling:2; 318 u32 dirty:1; 319 u32 purgeable:1; 320 s32 ring:4; 321 u32 cache_level:2; 322 } *active_bo, *pinned_bo; 323 u32 active_bo_count, pinned_bo_count; 324 struct intel_overlay_error_state *overlay; 325 struct intel_display_error_state *display; 326 }; 327 328 struct intel_crtc_config; 329 struct intel_crtc; 330 struct intel_limit; 331 struct dpll; 332 333 struct drm_i915_display_funcs { 334 bool (*fbc_enabled)(struct drm_device *dev); 335 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); 336 void (*disable_fbc)(struct drm_device *dev); 337 int (*get_display_clock_speed)(struct drm_device *dev); 338 int (*get_fifo_size)(struct drm_device *dev, int plane); 339 /** 340 * find_dpll() - Find the best values for the PLL 341 * @limit: limits for the PLL 342 * @crtc: current CRTC 343 * @target: target frequency in kHz 344 * @refclk: reference clock frequency in kHz 345 * @match_clock: if provided, @best_clock P divider must 346 * match the P divider from @match_clock 347 * used for LVDS downclocking 348 * @best_clock: best PLL values found 349 * 350 * Returns true on success, false on failure. 351 */ 352 bool (*find_dpll)(const struct intel_limit *limit, 353 struct drm_crtc *crtc, 354 int target, int refclk, 355 struct dpll *match_clock, 356 struct dpll *best_clock); 357 void (*update_wm)(struct drm_device *dev); 358 void (*update_sprite_wm)(struct drm_device *dev, int pipe, 359 uint32_t sprite_width, int pixel_size, 360 bool enable); 361 void (*modeset_global_resources)(struct drm_device *dev); 362 /* Returns the active state of the crtc, and if the crtc is active, 363 * fills out the pipe-config with the hw state. 
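	 * Typically consumed by the modeset state readout/cross-check code to
	 * recover the current hardware configuration of a pipe (a general note
	 * on usage; the exact callers live outside this header).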
*/ 364 bool (*get_pipe_config)(struct intel_crtc *, 365 struct intel_crtc_config *); 366 int (*crtc_mode_set)(struct drm_crtc *crtc, 367 int x, int y, 368 struct drm_framebuffer *old_fb); 369 void (*crtc_enable)(struct drm_crtc *crtc); 370 void (*crtc_disable)(struct drm_crtc *crtc); 371 void (*off)(struct drm_crtc *crtc); 372 void (*write_eld)(struct drm_connector *connector, 373 struct drm_crtc *crtc); 374 void (*fdi_link_train)(struct drm_crtc *crtc); 375 void (*init_clock_gating)(struct drm_device *dev); 376 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 377 struct drm_framebuffer *fb, 378 struct drm_i915_gem_object *obj); 379 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 380 int x, int y); 381 void (*hpd_irq_setup)(struct drm_device *dev); 382 /* clock updates for mode set */ 383 /* cursor updates */ 384 /* render clock increase/decrease */ 385 /* display clock increase/decrease */ 386 /* pll clock increase/decrease */ 387 }; 388 389 struct drm_i915_gt_funcs { 390 void (*force_wake_get)(struct drm_i915_private *dev_priv); 391 void (*force_wake_put)(struct drm_i915_private *dev_priv); 392 }; 393 394 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ 395 func(is_mobile) sep \ 396 func(is_i85x) sep \ 397 func(is_i915g) sep \ 398 func(is_i945gm) sep \ 399 func(is_g33) sep \ 400 func(need_gfx_hws) sep \ 401 func(is_g4x) sep \ 402 func(is_pineview) sep \ 403 func(is_broadwater) sep \ 404 func(is_crestline) sep \ 405 func(is_ivybridge) sep \ 406 func(is_valleyview) sep \ 407 func(is_haswell) sep \ 408 func(has_force_wake) sep \ 409 func(has_fbc) sep \ 410 func(has_pipe_cxsr) sep \ 411 func(has_hotplug) sep \ 412 func(cursor_needs_physical) sep \ 413 func(has_overlay) sep \ 414 func(overlay_needs_physical) sep \ 415 func(supports_tv) sep \ 416 func(has_bsd_ring) sep \ 417 func(has_blt_ring) sep \ 418 func(has_vebox_ring) sep \ 419 func(has_llc) sep \ 420 func(has_ddi) sep \ 421 func(has_fpga_dbg) 422 423 #define DEFINE_FLAG(name) u8 name:1 424 #define SEP_SEMICOLON ; 425 426 struct intel_device_info { 427 u32 display_mmio_offset; 428 u8 num_pipes:3; 429 u8 gen; 430 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 431 }; 432 433 #undef DEFINE_FLAG 434 #undef SEP_SEMICOLON 435 436 enum i915_cache_level { 437 I915_CACHE_NONE = 0, 438 I915_CACHE_LLC, 439 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ 440 }; 441 442 typedef uint32_t gen6_gtt_pte_t; 443 444 /* The Graphics Translation Table is the way in which GEN hardware translates a 445 * Graphics Virtual Address into a Physical Address. In addition to the normal 446 * collateral associated with any va->pa translations GEN hardware also has a 447 * portion of the GTT which can be mapped by the CPU and remain both coherent 448 * and correct (in cases like swizzling). That region is referred to as GMADR in 449 * the spec. 
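 *
 * As a rough sketch of the layout described by the struct below: the GTT is
 * an array of PTEs covering gtt.total bytes of graphics virtual address
 * space, so the number of entries is simply the size divided by the page
 * size, which is what gtt_total_entries() computes:
 *
 *	entries = gtt.total >> PAGE_SHIFT;
 *
 * Only the first gtt.mappable_end bytes of that space are also reachable by
 * the CPU through the GMADR aperture (io-mapped via gtt.mappable).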
450 */ 451 struct i915_gtt { 452 unsigned long start; /* Start offset of used GTT */ 453 size_t total; /* Total size GTT can map */ 454 size_t stolen_size; /* Total size of stolen memory */ 455 456 unsigned long mappable_end; /* End offset that we can CPU map */ 457 struct io_mapping *mappable; /* Mapping to our CPU mappable region */ 458 phys_addr_t mappable_base; /* PA of our GMADR */ 459 460 /** "Graphics Stolen Memory" holds the global PTEs */ 461 void __iomem *gsm; 462 463 bool do_idle_maps; 464 dma_addr_t scratch_page_dma; 465 struct page *scratch_page; 466 467 /* global gtt ops */ 468 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, 469 size_t *stolen, phys_addr_t *mappable_base, 470 unsigned long *mappable_end); 471 void (*gtt_remove)(struct drm_device *dev); 472 void (*gtt_clear_range)(struct drm_device *dev, 473 unsigned int first_entry, 474 unsigned int num_entries); 475 void (*gtt_insert_entries)(struct drm_device *dev, 476 struct sg_table *st, 477 unsigned int pg_start, 478 enum i915_cache_level cache_level); 479 gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev, 480 dma_addr_t addr, 481 enum i915_cache_level level); 482 }; 483 #define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) 484 485 #define I915_PPGTT_PD_ENTRIES 512 486 #define I915_PPGTT_PT_ENTRIES 1024 487 struct i915_hw_ppgtt { 488 struct drm_device *dev; 489 unsigned num_pd_entries; 490 vm_page_t *pt_pages; 491 uint32_t pd_offset; 492 dma_addr_t *pt_dma_addr; 493 dma_addr_t scratch_page_dma_addr; 494 495 /* pte functions, mirroring the interface of the global gtt. */ 496 void (*clear_range)(struct i915_hw_ppgtt *ppgtt, 497 unsigned int first_entry, 498 unsigned int num_entries); 499 void (*insert_entries)(struct i915_hw_ppgtt *ppgtt, 500 struct sg_table *st, 501 unsigned int pg_start, 502 enum i915_cache_level cache_level); 503 gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev, 504 dma_addr_t addr, 505 enum i915_cache_level level); 506 int (*enable)(struct drm_device *dev); 507 void (*cleanup)(struct i915_hw_ppgtt *ppgtt); 508 }; 509 510 struct i915_ctx_hang_stats { 511 /* This context had batch pending when hang was declared */ 512 unsigned batch_pending; 513 514 /* This context had batch active when hang was declared */ 515 unsigned batch_active; 516 }; 517 518 /* This must match up with the value previously used for execbuf2.rsvd1. 
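 * In other words, an execbuffer2 call whose rsvd1 (context id) field is 0 is
 * expected to run on the default context rather than a user-created one.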
*/ 519 #define DEFAULT_CONTEXT_ID 0 520 struct i915_hw_context { 521 struct kref ref; 522 int id; 523 bool is_initialized; 524 struct drm_i915_file_private *file_priv; 525 struct intel_ring_buffer *ring; 526 struct drm_i915_gem_object *obj; 527 struct i915_ctx_hang_stats hang_stats; 528 }; 529 530 enum no_fbc_reason { 531 FBC_NO_OUTPUT, /* no outputs enabled to compress */ 532 FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ 533 FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ 534 FBC_MODE_TOO_LARGE, /* mode too large for compression */ 535 FBC_BAD_PLANE, /* fbc not supported on plane */ 536 FBC_NOT_TILED, /* buffer not tiled */ 537 FBC_MULTIPLE_PIPES, /* more than one pipe active */ 538 FBC_MODULE_PARAM, 539 }; 540 541 enum intel_pch { 542 PCH_NONE = 0, /* No PCH present */ 543 PCH_IBX, /* Ibexpeak PCH */ 544 PCH_CPT, /* Cougarpoint PCH */ 545 PCH_LPT, /* Lynxpoint PCH */ 546 PCH_NOP, 547 }; 548 549 enum intel_sbi_destination { 550 SBI_ICLK, 551 SBI_MPHY, 552 }; 553 554 #define QUIRK_PIPEA_FORCE (1<<0) 555 #define QUIRK_LVDS_SSC_DISABLE (1<<1) 556 #define QUIRK_INVERT_BRIGHTNESS (1<<2) 557 #define QUIRK_NO_PCH_PWM_ENABLE (1<<3) 558 559 struct intel_fbdev; 560 struct intel_fbc_work; 561 562 struct intel_gmbus { 563 u32 force_bit; 564 u32 reg0; 565 u32 gpio_reg; 566 struct drm_i915_private *dev_priv; 567 }; 568 569 struct intel_iic_softc { 570 struct drm_device *drm_dev; 571 device_t iic_dev; 572 bool force_bit_dev; 573 char name[32]; 574 uint32_t reg; 575 uint32_t reg0; 576 }; 577 578 struct i915_suspend_saved_registers { 579 u8 saveLBB; 580 u32 saveDSPACNTR; 581 u32 saveDSPBCNTR; 582 u32 saveDSPARB; 583 u32 savePIPEACONF; 584 u32 savePIPEBCONF; 585 u32 savePIPEASRC; 586 u32 savePIPEBSRC; 587 u32 saveFPA0; 588 u32 saveFPA1; 589 u32 saveDPLL_A; 590 u32 saveDPLL_A_MD; 591 u32 saveHTOTAL_A; 592 u32 saveHBLANK_A; 593 u32 saveHSYNC_A; 594 u32 saveVTOTAL_A; 595 u32 saveVBLANK_A; 596 u32 saveVSYNC_A; 597 u32 saveBCLRPAT_A; 598 u32 saveTRANSACONF; 599 u32 saveTRANS_HTOTAL_A; 600 u32 saveTRANS_HBLANK_A; 601 u32 saveTRANS_HSYNC_A; 602 u32 saveTRANS_VTOTAL_A; 603 u32 saveTRANS_VBLANK_A; 604 u32 saveTRANS_VSYNC_A; 605 u32 savePIPEASTAT; 606 u32 saveDSPASTRIDE; 607 u32 saveDSPASIZE; 608 u32 saveDSPAPOS; 609 u32 saveDSPAADDR; 610 u32 saveDSPASURF; 611 u32 saveDSPATILEOFF; 612 u32 savePFIT_PGM_RATIOS; 613 u32 saveBLC_HIST_CTL; 614 u32 saveBLC_PWM_CTL; 615 u32 saveBLC_PWM_CTL2; 616 u32 saveBLC_CPU_PWM_CTL; 617 u32 saveBLC_CPU_PWM_CTL2; 618 u32 saveFPB0; 619 u32 saveFPB1; 620 u32 saveDPLL_B; 621 u32 saveDPLL_B_MD; 622 u32 saveHTOTAL_B; 623 u32 saveHBLANK_B; 624 u32 saveHSYNC_B; 625 u32 saveVTOTAL_B; 626 u32 saveVBLANK_B; 627 u32 saveVSYNC_B; 628 u32 saveBCLRPAT_B; 629 u32 saveTRANSBCONF; 630 u32 saveTRANS_HTOTAL_B; 631 u32 saveTRANS_HBLANK_B; 632 u32 saveTRANS_HSYNC_B; 633 u32 saveTRANS_VTOTAL_B; 634 u32 saveTRANS_VBLANK_B; 635 u32 saveTRANS_VSYNC_B; 636 u32 savePIPEBSTAT; 637 u32 saveDSPBSTRIDE; 638 u32 saveDSPBSIZE; 639 u32 saveDSPBPOS; 640 u32 saveDSPBADDR; 641 u32 saveDSPBSURF; 642 u32 saveDSPBTILEOFF; 643 u32 saveVGA0; 644 u32 saveVGA1; 645 u32 saveVGA_PD; 646 u32 saveVGACNTRL; 647 u32 saveADPA; 648 u32 saveLVDS; 649 u32 savePP_ON_DELAYS; 650 u32 savePP_OFF_DELAYS; 651 u32 saveDVOA; 652 u32 saveDVOB; 653 u32 saveDVOC; 654 u32 savePP_ON; 655 u32 savePP_OFF; 656 u32 savePP_CONTROL; 657 u32 savePP_DIVISOR; 658 u32 savePFIT_CONTROL; 659 u32 save_palette_a[256]; 660 u32 save_palette_b[256]; 661 u32 saveDPFC_CB_BASE; 662 u32 saveFBC_CFB_BASE; 663 u32 saveFBC_LL_BASE; 664 
u32 saveFBC_CONTROL; 665 u32 saveFBC_CONTROL2; 666 u32 saveIER; 667 u32 saveIIR; 668 u32 saveIMR; 669 u32 saveDEIER; 670 u32 saveDEIMR; 671 u32 saveGTIER; 672 u32 saveGTIMR; 673 u32 saveFDI_RXA_IMR; 674 u32 saveFDI_RXB_IMR; 675 u32 saveCACHE_MODE_0; 676 u32 saveMI_ARB_STATE; 677 u32 saveSWF0[16]; 678 u32 saveSWF1[16]; 679 u32 saveSWF2[3]; 680 u8 saveMSR; 681 u8 saveSR[8]; 682 u8 saveGR[25]; 683 u8 saveAR_INDEX; 684 u8 saveAR[21]; 685 u8 saveDACMASK; 686 u8 saveCR[37]; 687 uint64_t saveFENCE[I915_MAX_NUM_FENCES]; 688 u32 saveCURACNTR; 689 u32 saveCURAPOS; 690 u32 saveCURABASE; 691 u32 saveCURBCNTR; 692 u32 saveCURBPOS; 693 u32 saveCURBBASE; 694 u32 saveCURSIZE; 695 u32 saveDP_B; 696 u32 saveDP_C; 697 u32 saveDP_D; 698 u32 savePIPEA_GMCH_DATA_M; 699 u32 savePIPEB_GMCH_DATA_M; 700 u32 savePIPEA_GMCH_DATA_N; 701 u32 savePIPEB_GMCH_DATA_N; 702 u32 savePIPEA_DP_LINK_M; 703 u32 savePIPEB_DP_LINK_M; 704 u32 savePIPEA_DP_LINK_N; 705 u32 savePIPEB_DP_LINK_N; 706 u32 saveFDI_RXA_CTL; 707 u32 saveFDI_TXA_CTL; 708 u32 saveFDI_RXB_CTL; 709 u32 saveFDI_TXB_CTL; 710 u32 savePFA_CTL_1; 711 u32 savePFB_CTL_1; 712 u32 savePFA_WIN_SZ; 713 u32 savePFB_WIN_SZ; 714 u32 savePFA_WIN_POS; 715 u32 savePFB_WIN_POS; 716 u32 savePCH_DREF_CONTROL; 717 u32 saveDISP_ARB_CTL; 718 u32 savePIPEA_DATA_M1; 719 u32 savePIPEA_DATA_N1; 720 u32 savePIPEA_LINK_M1; 721 u32 savePIPEA_LINK_N1; 722 u32 savePIPEB_DATA_M1; 723 u32 savePIPEB_DATA_N1; 724 u32 savePIPEB_LINK_M1; 725 u32 savePIPEB_LINK_N1; 726 u32 saveMCHBAR_RENDER_STANDBY; 727 u32 savePCH_PORT_HOTPLUG; 728 }; 729 730 struct intel_gen6_power_mgmt { 731 struct work_struct work; 732 struct delayed_work vlv_work; 733 u32 pm_iir; 734 /* lock - irqsave spinlock that protectects the work_struct and 735 * pm_iir. */ 736 struct lock lock; 737 738 /* The below variables an all the rps hw state are protected by 739 * dev->struct mutext. */ 740 u8 cur_delay; 741 u8 min_delay; 742 u8 max_delay; 743 u8 rpe_delay; 744 u8 hw_max; 745 746 struct delayed_work delayed_resume_work; 747 748 /* 749 * Protects RPS/RC6 register access and PCU communication. 750 * Must be taken after struct_mutex if nested. 751 */ 752 struct lock hw_lock; 753 }; 754 755 /* defined intel_pm.c */ 756 extern struct lock mchdev_lock; 757 758 struct intel_ilk_power_mgmt { 759 u8 cur_delay; 760 u8 min_delay; 761 u8 max_delay; 762 u8 fmax; 763 u8 fstart; 764 765 u64 last_count1; 766 unsigned long last_time1; 767 unsigned long chipset_power; 768 u64 last_count2; 769 struct timespec last_time2; 770 unsigned long gfx_power; 771 u8 corr; 772 773 int c_m; 774 int r_t; 775 776 struct drm_i915_gem_object *pwrctx; 777 struct drm_i915_gem_object *renderctx; 778 }; 779 780 /* Power well structure for haswell */ 781 struct i915_power_well { 782 struct drm_device *device; 783 struct lock lock; 784 /* power well enable/disable usage count */ 785 int count; 786 int i915_request; 787 }; 788 789 struct i915_dri1_state { 790 unsigned allow_batchbuffer : 1; 791 u32 __iomem *gfx_hws_cpu_addr; 792 793 unsigned int cpp; 794 int back_offset; 795 int front_offset; 796 int current_page; 797 int page_flipping; 798 799 uint32_t counter; 800 }; 801 802 struct intel_l3_parity { 803 u32 *remap_info; 804 struct work_struct error_work; 805 }; 806 807 struct i915_gem_mm { 808 /** Bridge to intel-gtt-ko */ 809 struct intel_gtt *gtt; 810 /** Memory allocator for GTT stolen memory */ 811 struct drm_mm stolen; 812 /** Memory allocator for GTT */ 813 struct drm_mm gtt_space; 814 /** List of all objects in gtt_space. 
Used to restore gtt 815 * mappings on resume */ 816 struct list_head bound_list; 817 /** 818 * List of objects which are not bound to the GTT (thus 819 * are idle and not used by the GPU) but still have 820 * (presumably uncached) pages still attached. 821 */ 822 struct list_head unbound_list; 823 824 /** Usable portion of the GTT for GEM */ 825 unsigned long stolen_base; /* limited to low memory (32-bit) */ 826 827 int gtt_mtrr; 828 829 /** PPGTT used for aliasing the PPGTT with the GTT */ 830 struct i915_hw_ppgtt *aliasing_ppgtt; 831 832 eventhandler_tag inactive_shrinker; 833 bool shrinker_no_lock_stealing; 834 835 /** 836 * List of objects currently involved in rendering. 837 * 838 * Includes buffers having the contents of their GPU caches 839 * flushed, not necessarily primitives. last_rendering_seqno 840 * represents when the rendering involved will be completed. 841 * 842 * A reference is held on the buffer while on this list. 843 */ 844 struct list_head active_list; 845 846 /** 847 * LRU list of objects which are not in the ringbuffer and 848 * are ready to unbind, but are still in the GTT. 849 * 850 * last_rendering_seqno is 0 while an object is in this list. 851 * 852 * A reference is not held on the buffer while on this list, 853 * as merely being GTT-bound shouldn't prevent its being 854 * freed, and we'll pull it off the list in the free path. 855 */ 856 struct list_head inactive_list; 857 858 /** LRU list of objects with fence regs on them. */ 859 struct list_head fence_list; 860 861 /** 862 * We leave the user IRQ off as much as possible, 863 * but this means that requests will finish and never 864 * be retired once the system goes idle. Set a timer to 865 * fire periodically while the ring is running. When it 866 * fires, go retire requests. 867 */ 868 struct delayed_work retire_work; 869 870 /** 871 * Are we in a non-interruptible section of code like 872 * modesetting? 873 */ 874 bool interruptible; 875 876 /** 877 * Flag if the X Server, and thus DRM, is not currently in 878 * control of the device. 879 * 880 * This is set between LeaveVT and EnterVT. It needs to be 881 * replaced with a semaphore. It also needs to be 882 * transitioned away from for kernel modesetting. 883 */ 884 int suspended; 885 886 /** Bit 6 swizzling required for X tiling */ 887 uint32_t bit_6_swizzle_x; 888 /** Bit 6 swizzling required for Y tiling */ 889 uint32_t bit_6_swizzle_y; 890 891 /* storage for physical objects */ 892 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 893 894 /* accounting, useful for userland debugging */ 895 size_t object_memory; 896 u32 object_count; 897 }; 898 899 struct drm_i915_error_state_buf { 900 unsigned bytes; 901 unsigned size; 902 int err; 903 u8 *buf; 904 loff_t start; 905 loff_t pos; 906 }; 907 908 struct i915_gpu_error { 909 /* For hangcheck timer */ 910 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ 911 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) 912 struct timer_list hangcheck_timer; 913 int hangcheck_count; 914 uint32_t last_acthd[I915_NUM_RINGS]; 915 uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; 916 917 /* For reset and error_state handling. */ 918 struct lock lock; 919 /* Protected by the above dev->gpu_error.lock. */ 920 struct drm_i915_error_state *first_error; 921 struct work_struct work; 922 923 unsigned long last_reset; 924 925 /** 926 * State variable and reset counter controlling the reset flow 927 * 928 * Upper bits are for the reset counter. 
This counter is used by the
	 * wait_seqno code to notice, in a race-free manner, that a reset event
	 * happened and that it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 *	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true (0xffffffff has the low bit set, so a wedged GPU always
	 * also reads as "reset in progress").
	 */
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	device_t *gmbus_bridge;
	device_t *bbbus_bridge;
	device_t *bbbus;

	drm_local_map_t *sarea;
	drm_local_map_t *mmio_map;
	char __iomem *regs;

	struct drm_i915_gt_funcs gt;
	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */
	struct lock gt_lock;

	device_t *gmbus;

	/** gmbus_mutex protects against concurrent usage of the single hw
	 * gmbus controller on different i2c buses. */
	struct lock gmbus_mutex;

	drm_i915_sarea_t *sarea_priv;
	/**
	 * Base address of the gmbus and gpio block.
1037 */ 1038 uint32_t gpio_mmio_base; 1039 1040 wait_queue_head_t gmbus_wait_queue; 1041 1042 struct pci_dev *bridge_dev; 1043 struct intel_ring_buffer ring[I915_NUM_RINGS]; 1044 uint32_t last_seqno, next_seqno; 1045 1046 drm_dma_handle_t *status_page_dmah; 1047 struct resource *mch_res; 1048 int mch_res_rid; 1049 1050 atomic_t irq_received; 1051 1052 /* protects the irq masks */ 1053 struct lock irq_lock; 1054 1055 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ 1056 struct pm_qos_request pm_qos; 1057 1058 /* DPIO indirect register protection */ 1059 struct lock dpio_lock; 1060 1061 /** Cached value of IMR to avoid reads in updating the bitfield */ 1062 u32 irq_mask; 1063 u32 gt_irq_mask; 1064 1065 struct work_struct hotplug_work; 1066 bool enable_hotplug_processing; 1067 struct { 1068 unsigned long hpd_last_jiffies; 1069 int hpd_cnt; 1070 enum { 1071 HPD_ENABLED = 0, 1072 HPD_DISABLED = 1, 1073 HPD_MARK_DISABLED = 2 1074 } hpd_mark; 1075 } hpd_stats[HPD_NUM_PINS]; 1076 u32 hpd_event_bits; 1077 struct timer_list hotplug_reenable_timer; 1078 1079 int num_plane; 1080 1081 unsigned long cfb_size; 1082 unsigned int cfb_fb; 1083 enum plane cfb_plane; 1084 int cfb_y; 1085 struct intel_fbc_work *fbc_work; 1086 1087 struct intel_opregion opregion; 1088 struct intel_vbt_data vbt; 1089 1090 /* overlay */ 1091 struct intel_overlay *overlay; 1092 unsigned int sprite_scaling_enabled; 1093 1094 /* backlight */ 1095 struct { 1096 int level; 1097 bool enabled; 1098 struct spinlock lock; /* bl registers and the above bl fields */ 1099 struct backlight_device *device; 1100 } backlight; 1101 1102 /* LVDS info */ 1103 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ 1104 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ 1105 bool no_aux_handshake; 1106 1107 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */ 1108 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 1109 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1110 1111 unsigned int fsb_freq, mem_freq, is_ddr3; 1112 1113 struct workqueue_struct *wq; 1114 1115 /* Display functions */ 1116 struct drm_i915_display_funcs display; 1117 1118 /* PCH chipset type */ 1119 enum intel_pch pch_type; 1120 unsigned short pch_id; 1121 1122 unsigned long quirks; 1123 1124 enum modeset_restore modeset_restore; 1125 struct lock modeset_restore_lock; 1126 1127 struct i915_gtt gtt; 1128 1129 struct i915_gem_mm mm; 1130 1131 /* Kernel Modesetting */ 1132 1133 struct sdvo_device_mapping sdvo_mappings[2]; 1134 1135 struct drm_crtc *plane_to_crtc_mapping[3]; 1136 struct drm_crtc *pipe_to_crtc_mapping[3]; 1137 wait_queue_head_t pending_flip_queue; 1138 1139 int num_shared_dpll; 1140 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1141 struct intel_ddi_plls ddi_plls; 1142 1143 /* Reclocking support */ 1144 bool render_reclock_avail; 1145 bool lvds_downclock_avail; 1146 /* indicates the reduced downclock for LVDS*/ 1147 int lvds_downclock; 1148 u16 orig_clock; 1149 1150 bool mchbar_need_disable; 1151 1152 struct intel_l3_parity l3_parity; 1153 1154 /* gen6+ rps state */ 1155 struct intel_gen6_power_mgmt rps; 1156 1157 /* ilk-only ips/rps state. 
Everything in here is protected by the
	 * global mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	/* Haswell power well */
	struct i915_power_well power_well;

	enum no_fbc_reason no_fbc_reason;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* the fbdev registered on this device */
	struct intel_fbdev *fbdev;

	/*
	 * The console may be contended at resume, but we don't
	 * want to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
} drm_i915_private_t;

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once, prior to the use of the associated set
	 * of pages (i.e. before binding them into the GTT), and put_pages()
	 * is called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	/** This object's place on the active/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on the
	 * inactive (ready to be unbound) list.
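	 * The flag is set by i915_gem_object_move_to_active() when a request
	 * is emitted against the object, and cleared again once the last such
	 * request has been retired (see i915_gem_retire_requests() below).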
1250 */ 1251 unsigned int active:1; 1252 1253 /** 1254 * This is set if the object has been written to since last bound 1255 * to the GTT 1256 */ 1257 unsigned int dirty:1; 1258 1259 /** 1260 * Fence register bits (if any) for this object. Will be set 1261 * as needed when mapped into the GTT. 1262 * Protected by dev->struct_mutex. 1263 */ 1264 signed int fence_reg:I915_MAX_NUM_FENCE_BITS; 1265 1266 /** 1267 * Advice: are the backing pages purgeable? 1268 */ 1269 unsigned int madv:2; 1270 1271 /** 1272 * Current tiling mode for the object. 1273 */ 1274 unsigned int tiling_mode:2; 1275 /** 1276 * Whether the tiling parameters for the currently associated fence 1277 * register have changed. Note that for the purposes of tracking 1278 * tiling changes we also treat the unfenced register, the register 1279 * slot that the object occupies whilst it executes a fenced 1280 * command (such as BLT on gen2/3), as a "fence". 1281 */ 1282 unsigned int fence_dirty:1; 1283 1284 /** How many users have pinned this object in GTT space. The following 1285 * users can each hold at most one reference: pwrite/pread, pin_ioctl 1286 * (via user_pin_count), execbuffer (objects are not allowed multiple 1287 * times for the same batchbuffer), and the framebuffer code. When 1288 * switching/pageflipping, the framebuffer code has at most two buffers 1289 * pinned per crtc. 1290 * 1291 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 1292 * bits with absolutely no headroom. So use 4 bits. */ 1293 unsigned int pin_count:4; 1294 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 1295 1296 /** 1297 * Is the object at the current location in the gtt mappable and 1298 * fenceable? Used to avoid costly recalculations. 1299 */ 1300 unsigned int map_and_fenceable:1; 1301 1302 /** 1303 * Whether the current gtt mapping needs to be mappable (and isn't just 1304 * mappable by accident). Track pin and fault separate for a more 1305 * accurate mappable working set. 1306 */ 1307 unsigned int fault_mappable:1; 1308 unsigned int pin_mappable:1; 1309 1310 /* 1311 * Is the GPU currently using a fence to access this buffer, 1312 */ 1313 unsigned int pending_fenced_gpu_access:1; 1314 unsigned int fenced_gpu_access:1; 1315 1316 unsigned int cache_level:2; 1317 1318 unsigned int has_aliasing_ppgtt_mapping:1; 1319 unsigned int has_global_gtt_mapping:1; 1320 unsigned int has_dma_mapping:1; 1321 1322 vm_page_t *pages; 1323 int pages_pin_count; 1324 1325 /** 1326 * Used for performing relocations during execbuffer insertion. 1327 */ 1328 struct hlist_node exec_node; 1329 unsigned long exec_handle; 1330 struct drm_i915_gem_exec_object2 *exec_entry; 1331 1332 /** 1333 * Current offset of the object in GTT space. 1334 * 1335 * This is the same as gtt_space->start 1336 */ 1337 uint32_t gtt_offset; 1338 1339 struct intel_ring_buffer *ring; 1340 1341 /** Breadcrumb of last rendering to the buffer. */ 1342 uint32_t last_read_seqno; 1343 uint32_t last_write_seqno; 1344 /** Breadcrumb of last fenced GPU access to the buffer. */ 1345 uint32_t last_fenced_seqno; 1346 1347 /** Current tiling stride for the object, if it's tiled. */ 1348 uint32_t stride; 1349 1350 /** Record of address bit 17 of each page at last unbind. 
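	 * Only needed when the swizzle mode is I915_BIT_6_SWIZZLE_9_10_17: if
	 * a page comes back from swap at a physical address whose bit 17 has
	 * changed, its contents have to be re-swizzled, which is what
	 * i915_gem_object_do_bit_17_swizzle() uses this record for.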
*/ 1351 unsigned long *bit_17; 1352 1353 /** User space pin count and filp owning the pin */ 1354 uint32_t user_pin_count; 1355 struct drm_file *pin_filp; 1356 1357 /** for phy allocated objects */ 1358 struct drm_i915_gem_phys_object *phys_obj; 1359 }; 1360 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) 1361 1362 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1363 1364 /** 1365 * Request queue structure. 1366 * 1367 * The request queue allows us to note sequence numbers that have been emitted 1368 * and may be associated with active buffers to be retired. 1369 * 1370 * By keeping this list, we can avoid having to do questionable 1371 * sequence-number comparisons on buffer last_rendering_seqnos, and associate 1372 * an emission time with seqnos for tracking how far ahead of the GPU we are. 1373 */ 1374 struct drm_i915_gem_request { 1375 /** On Which ring this request was generated */ 1376 struct intel_ring_buffer *ring; 1377 1378 /** GEM sequence number associated with this request. */ 1379 uint32_t seqno; 1380 1381 /** Position in the ringbuffer of the start of the request */ 1382 u32 head; 1383 1384 /** Position in the ringbuffer of the end of the request */ 1385 u32 tail; 1386 1387 /** Context related to this request */ 1388 struct i915_hw_context *ctx; 1389 1390 /** Batch buffer related to this request if any */ 1391 struct drm_i915_gem_object *batch_obj; 1392 1393 /** Time at which this request was emitted, in jiffies. */ 1394 unsigned long emitted_jiffies; 1395 1396 /** global list entry for this request */ 1397 struct list_head list; 1398 1399 struct drm_i915_file_private *file_priv; 1400 /** file_priv list entry for this request */ 1401 struct list_head client_list; 1402 }; 1403 1404 struct drm_i915_file_private { 1405 struct { 1406 struct spinlock lock; 1407 struct list_head request_list; 1408 } mm; 1409 struct idr context_idr; 1410 1411 struct i915_ctx_hang_stats hang_stats; 1412 }; 1413 1414 #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 1415 1416 #define IS_I830(dev) ((dev)->pci_device == 0x3577) 1417 #define IS_845G(dev) ((dev)->pci_device == 0x2562) 1418 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x) 1419 #define IS_I865G(dev) ((dev)->pci_device == 0x2572) 1420 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) 1421 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) 1422 #define IS_I945G(dev) ((dev)->pci_device == 0x2772) 1423 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) 1424 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) 1425 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) 1426 #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1427 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) 1428 #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) 1429 #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011) 1430 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview) 1431 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33) 1432 #define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1433 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1434 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 1435 #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \ 1436 (dev)->pci_device == 0x0152 || \ 1437 (dev)->pci_device == 0x015a) 1438 #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \ 1439 (dev)->pci_device == 0x0106 || \ 1440 (dev)->pci_device == 0x010A) 1441 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 1442 #define IS_HASWELL(dev) 
(INTEL_INFO(dev)->is_haswell) 1443 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1444 #define IS_ULT(dev) (IS_HASWELL(dev) && \ 1445 ((dev)->pci_device & 0xFF00) == 0x0A00) 1446 1447 /* 1448 * The genX designation typically refers to the render engine, so render 1449 * capability related checks should use IS_GEN, while display and other checks 1450 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 1451 * chips, etc.). 1452 */ 1453 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 1454 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 1455 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 1456 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 1457 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 1458 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 1459 1460 #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring) 1461 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring) 1462 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring) 1463 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 1464 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 1465 1466 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 1467 #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) 1468 1469 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 1470 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 1471 1472 /* Early gen2 have a totally busted CS tlb and require pinned batches. */ 1473 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 1474 1475 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1476 * rows, which changed the alignment requirements and fence programming. 1477 */ 1478 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \ 1479 IS_I915GM(dev))) 1480 #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) 1481 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1482 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev)) 1483 #define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev)) 1484 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv) 1485 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug) 1486 /* dsparb controlled by hw only */ 1487 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev)) 1488 1489 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2) 1490 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 1491 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 1492 1493 #define HAS_IPS(dev) (IS_ULT(dev)) 1494 1495 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1496 1497 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 1498 #define HAS_POWER_WELL(dev) (IS_HASWELL(dev)) 1499 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 1500 1501 #define INTEL_PCH_DEVICE_ID_MASK 0xff00 1502 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1503 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 1504 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 1505 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 1506 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 1507 1508 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1509 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 1510 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1511 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1512 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP) 1513 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) 1514 1515 #define 
HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)

#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, going down to 0V while in this stage. The
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and the GPU wakes up automatically again as soon as new work
 * arrives.
 *
 * There are different RC6 modes available on Intel GPUs, which differ in the
 * latency required to enter and leave RC6 and in the voltage consumed by the
 * GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6 and
 * RC6pp is the deepest RC6. Hardware support varies according to the GPU,
 * BIOS, chipset and platform. RC6 is usually the safest one and the one
 * which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up from.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)

extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;

extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device * dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
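/*
 * Illustrative sketch only, not part of the driver API: how the RC6 enable
 * flags above are meant to be combined as a bitmask (the i915_enable_rc6
 * module parameter above is such a mask), and how the rps delay values
 * relate to a GT frequency via GT_FREQUENCY_MULTIPLIER. The helper names are
 * made up for this example, and the MHz conversion is an assumption based on
 * the multiplier's name and the rps delay fields declared earlier.
 */
#if 0
static inline bool example_any_rc6_allowed(int rc6_mode)
{
	/* Is any RC6 state permitted by the bitmask? */
	return (rc6_mode & (INTEL_RC6_ENABLE |
			    INTEL_RC6p_ENABLE |
			    INTEL_RC6pp_ENABLE)) != 0;
}

static inline int example_rps_delay_to_mhz(u8 delay)
{
	/* rps.cur_delay/min_delay/max_delay are kept in hardware units;
	 * multiplying by GT_FREQUENCY_MULTIPLIER (50) is assumed to give
	 * the frequency in MHz. */
	return delay * GT_FREQUENCY_MULTIPLIER;
}
#endif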
1587 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 1588 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 1589 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 1590 1591 extern void intel_console_resume(struct work_struct *work); 1592 1593 /* i915_irq.c */ 1594 void i915_hangcheck_elapsed(unsigned long data); 1595 void i915_handle_error(struct drm_device *dev, bool wedged); 1596 1597 extern void intel_irq_init(struct drm_device *dev); 1598 extern void intel_pm_init(struct drm_device *dev); 1599 extern void intel_hpd_init(struct drm_device *dev); 1600 extern void intel_gt_init(struct drm_device *dev); 1601 extern void intel_gt_sanitize(struct drm_device *dev); 1602 1603 void i915_error_state_free(struct kref *error_ref); 1604 1605 void 1606 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1607 1608 void 1609 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1610 1611 #ifdef CONFIG_DEBUG_FS 1612 extern void i915_destroy_error_state(struct drm_device *dev); 1613 #else 1614 #define i915_destroy_error_state(x) 1615 #endif 1616 1617 1618 /* i915_gem.c */ 1619 int i915_gem_init_ioctl(struct drm_device *dev, void *data, 1620 struct drm_file *file_priv); 1621 int i915_gem_create_ioctl(struct drm_device *dev, void *data, 1622 struct drm_file *file_priv); 1623 int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 1624 struct drm_file *file_priv); 1625 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1626 struct drm_file *file_priv); 1627 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1628 struct drm_file *file_priv); 1629 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 1630 struct drm_file *file_priv); 1631 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1632 struct drm_file *file_priv); 1633 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1634 struct drm_file *file_priv); 1635 int i915_gem_execbuffer(struct drm_device *dev, void *data, 1636 struct drm_file *file_priv); 1637 int i915_gem_execbuffer2(struct drm_device *dev, void *data, 1638 struct drm_file *file_priv); 1639 int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 1640 struct drm_file *file_priv); 1641 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 1642 struct drm_file *file_priv); 1643 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 1644 struct drm_file *file_priv); 1645 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, 1646 struct drm_file *file); 1647 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 1648 struct drm_file *file); 1649 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 1650 struct drm_file *file_priv); 1651 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 1652 struct drm_file *file_priv); 1653 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 1654 struct drm_file *file_priv); 1655 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 1656 struct drm_file *file_priv); 1657 int i915_gem_set_tiling(struct drm_device *dev, void *data, 1658 struct drm_file *file_priv); 1659 int i915_gem_get_tiling(struct drm_device *dev, void *data, 1660 struct drm_file *file_priv); 1661 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 1662 struct drm_file *file_priv); 1663 int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 1664 struct drm_file *file_priv); 1665 void i915_gem_load(struct drm_device *dev); 1666 
void *i915_gem_object_alloc(struct drm_device *dev); 1667 void i915_gem_object_free(struct drm_i915_gem_object *obj); 1668 int i915_gem_init_object(struct drm_gem_object *obj); 1669 void i915_gem_object_init(struct drm_i915_gem_object *obj, 1670 const struct drm_i915_gem_object_ops *ops); 1671 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1672 size_t size); 1673 void i915_gem_free_object(struct drm_gem_object *obj); 1674 1675 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1676 uint32_t alignment, 1677 bool map_and_fenceable, 1678 bool nonblocking); 1679 void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 1680 int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); 1681 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 1682 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1683 void i915_gem_lastclose(struct drm_device *dev); 1684 1685 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj); 1686 static inline struct vm_page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 1687 { 1688 return obj->pages[n]; 1689 } 1690 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) 1691 { 1692 BUG_ON(obj->pages == NULL); 1693 obj->pages_pin_count++; 1694 } 1695 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) 1696 { 1697 BUG_ON(obj->pages_pin_count == 0); 1698 obj->pages_pin_count--; 1699 } 1700 1701 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1702 int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1703 struct intel_ring_buffer *to); 1704 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1705 struct intel_ring_buffer *ring); 1706 1707 int i915_gem_dumb_create(struct drm_file *file_priv, 1708 struct drm_device *dev, 1709 struct drm_mode_create_dumb *args); 1710 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 1711 uint32_t handle, uint64_t *offset); 1712 int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev, 1713 uint32_t handle); 1714 /** 1715 * Returns true if seq1 is later than seq2. 
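 *
 * The comparison is done on the signed 32-bit difference so that it keeps
 * working across seqno wrap-around; e.g. with seq1 = 2 and seq2 = 0xfffffffe
 * the difference is (int32_t)4 >= 0, so seq1 is still reported as later even
 * though it is numerically smaller.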
1716 */ 1717 static inline bool 1718 i915_seqno_passed(uint32_t seq1, uint32_t seq2) 1719 { 1720 return (int32_t)(seq1 - seq2) >= 0; 1721 } 1722 1723 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 1724 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 1725 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 1726 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 1727 1728 static inline bool 1729 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) 1730 { 1731 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1732 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1733 dev_priv->fence_regs[obj->fence_reg].pin_count++; 1734 return true; 1735 } else 1736 return false; 1737 } 1738 1739 static inline void 1740 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) 1741 { 1742 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1743 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1744 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); 1745 dev_priv->fence_regs[obj->fence_reg].pin_count--; 1746 } 1747 } 1748 1749 void i915_gem_retire_requests(struct drm_device *dev); 1750 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); 1751 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, 1752 bool interruptible); 1753 static inline bool i915_reset_in_progress(struct i915_gpu_error *error) 1754 { 1755 return unlikely(atomic_read(&error->reset_counter) 1756 & I915_RESET_IN_PROGRESS_FLAG); 1757 } 1758 1759 static inline bool i915_terminally_wedged(struct i915_gpu_error *error) 1760 { 1761 return atomic_read(&error->reset_counter) == I915_WEDGED; 1762 } 1763 1764 void i915_gem_reset(struct drm_device *dev); 1765 void i915_gem_clflush_object(struct drm_i915_gem_object *obj); 1766 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, 1767 uint32_t read_domains, 1768 uint32_t write_domain); 1769 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj); 1770 int __must_check i915_gem_init(struct drm_device *dev); 1771 int __must_check i915_gem_init_hw(struct drm_device *dev); 1772 void i915_gem_l3_remap(struct drm_device *dev); 1773 void i915_gem_init_swizzling(struct drm_device *dev); 1774 void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1775 int __must_check i915_gpu_idle(struct drm_device *dev); 1776 int __must_check i915_gem_idle(struct drm_device *dev); 1777 int __i915_add_request(struct intel_ring_buffer *ring, 1778 struct drm_file *file, 1779 struct drm_i915_gem_object *batch_obj, 1780 u32 *seqno); 1781 #define i915_add_request(ring, seqno) \ 1782 __i915_add_request(ring, NULL, NULL, seqno) 1783 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, 1784 uint32_t seqno); 1785 int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot, 1786 vm_page_t *mres); 1787 int __must_check 1788 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, 1789 bool write); 1790 int __must_check 1791 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); 1792 int __must_check 1793 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 1794 u32 alignment, 1795 struct intel_ring_buffer *pipelined); 1796 int i915_gem_attach_phys_object(struct drm_device *dev, 1797 struct drm_i915_gem_object *obj, 1798 int id, 1799 int align); 1800 void i915_gem_detach_phys_object(struct drm_device *dev, 1801 struct drm_i915_gem_object *obj); 1802 void 
int i915_gem_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
                   vm_page_t *mres);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
                                  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                int id,
                                int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
                           int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);

#if 0
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags);
#endif

void i915_gem_restore_fences(struct drm_device *dev);

/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
                        struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
        kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
        kref_put(&ctx->ref, i915_gem_context_free);
}

struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
                                struct drm_file *file,
                                u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
                               unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                intel_gtt_chipset_flush();
}
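/*
 * Illustrative note: i915_gem_chipset_flush() above is where CPU writes get
 * pushed out of the chipset write buffers on pre-gen6 hardware, which is not
 * coherent with the GPU; on gen6+ the call is effectively a no-op.  A typical
 * caller (the helper name "finish_cpu_writes" is hypothetical) flushes the CPU
 * caches for the object and then the chipset before the GPU reads the pages:
 *
 *	static void finish_cpu_writes(struct drm_device *dev,
 *				      struct drm_i915_gem_object *obj)
 *	{
 *		i915_gem_clflush_object(obj);
 *		i915_gem_chipset_flush(dev);
 *	}
 */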
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
                                          unsigned alignment,
                                          unsigned cache_level,
                                          bool mappable,
                                          bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                          const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
                                     int handle);

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
        return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct device *intel_gmbus_get_adapter(
                struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct device *adapter, int speed);
extern void intel_gmbus_force_bit(struct device *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct device *adapter)
{
        struct intel_iic_softc *sc;
        sc = device_get_softc(device_get_parent(adapter));

        return sc->force_bit_dev;
}
extern void intel_i2c_reset(struct drm_device *dev);
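/*
 * Illustrative sketch only: callers are expected to check a GMBUS port number
 * with intel_gmbus_is_port_valid() before asking for its adapter.  The helper
 * name "lookup_gmbus_adapter" is hypothetical.
 *
 *	static struct device *
 *	lookup_gmbus_adapter(struct drm_i915_private *dev_priv, unsigned port)
 *	{
 *		if (!intel_gmbus_is_port_valid(port))
 *			return NULL;
 *		return intel_gmbus_get_adapter(dev_priv, port);
 *	}
 */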
/* intel_opregion.c */
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);

const struct intel_device_info *i915_get_device_id(int device);

/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
#endif

/* On SNB, the forcewake bit must be set before reading ring registers, to
 * keep the GT core from powering down and returning stale values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
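/*
 * Illustrative sketch only: a GT/ring register read on SNB is bracketed by a
 * forcewake reference so the value read is current rather than stale.  The
 * helper name "read_ring_reg" is hypothetical; I915_READ() expects a local
 * called dev_priv, which here is the parameter.
 *
 *	static u32 read_ring_reg(struct drm_i915_private *dev_priv, u32 reg)
 *	{
 *		u32 val;
 *
 *		gen6_gt_force_wake_get(dev_priv);
 *		val = I915_READ(reg);
 *		gen6_gt_force_wake_put(dev_priv);
 *		return val;
 *	}
 */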
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
                   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
                     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);

#define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8, 8)
__i915_read(16, 16)
__i915_read(32, 32)
__i915_read(64, 64)
#undef __i915_read

#define __i915_write(x, y) \
        void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(8, 8)
__i915_write(16, 16)
__i915_write(32, 32)
__i915_write(64, 64)
#undef __i915_write

#define I915_READ8(reg) i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg) i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg) DRM_READ16(dev_priv->mmio_map, (reg))
#define I915_WRITE16_NOTRACE(reg, val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val))

#define I915_READ(reg) i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE_NOTRACE(reg, val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))

#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg) i915_read64(dev_priv, (reg))

#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
        if (HAS_PCH_SPLIT(dev))
                return CPU_VGACNTRL;
        else if (IS_VALLEYVIEW(dev))
                return VLV_VGACNTRL;
        else
                return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
        return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
        unsigned long j = msecs_to_jiffies(m);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
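/*
 * Illustrative note on the "+ 1" in msecs_to_jiffies_timeout() above (and in
 * timespec_to_jiffies_timeout() below): msecs_to_jiffies() converts to whole
 * ticks, but a wait started just before a tick boundary sees its first tick
 * almost immediately, so a j-jiffy sleep can last barely more than (j - 1)
 * full tick periods.  For example, with a 10 ms tick a 30 ms request maps to
 * 3 jiffies, which can elapse in just over 20 ms; adding one jiffy (clamped
 * to MAX_JIFFY_OFFSET) guarantees at least the requested wait.
 */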
static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
        unsigned long j = timespec_to_jiffies(value);

        return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

#endif