1 /* 2 * Copyright © 2008-2010 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 * Zou Nan hai <nanhai.zou@intel.com> 26 * Xiang Hai hao<haihao.xiang@intel.com> 27 * 28 */ 29 30 #include <drm/drmP.h> 31 #include "i915_drv.h" 32 #include <drm/i915_drm.h> 33 #include "i915_trace.h" 34 #include "intel_drv.h" 35 36 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, 37 * but keeps the logic simple. Indeed, the whole purpose of this macro is just 38 * to give some inclination as to some of the magic values used in the various 39 * workarounds! 40 */ 41 #define CACHELINE_BYTES 64 42 43 static inline int __ring_space(int head, int tail, int size) 44 { 45 int space = head - (tail + I915_RING_FREE_SPACE); 46 if (space < 0) 47 space += size; 48 return space; 49 } 50 51 static inline int ring_space(struct intel_engine_cs *ring) 52 { 53 struct intel_ringbuffer *ringbuf = ring->buffer; 54 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); 55 } 56 57 static bool intel_ring_stopped(struct intel_engine_cs *ring) 58 { 59 struct drm_i915_private *dev_priv = ring->dev->dev_private; 60 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); 61 } 62 63 void __intel_ring_advance(struct intel_engine_cs *ring) 64 { 65 struct intel_ringbuffer *ringbuf = ring->buffer; 66 ringbuf->tail &= ringbuf->size - 1; 67 if (intel_ring_stopped(ring)) 68 return; 69 ring->write_tail(ring, ringbuf->tail); 70 } 71 72 static int 73 gen2_render_ring_flush(struct intel_engine_cs *ring, 74 u32 invalidate_domains, 75 u32 flush_domains) 76 { 77 u32 cmd; 78 int ret; 79 80 cmd = MI_FLUSH; 81 if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0) 82 cmd |= MI_NO_WRITE_FLUSH; 83 84 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) 85 cmd |= MI_READ_FLUSH; 86 87 ret = intel_ring_begin(ring, 2); 88 if (ret) 89 return ret; 90 91 intel_ring_emit(ring, cmd); 92 intel_ring_emit(ring, MI_NOOP); 93 intel_ring_advance(ring); 94 95 return 0; 96 } 97 98 static int 99 gen4_render_ring_flush(struct intel_engine_cs *ring, 100 u32 invalidate_domains, 101 u32 flush_domains) 102 { 103 struct drm_device *dev = ring->dev; 104 u32 cmd; 105 int ret; 106 107 /* 108 * read/write caches: 109 * 110 * I915_GEM_DOMAIN_RENDER is always invalidated, but is 111 * only flushed if MI_NO_WRITE_FLUSH is unset. 
On 965, it is 112 * also flushed at 2d versus 3d pipeline switches. 113 * 114 * read-only caches: 115 * 116 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 117 * MI_READ_FLUSH is set, and is always flushed on 965. 118 * 119 * I915_GEM_DOMAIN_COMMAND may not exist? 120 * 121 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 122 * invalidated when MI_EXE_FLUSH is set. 123 * 124 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 125 * invalidated with every MI_FLUSH. 126 * 127 * TLBs: 128 * 129 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 130 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 131 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 132 * are flushed at any MI_FLUSH. 133 */ 134 135 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 136 if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) 137 cmd &= ~MI_NO_WRITE_FLUSH; 138 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 139 cmd |= MI_EXE_FLUSH; 140 141 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && 142 (IS_G4X(dev) || IS_GEN5(dev))) 143 cmd |= MI_INVALIDATE_ISP; 144 145 ret = intel_ring_begin(ring, 2); 146 if (ret) 147 return ret; 148 149 intel_ring_emit(ring, cmd); 150 intel_ring_emit(ring, MI_NOOP); 151 intel_ring_advance(ring); 152 153 return 0; 154 } 155 156 /** 157 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for 158 * implementing two workarounds on gen6. From section 1.4.7.1 159 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: 160 * 161 * [DevSNB-C+{W/A}] Before any depth stall flush (including those 162 * produced by non-pipelined state commands), software needs to first 163 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 164 * 0. 165 * 166 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable 167 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. 168 * 169 * And the workaround for these two requires this workaround first: 170 * 171 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent 172 * BEFORE the pipe-control with a post-sync op and no write-cache 173 * flushes. 174 * 175 * And this last workaround is tricky because of the requirements on 176 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM 177 * volume 2 part 1: 178 * 179 * "1 of the following must also be set: 180 * - Render Target Cache Flush Enable ([12] of DW1) 181 * - Depth Cache Flush Enable ([0] of DW1) 182 * - Stall at Pixel Scoreboard ([1] of DW1) 183 * - Depth Stall ([13] of DW1) 184 * - Post-Sync Operation ([13] of DW1) 185 * - Notify Enable ([8] of DW1)" 186 * 187 * The cache flushes require the workaround flush that triggered this 188 * one, so we can't use it. Depth stall would trigger the same. 189 * Post-sync nonzero is what triggered this second workaround, so we 190 * can't use that one either. Notify enable is IRQs, which aren't 191 * really our business. That leaves only stall at scoreboard. 
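 * The helper below therefore emits two PIPE_CONTROLs: first a CS stall with
 * stall-at-scoreboard, then one whose only side effect is a quad-word
 * post-sync write to a scratch address.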
192 */ 193 static int 194 intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring) 195 { 196 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 197 int ret; 198 199 200 ret = intel_ring_begin(ring, 6); 201 if (ret) 202 return ret; 203 204 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 205 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | 206 PIPE_CONTROL_STALL_AT_SCOREBOARD); 207 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ 208 intel_ring_emit(ring, 0); /* low dword */ 209 intel_ring_emit(ring, 0); /* high dword */ 210 intel_ring_emit(ring, MI_NOOP); 211 intel_ring_advance(ring); 212 213 ret = intel_ring_begin(ring, 6); 214 if (ret) 215 return ret; 216 217 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 218 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); 219 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ 220 intel_ring_emit(ring, 0); 221 intel_ring_emit(ring, 0); 222 intel_ring_emit(ring, MI_NOOP); 223 intel_ring_advance(ring); 224 225 return 0; 226 } 227 228 static int 229 gen6_render_ring_flush(struct intel_engine_cs *ring, 230 u32 invalidate_domains, u32 flush_domains) 231 { 232 u32 flags = 0; 233 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 234 int ret; 235 236 /* Force SNB workarounds for PIPE_CONTROL flushes */ 237 ret = intel_emit_post_sync_nonzero_flush(ring); 238 if (ret) 239 return ret; 240 241 /* Just flush everything. Experiments have shown that reducing the 242 * number of bits based on the write domains has little performance 243 * impact. 244 */ 245 if (flush_domains) { 246 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 247 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 248 /* 249 * Ensure that any following seqno writes only happen 250 * when the render cache is indeed flushed. 251 */ 252 flags |= PIPE_CONTROL_CS_STALL; 253 } 254 if (invalidate_domains) { 255 flags |= PIPE_CONTROL_TLB_INVALIDATE; 256 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 257 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 258 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 259 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 260 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 261 /* 262 * TLB invalidate requires a post-sync write. 
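 * (That is why QW_WRITE and CS_STALL are set together below, with the
 * scratch page as the write target.)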
263 */ 264 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 265 } 266 267 ret = intel_ring_begin(ring, 4); 268 if (ret) 269 return ret; 270 271 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 272 intel_ring_emit(ring, flags); 273 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 274 intel_ring_emit(ring, 0); 275 intel_ring_advance(ring); 276 277 return 0; 278 } 279 280 static int 281 gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring) 282 { 283 int ret; 284 285 ret = intel_ring_begin(ring, 4); 286 if (ret) 287 return ret; 288 289 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 290 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | 291 PIPE_CONTROL_STALL_AT_SCOREBOARD); 292 intel_ring_emit(ring, 0); 293 intel_ring_emit(ring, 0); 294 intel_ring_advance(ring); 295 296 return 0; 297 } 298 299 static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value) 300 { 301 int ret; 302 303 if (!ring->fbc_dirty) 304 return 0; 305 306 ret = intel_ring_begin(ring, 6); 307 if (ret) 308 return ret; 309 /* WaFbcNukeOn3DBlt:ivb/hsw */ 310 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 311 intel_ring_emit(ring, MSG_FBC_REND_STATE); 312 intel_ring_emit(ring, value); 313 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT); 314 intel_ring_emit(ring, MSG_FBC_REND_STATE); 315 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 316 intel_ring_advance(ring); 317 318 ring->fbc_dirty = false; 319 return 0; 320 } 321 322 static int 323 gen7_render_ring_flush(struct intel_engine_cs *ring, 324 u32 invalidate_domains, u32 flush_domains) 325 { 326 u32 flags = 0; 327 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 328 int ret; 329 330 /* 331 * Ensure that any following seqno writes only happen when the render 332 * cache is indeed flushed. 333 * 334 * Workaround: 4th PIPE_CONTROL command (except the ones with only 335 * read-cache invalidate bits set) must have the CS_STALL bit set. We 336 * don't try to be clever and just set it unconditionally. 337 */ 338 flags |= PIPE_CONTROL_CS_STALL; 339 340 /* Just flush everything. Experiments have shown that reducing the 341 * number of bits based on the write domains has little performance 342 * impact. 343 */ 344 if (flush_domains) { 345 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 346 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 347 } 348 if (invalidate_domains) { 349 flags |= PIPE_CONTROL_TLB_INVALIDATE; 350 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 351 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 352 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 353 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 354 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 355 /* 356 * TLB invalidate requires a post-sync write. 357 */ 358 flags |= PIPE_CONTROL_QW_WRITE; 359 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 360 361 /* Workaround: we must issue a pipe_control with CS-stall bit 362 * set before a pipe_control command that has the state cache 363 * invalidate bit set. 
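 * gen7_render_ring_cs_stall_wa() below emits exactly that stalling
 * PIPE_CONTROL.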
*/ 364 gen7_render_ring_cs_stall_wa(ring); 365 } 366 367 ret = intel_ring_begin(ring, 4); 368 if (ret) 369 return ret; 370 371 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 372 intel_ring_emit(ring, flags); 373 intel_ring_emit(ring, scratch_addr); 374 intel_ring_emit(ring, 0); 375 intel_ring_advance(ring); 376 377 if (!invalidate_domains && flush_domains) 378 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); 379 380 return 0; 381 } 382 383 static int 384 gen8_render_ring_flush(struct intel_engine_cs *ring, 385 u32 invalidate_domains, u32 flush_domains) 386 { 387 u32 flags = 0; 388 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 389 int ret; 390 391 flags |= PIPE_CONTROL_CS_STALL; 392 393 if (flush_domains) { 394 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 395 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 396 } 397 if (invalidate_domains) { 398 flags |= PIPE_CONTROL_TLB_INVALIDATE; 399 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 400 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 401 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 402 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 403 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 404 flags |= PIPE_CONTROL_QW_WRITE; 405 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 406 } 407 408 ret = intel_ring_begin(ring, 6); 409 if (ret) 410 return ret; 411 412 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 413 intel_ring_emit(ring, flags); 414 intel_ring_emit(ring, scratch_addr); 415 intel_ring_emit(ring, 0); 416 intel_ring_emit(ring, 0); 417 intel_ring_emit(ring, 0); 418 intel_ring_advance(ring); 419 420 return 0; 421 422 } 423 424 static void ring_write_tail(struct intel_engine_cs *ring, 425 u32 value) 426 { 427 struct drm_i915_private *dev_priv = ring->dev->dev_private; 428 I915_WRITE_TAIL(ring, value); 429 } 430 431 u64 intel_ring_get_active_head(struct intel_engine_cs *ring) 432 { 433 struct drm_i915_private *dev_priv = ring->dev->dev_private; 434 u64 acthd; 435 436 if (INTEL_INFO(ring->dev)->gen >= 8) 437 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base), 438 RING_ACTHD_UDW(ring->mmio_base)); 439 else if (INTEL_INFO(ring->dev)->gen >= 4) 440 acthd = I915_READ(RING_ACTHD(ring->mmio_base)); 441 else 442 acthd = I915_READ(ACTHD); 443 444 return acthd; 445 } 446 447 static void ring_setup_phys_status_page(struct intel_engine_cs *ring) 448 { 449 struct drm_i915_private *dev_priv = ring->dev->dev_private; 450 u32 addr; 451 452 addr = dev_priv->status_page_dmah->busaddr; 453 if (INTEL_INFO(ring->dev)->gen >= 4) 454 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 455 I915_WRITE(HWS_PGA, addr); 456 } 457 458 static bool stop_ring(struct intel_engine_cs *ring) 459 { 460 struct drm_i915_private *dev_priv = to_i915(ring->dev); 461 462 if (!IS_GEN2(ring->dev)) { 463 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); 464 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { 465 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); 466 return false; 467 } 468 } 469 470 I915_WRITE_CTL(ring, 0); 471 I915_WRITE_HEAD(ring, 0); 472 ring->write_tail(ring, 0); 473 474 if (!IS_GEN2(ring->dev)) { 475 (void)I915_READ_CTL(ring); 476 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); 477 } 478 479 return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; 480 } 481 482 static int init_ring_common(struct intel_engine_cs *ring) 483 { 484 struct drm_device *dev = ring->dev; 485 struct drm_i915_private *dev_priv = dev->dev_private; 486 struct intel_ringbuffer *ringbuf = ring->buffer; 487 struct drm_i915_gem_object *obj = 
ringbuf->obj; 488 int ret = 0; 489 490 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 491 492 if (!stop_ring(ring)) { 493 /* G45 ring initialization often fails to reset head to zero */ 494 DRM_DEBUG_KMS("%s head not reset to zero " 495 "ctl %08x head %08x tail %08x start %08x\n", 496 ring->name, 497 I915_READ_CTL(ring), 498 I915_READ_HEAD(ring), 499 I915_READ_TAIL(ring), 500 I915_READ_START(ring)); 501 502 if (!stop_ring(ring)) { 503 DRM_ERROR("failed to set %s head to zero " 504 "ctl %08x head %08x tail %08x start %08x\n", 505 ring->name, 506 I915_READ_CTL(ring), 507 I915_READ_HEAD(ring), 508 I915_READ_TAIL(ring), 509 I915_READ_START(ring)); 510 ret = -EIO; 511 goto out; 512 } 513 } 514 515 if (I915_NEED_GFX_HWS(dev)) 516 intel_ring_setup_status_page(ring); 517 else 518 ring_setup_phys_status_page(ring); 519 520 /* Initialize the ring. This must happen _after_ we've cleared the ring 521 * registers with the above sequence (the readback of the HEAD registers 522 * also enforces ordering), otherwise the hw might lose the new ring 523 * register values. */ 524 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); 525 I915_WRITE_CTL(ring, 526 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) 527 | RING_VALID); 528 529 /* If the head is still not zero, the ring is dead */ 530 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && 531 I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && 532 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { 533 DRM_ERROR("%s initialization failed " 534 "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n", 535 ring->name, 536 I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID, 537 I915_READ_HEAD(ring), I915_READ_TAIL(ring), 538 I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj)); 539 ret = -EIO; 540 goto out; 541 } 542 543 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 544 i915_kernel_lost_context(ring->dev); 545 else { 546 ringbuf->head = I915_READ_HEAD(ring); 547 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 548 ringbuf->space = ring_space(ring); 549 ringbuf->last_retired_head = -1; 550 } 551 552 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 553 554 out: 555 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 556 557 return ret; 558 } 559 560 static int 561 init_pipe_control(struct intel_engine_cs *ring) 562 { 563 int ret; 564 565 if (ring->scratch.obj) 566 return 0; 567 568 ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); 569 if (ring->scratch.obj == NULL) { 570 DRM_ERROR("Failed to allocate seqno page\n"); 571 ret = -ENOMEM; 572 goto err; 573 } 574 575 ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 576 if (ret) 577 goto err_unref; 578 579 ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0); 580 if (ret) 581 goto err_unref; 582 583 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); 584 ring->scratch.cpu_page = kmap(ring->scratch.obj->pages[0]); 585 if (ring->scratch.cpu_page == NULL) { 586 ret = -ENOMEM; 587 goto err_unpin; 588 } 589 590 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 591 ring->name, ring->scratch.gtt_offset); 592 return 0; 593 594 err_unpin: 595 i915_gem_object_ggtt_unpin(ring->scratch.obj); 596 err_unref: 597 drm_gem_object_unreference(&ring->scratch.obj->base); 598 err: 599 return ret; 600 } 601 602 static int init_render_ring(struct intel_engine_cs *ring) 603 { 604 struct drm_device *dev = ring->dev; 605 struct drm_i915_private *dev_priv = dev->dev_private; 606 int ret = init_ring_common(ring); 607 608 /* 
WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 609 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) 610 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 611 612 /* We need to disable the AsyncFlip performance optimisations in order 613 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 614 * programmed to '1' on all products. 615 * 616 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv 617 */ 618 if (INTEL_INFO(dev)->gen >= 6) 619 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 620 621 /* Required for the hardware to program scanline values for waiting */ 622 /* WaEnableFlushTlbInvalidationMode:snb */ 623 if (INTEL_INFO(dev)->gen == 6) 624 I915_WRITE(GFX_MODE, 625 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 626 627 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 628 if (IS_GEN7(dev)) 629 I915_WRITE(GFX_MODE_GEN7, 630 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 631 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 632 633 if (INTEL_INFO(dev)->gen >= 5) { 634 ret = init_pipe_control(ring); 635 if (ret) 636 return ret; 637 } 638 639 if (IS_GEN6(dev)) { 640 /* From the Sandybridge PRM, volume 1 part 3, page 24: 641 * "If this bit is set, STCunit will have LRA as replacement 642 * policy. [...] This bit must be reset. LRA replacement 643 * policy is not supported." 644 */ 645 I915_WRITE(CACHE_MODE_0, 646 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 647 } 648 649 if (INTEL_INFO(dev)->gen >= 6) 650 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 651 652 if (HAS_L3_DPF(dev)) 653 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 654 655 return ret; 656 } 657 658 static void render_ring_cleanup(struct intel_engine_cs *ring) 659 { 660 struct drm_device *dev = ring->dev; 661 662 if (ring->scratch.obj == NULL) 663 return; 664 665 if (INTEL_INFO(dev)->gen >= 5) { 666 kunmap(ring->scratch.obj->pages[0]); 667 i915_gem_object_ggtt_unpin(ring->scratch.obj); 668 } 669 670 drm_gem_object_unreference(&ring->scratch.obj->base); 671 ring->scratch.obj = NULL; 672 } 673 674 static int gen6_signal(struct intel_engine_cs *signaller, 675 unsigned int num_dwords) 676 { 677 struct drm_device *dev = signaller->dev; 678 struct drm_i915_private *dev_priv = dev->dev_private; 679 struct intel_engine_cs *useless; 680 int i, ret; 681 682 /* NB: In order to be able to do semaphore MBOX updates for varying 683 * number of rings, it's easiest if we round up each individual update 684 * to a multiple of 2 (since ring updates must always be a multiple of 685 * 2) even though the actual update only requires 3 dwords. 
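 * For example, with the five rings known to this code (RCS, VCS, BCS,
 * VECS, VCS2), a signalling request reserves an extra
 * (I915_NUM_RINGS - 1) * MBOX_UPDATE_DWORDS = 16 dwords on top of the
 * caller's num_dwords.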
686 */ 687 #define MBOX_UPDATE_DWORDS 4 688 if (i915_semaphore_is_enabled(dev)) 689 num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS); 690 else 691 return intel_ring_begin(signaller, num_dwords); 692 693 ret = intel_ring_begin(signaller, num_dwords); 694 if (ret) 695 return ret; 696 #undef MBOX_UPDATE_DWORDS 697 698 for_each_ring(useless, dev_priv, i) { 699 u32 mbox_reg = signaller->semaphore.mbox.signal[i]; 700 if (mbox_reg != GEN6_NOSYNC) { 701 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); 702 intel_ring_emit(signaller, mbox_reg); 703 intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); 704 intel_ring_emit(signaller, MI_NOOP); 705 } else { 706 intel_ring_emit(signaller, MI_NOOP); 707 intel_ring_emit(signaller, MI_NOOP); 708 intel_ring_emit(signaller, MI_NOOP); 709 intel_ring_emit(signaller, MI_NOOP); 710 } 711 } 712 713 return 0; 714 } 715 716 /** 717 * gen6_add_request - Update the semaphore mailbox registers 718 * 719 * @ring - ring that is adding a request 720 * @seqno - return seqno stuck into the ring 721 * 722 * Update the mailbox registers in the *other* rings with the current seqno. 723 * This acts like a signal in the canonical semaphore. 724 */ 725 static int 726 gen6_add_request(struct intel_engine_cs *ring) 727 { 728 int ret; 729 730 ret = ring->semaphore.signal(ring, 4); 731 if (ret) 732 return ret; 733 734 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 735 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 736 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 737 intel_ring_emit(ring, MI_USER_INTERRUPT); 738 __intel_ring_advance(ring); 739 740 return 0; 741 } 742 743 static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, 744 u32 seqno) 745 { 746 struct drm_i915_private *dev_priv = dev->dev_private; 747 return dev_priv->last_seqno < seqno; 748 } 749 750 /** 751 * intel_ring_sync - sync the waiter to the signaller on seqno 752 * 753 * @waiter - ring that is waiting 754 * @signaller - ring which has, or will signal 755 * @seqno - seqno which the waiter will block on 756 */ 757 static int 758 gen6_ring_sync(struct intel_engine_cs *waiter, 759 struct intel_engine_cs *signaller, 760 u32 seqno) 761 { 762 u32 dw1 = MI_SEMAPHORE_MBOX | 763 MI_SEMAPHORE_COMPARE | 764 MI_SEMAPHORE_REGISTER; 765 u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id]; 766 int ret; 767 768 /* Throughout all of the GEM code, seqno passed implies our current 769 * seqno is >= the last seqno executed. However for hardware the 770 * comparison is strictly greater than. 
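 * In other words, for the waiter to treat seqno N as complete it only needs
 * the signaller's mailbox to have passed N - 1, hence the decrement below.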
771 */ 772 seqno -= 1; 773 774 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); 775 776 ret = intel_ring_begin(waiter, 4); 777 if (ret) 778 return ret; 779 780 /* If seqno wrap happened, omit the wait with no-ops */ 781 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { 782 intel_ring_emit(waiter, dw1 | wait_mbox); 783 intel_ring_emit(waiter, seqno); 784 intel_ring_emit(waiter, 0); 785 intel_ring_emit(waiter, MI_NOOP); 786 } else { 787 intel_ring_emit(waiter, MI_NOOP); 788 intel_ring_emit(waiter, MI_NOOP); 789 intel_ring_emit(waiter, MI_NOOP); 790 intel_ring_emit(waiter, MI_NOOP); 791 } 792 intel_ring_advance(waiter); 793 794 return 0; 795 } 796 797 #define PIPE_CONTROL_FLUSH(ring__, addr__) \ 798 do { \ 799 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ 800 PIPE_CONTROL_DEPTH_STALL); \ 801 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ 802 intel_ring_emit(ring__, 0); \ 803 intel_ring_emit(ring__, 0); \ 804 } while (0) 805 806 static int 807 pc_render_add_request(struct intel_engine_cs *ring) 808 { 809 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 810 int ret; 811 812 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently 813 * incoherent with writes to memory, i.e. completely fubar, 814 * so we need to use PIPE_NOTIFY instead. 815 * 816 * However, we also need to workaround the qword write 817 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to 818 * memory before requesting an interrupt. 819 */ 820 ret = intel_ring_begin(ring, 32); 821 if (ret) 822 return ret; 823 824 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 825 PIPE_CONTROL_WRITE_FLUSH | 826 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 827 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 828 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 829 intel_ring_emit(ring, 0); 830 PIPE_CONTROL_FLUSH(ring, scratch_addr); 831 scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ 832 PIPE_CONTROL_FLUSH(ring, scratch_addr); 833 scratch_addr += 2 * CACHELINE_BYTES; 834 PIPE_CONTROL_FLUSH(ring, scratch_addr); 835 scratch_addr += 2 * CACHELINE_BYTES; 836 PIPE_CONTROL_FLUSH(ring, scratch_addr); 837 scratch_addr += 2 * CACHELINE_BYTES; 838 PIPE_CONTROL_FLUSH(ring, scratch_addr); 839 scratch_addr += 2 * CACHELINE_BYTES; 840 PIPE_CONTROL_FLUSH(ring, scratch_addr); 841 842 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 843 PIPE_CONTROL_WRITE_FLUSH | 844 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 845 PIPE_CONTROL_NOTIFY); 846 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 847 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 848 intel_ring_emit(ring, 0); 849 __intel_ring_advance(ring); 850 851 return 0; 852 } 853 854 static u32 855 gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 856 { 857 /* Workaround to force correct ordering between irq and seqno writes on 858 * ivb (and maybe also on snb) by reading from a CS register (like 859 * ACTHD) before reading the status page. 
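 * The POSTING_READ of RING_ACTHD below provides that serialising register
 * read.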
*/ 860 if (!lazy_coherency) { 861 struct drm_i915_private *dev_priv = ring->dev->dev_private; 862 POSTING_READ(RING_ACTHD(ring->mmio_base)); 863 } 864 865 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 866 } 867 868 static u32 869 ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 870 { 871 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 872 } 873 874 static void 875 ring_set_seqno(struct intel_engine_cs *ring, u32 seqno) 876 { 877 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 878 } 879 880 static u32 881 pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 882 { 883 return ring->scratch.cpu_page[0]; 884 } 885 886 static void 887 pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno) 888 { 889 ring->scratch.cpu_page[0] = seqno; 890 } 891 892 static bool 893 gen5_ring_get_irq(struct intel_engine_cs *ring) 894 { 895 struct drm_device *dev = ring->dev; 896 struct drm_i915_private *dev_priv = dev->dev_private; 897 898 if (!dev->irq_enabled) 899 return false; 900 901 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 902 if (ring->irq_refcount++ == 0) 903 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 904 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 905 906 return true; 907 } 908 909 static void 910 gen5_ring_put_irq(struct intel_engine_cs *ring) 911 { 912 struct drm_device *dev = ring->dev; 913 struct drm_i915_private *dev_priv = dev->dev_private; 914 915 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 916 if (--ring->irq_refcount == 0) 917 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 918 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 919 } 920 921 static bool 922 i9xx_ring_get_irq(struct intel_engine_cs *ring) 923 { 924 struct drm_device *dev = ring->dev; 925 struct drm_i915_private *dev_priv = dev->dev_private; 926 927 if (!dev->irq_enabled) 928 return false; 929 930 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 931 if (ring->irq_refcount++ == 0) { 932 dev_priv->irq_mask &= ~ring->irq_enable_mask; 933 I915_WRITE(IMR, dev_priv->irq_mask); 934 POSTING_READ(IMR); 935 } 936 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 937 938 return true; 939 } 940 941 static void 942 i9xx_ring_put_irq(struct intel_engine_cs *ring) 943 { 944 struct drm_device *dev = ring->dev; 945 struct drm_i915_private *dev_priv = dev->dev_private; 946 947 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 948 if (--ring->irq_refcount == 0) { 949 dev_priv->irq_mask |= ring->irq_enable_mask; 950 I915_WRITE(IMR, dev_priv->irq_mask); 951 POSTING_READ(IMR); 952 } 953 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 954 } 955 956 static bool 957 i8xx_ring_get_irq(struct intel_engine_cs *ring) 958 { 959 struct drm_device *dev = ring->dev; 960 struct drm_i915_private *dev_priv = dev->dev_private; 961 962 if (!dev->irq_enabled) 963 return false; 964 965 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 966 if (ring->irq_refcount++ == 0) { 967 dev_priv->irq_mask &= ~ring->irq_enable_mask; 968 I915_WRITE16(IMR, dev_priv->irq_mask); 969 POSTING_READ16(IMR); 970 } 971 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 972 973 return true; 974 } 975 976 static void 977 i8xx_ring_put_irq(struct intel_engine_cs *ring) 978 { 979 struct drm_device *dev = ring->dev; 980 struct drm_i915_private *dev_priv = dev->dev_private; 981 982 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 983 if (--ring->irq_refcount == 0) { 984 dev_priv->irq_mask |= ring->irq_enable_mask; 985 I915_WRITE16(IMR, dev_priv->irq_mask); 986 POSTING_READ16(IMR); 987 } 988 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 989 } 990 991 void 
intel_ring_setup_status_page(struct intel_engine_cs *ring) 992 { 993 struct drm_device *dev = ring->dev; 994 struct drm_i915_private *dev_priv = ring->dev->dev_private; 995 u32 mmio = 0; 996 997 /* The ring status page addresses are no longer next to the rest of 998 * the ring registers as of gen7. 999 */ 1000 if (IS_GEN7(dev)) { 1001 switch (ring->id) { 1002 case RCS: 1003 mmio = RENDER_HWS_PGA_GEN7; 1004 break; 1005 case BCS: 1006 mmio = BLT_HWS_PGA_GEN7; 1007 break; 1008 /* 1009 * VCS2 actually doesn't exist on Gen7. Only shut up 1010 * gcc switch check warning 1011 */ 1012 case VCS2: 1013 case VCS: 1014 mmio = BSD_HWS_PGA_GEN7; 1015 break; 1016 case VECS: 1017 mmio = VEBOX_HWS_PGA_GEN7; 1018 break; 1019 } 1020 } else if (IS_GEN6(ring->dev)) { 1021 mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 1022 } else { 1023 /* XXX: gen8 returns to sanity */ 1024 mmio = RING_HWS_PGA(ring->mmio_base); 1025 } 1026 1027 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 1028 POSTING_READ(mmio); 1029 1030 /* 1031 * Flush the TLB for this page 1032 * 1033 * FIXME: These two bits have disappeared on gen8, so a question 1034 * arises: do we still need this and if so how should we go about 1035 * invalidating the TLB? 1036 */ 1037 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 1038 u32 reg = RING_INSTPM(ring->mmio_base); 1039 1040 /* ring should be idle before issuing a sync flush*/ 1041 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); 1042 1043 I915_WRITE(reg, 1044 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 1045 INSTPM_SYNC_FLUSH)); 1046 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 1047 1000)) 1048 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 1049 ring->name); 1050 } 1051 } 1052 1053 static int 1054 bsd_ring_flush(struct intel_engine_cs *ring, 1055 u32 invalidate_domains, 1056 u32 flush_domains) 1057 { 1058 int ret; 1059 1060 ret = intel_ring_begin(ring, 2); 1061 if (ret) 1062 return ret; 1063 1064 intel_ring_emit(ring, MI_FLUSH); 1065 intel_ring_emit(ring, MI_NOOP); 1066 intel_ring_advance(ring); 1067 return 0; 1068 } 1069 1070 static int 1071 i9xx_add_request(struct intel_engine_cs *ring) 1072 { 1073 int ret; 1074 1075 ret = intel_ring_begin(ring, 4); 1076 if (ret) 1077 return ret; 1078 1079 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 1080 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1081 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 1082 intel_ring_emit(ring, MI_USER_INTERRUPT); 1083 __intel_ring_advance(ring); 1084 1085 return 0; 1086 } 1087 1088 static bool 1089 gen6_ring_get_irq(struct intel_engine_cs *ring) 1090 { 1091 struct drm_device *dev = ring->dev; 1092 struct drm_i915_private *dev_priv = dev->dev_private; 1093 1094 if (!dev->irq_enabled) 1095 return false; 1096 1097 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1098 if (ring->irq_refcount++ == 0) { 1099 if (HAS_L3_DPF(dev) && ring->id == RCS) 1100 I915_WRITE_IMR(ring, 1101 ~(ring->irq_enable_mask | 1102 GT_PARITY_ERROR(dev))); 1103 else 1104 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1105 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1106 } 1107 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1108 1109 return true; 1110 } 1111 1112 static void 1113 gen6_ring_put_irq(struct intel_engine_cs *ring) 1114 { 1115 struct drm_device *dev = ring->dev; 1116 struct drm_i915_private *dev_priv = dev->dev_private; 1117 1118 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1119 if (--ring->irq_refcount == 0) { 1120 if (HAS_L3_DPF(dev) && ring->id == RCS) 1121 
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 1122 else 1123 I915_WRITE_IMR(ring, ~0); 1124 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1125 } 1126 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1127 } 1128 1129 static bool 1130 hsw_vebox_get_irq(struct intel_engine_cs *ring) 1131 { 1132 struct drm_device *dev = ring->dev; 1133 struct drm_i915_private *dev_priv = dev->dev_private; 1134 1135 if (!dev->irq_enabled) 1136 return false; 1137 1138 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1139 if (ring->irq_refcount++ == 0) { 1140 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1141 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); 1142 } 1143 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1144 1145 return true; 1146 } 1147 1148 static void 1149 hsw_vebox_put_irq(struct intel_engine_cs *ring) 1150 { 1151 struct drm_device *dev = ring->dev; 1152 struct drm_i915_private *dev_priv = dev->dev_private; 1153 1154 if (!dev->irq_enabled) 1155 return; 1156 1157 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1158 if (--ring->irq_refcount == 0) { 1159 I915_WRITE_IMR(ring, ~0); 1160 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); 1161 } 1162 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1163 } 1164 1165 static bool 1166 gen8_ring_get_irq(struct intel_engine_cs *ring) 1167 { 1168 struct drm_device *dev = ring->dev; 1169 struct drm_i915_private *dev_priv = dev->dev_private; 1170 1171 if (!dev->irq_enabled) 1172 return false; 1173 1174 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1175 if (ring->irq_refcount++ == 0) { 1176 if (HAS_L3_DPF(dev) && ring->id == RCS) { 1177 I915_WRITE_IMR(ring, 1178 ~(ring->irq_enable_mask | 1179 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1180 } else { 1181 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1182 } 1183 POSTING_READ(RING_IMR(ring->mmio_base)); 1184 } 1185 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1186 1187 return true; 1188 } 1189 1190 static void 1191 gen8_ring_put_irq(struct intel_engine_cs *ring) 1192 { 1193 struct drm_device *dev = ring->dev; 1194 struct drm_i915_private *dev_priv = dev->dev_private; 1195 1196 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1197 if (--ring->irq_refcount == 0) { 1198 if (HAS_L3_DPF(dev) && ring->id == RCS) { 1199 I915_WRITE_IMR(ring, 1200 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1201 } else { 1202 I915_WRITE_IMR(ring, ~0); 1203 } 1204 POSTING_READ(RING_IMR(ring->mmio_base)); 1205 } 1206 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1207 } 1208 1209 static int 1210 i965_dispatch_execbuffer(struct intel_engine_cs *ring, 1211 u64 offset, u32 length, 1212 unsigned flags) 1213 { 1214 int ret; 1215 1216 ret = intel_ring_begin(ring, 2); 1217 if (ret) 1218 return ret; 1219 1220 intel_ring_emit(ring, 1221 MI_BATCH_BUFFER_START | 1222 MI_BATCH_GTT | 1223 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); 1224 intel_ring_emit(ring, offset); 1225 intel_ring_advance(ring); 1226 1227 return 0; 1228 } 1229 1230 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ 1231 #define I830_BATCH_LIMIT (256*1024) 1232 static int 1233 i830_dispatch_execbuffer(struct intel_engine_cs *ring, 1234 u64 offset, u32 len, 1235 unsigned flags) 1236 { 1237 int ret; 1238 1239 if (flags & I915_DISPATCH_PINNED) { 1240 ret = intel_ring_begin(ring, 4); 1241 if (ret) 1242 return ret; 1243 1244 intel_ring_emit(ring, MI_BATCH_BUFFER); 1245 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE)); 1246 intel_ring_emit(ring, offset + len - 8); 1247 intel_ring_emit(ring, MI_NOOP); 1248 intel_ring_advance(ring); 1249 } else { 1250 u32 cs_offset = ring->scratch.gtt_offset; 1251 1252 if (len > I830_BATCH_LIMIT) 1253 return -ENOSPC; 1254 1255 ret = intel_ring_begin(ring, 9+3); 1256 if (ret) 1257 return ret; 1258 /* Blit the batch (which has now all relocs applied) to the stable batch 1259 * scratch bo area (so that the CS never stumbles over its tlb 1260 * invalidation bug) ... */ 1261 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | 1262 XY_SRC_COPY_BLT_WRITE_ALPHA | 1263 XY_SRC_COPY_BLT_WRITE_RGB); 1264 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); 1265 intel_ring_emit(ring, 0); 1266 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); 1267 intel_ring_emit(ring, cs_offset); 1268 intel_ring_emit(ring, 0); 1269 intel_ring_emit(ring, 4096); 1270 intel_ring_emit(ring, offset); 1271 intel_ring_emit(ring, MI_FLUSH); 1272 1273 /* ... and execute it. */ 1274 intel_ring_emit(ring, MI_BATCH_BUFFER); 1275 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 1276 intel_ring_emit(ring, cs_offset + len - 8); 1277 intel_ring_advance(ring); 1278 } 1279 1280 return 0; 1281 } 1282 1283 static int 1284 i915_dispatch_execbuffer(struct intel_engine_cs *ring, 1285 u64 offset, u32 len, 1286 unsigned flags) 1287 { 1288 int ret; 1289 1290 ret = intel_ring_begin(ring, 2); 1291 if (ret) 1292 return ret; 1293 1294 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 1295 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 1296 intel_ring_advance(ring); 1297 1298 return 0; 1299 } 1300 1301 static void cleanup_status_page(struct intel_engine_cs *ring) 1302 { 1303 struct drm_i915_gem_object *obj; 1304 1305 obj = ring->status_page.obj; 1306 if (obj == NULL) 1307 return; 1308 1309 kunmap(obj->pages[0]); 1310 i915_gem_object_ggtt_unpin(obj); 1311 drm_gem_object_unreference(&obj->base); 1312 ring->status_page.obj = NULL; 1313 } 1314 1315 static int init_status_page(struct intel_engine_cs *ring) 1316 { 1317 struct drm_i915_gem_object *obj; 1318 1319 if ((obj = ring->status_page.obj) == NULL) { 1320 int ret; 1321 1322 obj = i915_gem_alloc_object(ring->dev, 4096); 1323 if (obj == NULL) { 1324 DRM_ERROR("Failed to allocate status page\n"); 1325 return -ENOMEM; 1326 } 1327 1328 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1329 if (ret) 1330 goto err_unref; 1331 1332 ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); 1333 if (ret) { 1334 err_unref: 1335 drm_gem_object_unreference(&obj->base); 1336 return ret; 1337 } 1338 1339 ring->status_page.obj = obj; 1340 } 1341 1342 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 1343 ring->status_page.page_addr = kmap(obj->pages[0]); 1344 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1345 1346 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 1347 ring->name, ring->status_page.gfx_addr); 1348 1349 return 0; 1350 } 1351 1352 static int init_phys_status_page(struct intel_engine_cs *ring) 1353 { 1354 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1355 1356 if (!dev_priv->status_page_dmah) { 1357 dev_priv->status_page_dmah = 1358 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); 1359 if (!dev_priv->status_page_dmah) 1360 return -ENOMEM; 1361 } 1362 1363 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1364 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1365 1366 return 0; 1367 } 1368 1369 static int 
allocate_ring_buffer(struct intel_engine_cs *ring) 1370 { 1371 struct drm_device *dev = ring->dev; 1372 struct intel_ringbuffer *ringbuf = ring->buffer; 1373 struct drm_i915_gem_object *obj; 1374 int ret; 1375 1376 if (intel_ring_initialized(ring)) 1377 return 0; 1378 1379 obj = NULL; 1380 if (!HAS_LLC(dev)) 1381 obj = i915_gem_object_create_stolen(dev, ringbuf->size); 1382 if (obj == NULL) 1383 obj = i915_gem_alloc_object(dev, ringbuf->size); 1384 if (obj == NULL) 1385 return -ENOMEM; 1386 1387 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 1388 if (ret) 1389 goto err_unref; 1390 1391 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1392 if (ret) 1393 goto err_unpin; 1394 1395 ringbuf->virtual_start = 1396 ioremap_wc(dev->agp->base + i915_gem_obj_ggtt_offset(obj), 1397 ringbuf->size); 1398 if (ringbuf->virtual_start == NULL) { 1399 ret = -EINVAL; 1400 goto err_unpin; 1401 } 1402 1403 ringbuf->obj = obj; 1404 return 0; 1405 1406 err_unpin: 1407 i915_gem_object_ggtt_unpin(obj); 1408 err_unref: 1409 drm_gem_object_unreference(&obj->base); 1410 return ret; 1411 } 1412 1413 static int intel_init_ring_buffer(struct drm_device *dev, 1414 struct intel_engine_cs *ring) 1415 { 1416 struct intel_ringbuffer *ringbuf = ring->buffer; 1417 int ret; 1418 1419 if (ringbuf == NULL) { 1420 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); 1421 if (!ringbuf) 1422 return -ENOMEM; 1423 ring->buffer = ringbuf; 1424 } 1425 1426 ring->dev = dev; 1427 INIT_LIST_HEAD(&ring->active_list); 1428 INIT_LIST_HEAD(&ring->request_list); 1429 ringbuf->size = 32 * PAGE_SIZE; 1430 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 1431 1432 init_waitqueue_head(&ring->irq_queue); 1433 1434 if (I915_NEED_GFX_HWS(dev)) { 1435 ret = init_status_page(ring); 1436 if (ret) 1437 goto error; 1438 } else { 1439 BUG_ON(ring->id != RCS); 1440 ret = init_phys_status_page(ring); 1441 if (ret) 1442 goto error; 1443 } 1444 1445 ret = allocate_ring_buffer(ring); 1446 if (ret) { 1447 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); 1448 goto error; 1449 } 1450 1451 /* Workaround an erratum on the i830 which causes a hang if 1452 * the TAIL pointer points to within the last 2 cachelines 1453 * of the buffer. 
 * of the buffer.
1454 */ 1455 ringbuf->effective_size = ringbuf->size; 1456 if (IS_I830(dev) || IS_845G(dev)) 1457 ringbuf->effective_size -= 2 * CACHELINE_BYTES; 1458 1459 ret = i915_cmd_parser_init_ring(ring); 1460 if (ret) 1461 goto error; 1462 1463 ret = ring->init(ring); 1464 if (ret) 1465 goto error; 1466 1467 return 0; 1468 1469 error: 1470 kfree(ringbuf); 1471 ring->buffer = NULL; 1472 return ret; 1473 } 1474 1475 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) 1476 { 1477 struct drm_i915_private *dev_priv = to_i915(ring->dev); 1478 struct intel_ringbuffer *ringbuf = ring->buffer; 1479 1480 if (!intel_ring_initialized(ring)) 1481 return; 1482 1483 intel_stop_ring_buffer(ring); 1484 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 1485 1486 pmap_unmapdev((vm_offset_t)ringbuf->virtual_start, ringbuf->size); 1487 1488 i915_gem_object_ggtt_unpin(ringbuf->obj); 1489 drm_gem_object_unreference(&ringbuf->obj->base); 1490 ringbuf->obj = NULL; 1491 ring->preallocated_lazy_request = NULL; 1492 ring->outstanding_lazy_seqno = 0; 1493 1494 if (ring->cleanup) 1495 ring->cleanup(ring); 1496 1497 cleanup_status_page(ring); 1498 1499 i915_cmd_parser_fini_ring(ring); 1500 1501 kfree(ringbuf); 1502 ring->buffer = NULL; 1503 } 1504 1505 static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) 1506 { 1507 struct intel_ringbuffer *ringbuf = ring->buffer; 1508 struct drm_i915_gem_request *request; 1509 u32 seqno = 0; 1510 int ret; 1511 1512 if (ringbuf->last_retired_head != -1) { 1513 ringbuf->head = ringbuf->last_retired_head; 1514 ringbuf->last_retired_head = -1; 1515 1516 ringbuf->space = ring_space(ring); 1517 if (ringbuf->space >= n) 1518 return 0; 1519 } 1520 1521 list_for_each_entry(request, &ring->request_list, list) { 1522 if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) { 1523 seqno = request->seqno; 1524 break; 1525 } 1526 } 1527 1528 if (seqno == 0) 1529 return -ENOSPC; 1530 1531 ret = i915_wait_seqno(ring, seqno); 1532 if (ret) 1533 return ret; 1534 1535 i915_gem_retire_requests_ring(ring); 1536 ringbuf->head = ringbuf->last_retired_head; 1537 ringbuf->last_retired_head = -1; 1538 1539 ringbuf->space = ring_space(ring); 1540 return 0; 1541 } 1542 1543 static int ring_wait_for_space(struct intel_engine_cs *ring, int n) 1544 { 1545 struct drm_device *dev = ring->dev; 1546 struct drm_i915_private *dev_priv = dev->dev_private; 1547 struct intel_ringbuffer *ringbuf = ring->buffer; 1548 unsigned long end; 1549 int ret; 1550 1551 ret = intel_ring_wait_request(ring, n); 1552 if (ret != -ENOSPC) 1553 return ret; 1554 1555 /* force the tail write in case we have been skipping them */ 1556 __intel_ring_advance(ring); 1557 1558 /* With GEM the hangcheck timer should kick us out of the loop, 1559 * leaving it early runs the risk of corrupting GEM state (due 1560 * to running on almost untested codepaths). But on resume 1561 * timers don't work yet, so prevent a complete hang in that 1562 * case by choosing an insanely large timeout. 
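 * The i915_gem_check_wedge() call inside the loop is what lets a GPU reset
 * break us out before that timeout expires.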
*/ 1563 end = jiffies + 60 * HZ; 1564 1565 trace_i915_ring_wait_begin(ring); 1566 do { 1567 ringbuf->head = I915_READ_HEAD(ring); 1568 ringbuf->space = ring_space(ring); 1569 if (ringbuf->space >= n) { 1570 ret = 0; 1571 break; 1572 } 1573 1574 #if 0 1575 if (!drm_core_check_feature(dev, DRIVER_MODESET) && 1576 dev->primary->master) { 1577 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1578 if (master_priv->sarea_priv) 1579 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1580 } 1581 #else 1582 if (dev_priv->sarea_priv) 1583 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1584 #endif 1585 1586 1587 msleep(1); 1588 1589 #if 0 1590 if (dev_priv->mm.interruptible && signal_pending(current)) { 1591 ret = -ERESTARTSYS; 1592 break; 1593 } 1594 #endif 1595 1596 ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1597 dev_priv->mm.interruptible); 1598 if (ret) 1599 break; 1600 1601 if (time_after(jiffies, end)) { 1602 ret = -EBUSY; 1603 break; 1604 } 1605 } while (1); 1606 trace_i915_ring_wait_end(ring); 1607 return ret; 1608 } 1609 1610 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring) 1611 { 1612 uint32_t __iomem *virt; 1613 struct intel_ringbuffer *ringbuf = ring->buffer; 1614 int rem = ringbuf->size - ringbuf->tail; 1615 1616 if (ringbuf->space < rem) { 1617 int ret = ring_wait_for_space(ring, rem); 1618 if (ret) 1619 return ret; 1620 } 1621 1622 virt = (unsigned int *)((char *)ringbuf->virtual_start + ringbuf->tail); 1623 rem /= 4; 1624 while (rem--) 1625 iowrite32(MI_NOOP, virt++); 1626 1627 ringbuf->tail = 0; 1628 ringbuf->space = ring_space(ring); 1629 1630 return 0; 1631 } 1632 1633 int intel_ring_idle(struct intel_engine_cs *ring) 1634 { 1635 u32 seqno; 1636 int ret; 1637 1638 /* We need to add any requests required to flush the objects and ring */ 1639 if (ring->outstanding_lazy_seqno) { 1640 ret = i915_add_request(ring, NULL); 1641 if (ret) 1642 return ret; 1643 } 1644 1645 /* Wait upon the last request to be completed */ 1646 if (list_empty(&ring->request_list)) 1647 return 0; 1648 1649 seqno = list_entry(ring->request_list.prev, 1650 struct drm_i915_gem_request, 1651 list)->seqno; 1652 1653 return i915_wait_seqno(ring, seqno); 1654 } 1655 1656 static int 1657 intel_ring_alloc_seqno(struct intel_engine_cs *ring) 1658 { 1659 if (ring->outstanding_lazy_seqno) 1660 return 0; 1661 1662 if (ring->preallocated_lazy_request == NULL) { 1663 struct drm_i915_gem_request *request; 1664 1665 request = kmalloc(sizeof(*request), M_DRM, M_WAITOK); 1666 if (request == NULL) 1667 return -ENOMEM; 1668 1669 ring->preallocated_lazy_request = request; 1670 } 1671 1672 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); 1673 } 1674 1675 static int __intel_ring_prepare(struct intel_engine_cs *ring, 1676 int bytes) 1677 { 1678 struct intel_ringbuffer *ringbuf = ring->buffer; 1679 int ret; 1680 1681 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { 1682 ret = intel_wrap_ring_buffer(ring); 1683 if (unlikely(ret)) 1684 return ret; 1685 } 1686 1687 if (unlikely(ringbuf->space < bytes)) { 1688 ret = ring_wait_for_space(ring, bytes); 1689 if (unlikely(ret)) 1690 return ret; 1691 } 1692 1693 return 0; 1694 } 1695 1696 int intel_ring_begin(struct intel_engine_cs *ring, 1697 int num_dwords) 1698 { 1699 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1700 int ret; 1701 1702 ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1703 dev_priv->mm.interruptible); 1704 if (ret) 1705 return ret; 1706 1707 ret = 
__intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); 1708 if (ret) 1709 return ret; 1710 1711 /* Preallocate the olr before touching the ring */ 1712 ret = intel_ring_alloc_seqno(ring); 1713 if (ret) 1714 return ret; 1715 1716 ring->buffer->space -= num_dwords * sizeof(uint32_t); 1717 return 0; 1718 } 1719 1720 /* Align the ring tail to a cacheline boundary */ 1721 int intel_ring_cacheline_align(struct intel_engine_cs *ring) 1722 { 1723 int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1724 int ret; 1725 1726 if (num_dwords == 0) 1727 return 0; 1728 1729 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 1730 ret = intel_ring_begin(ring, num_dwords); 1731 if (ret) 1732 return ret; 1733 1734 while (num_dwords--) 1735 intel_ring_emit(ring, MI_NOOP); 1736 1737 intel_ring_advance(ring); 1738 1739 return 0; 1740 } 1741 1742 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) 1743 { 1744 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1745 1746 BUG_ON(ring->outstanding_lazy_seqno); 1747 1748 if (INTEL_INFO(ring->dev)->gen >= 6) { 1749 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1750 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); 1751 if (HAS_VEBOX(ring->dev)) 1752 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); 1753 } 1754 1755 ring->set_seqno(ring, seqno); 1756 ring->hangcheck.seqno = seqno; 1757 } 1758 1759 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, 1760 u32 value) 1761 { 1762 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1763 1764 /* Every tail move must follow the sequence below */ 1765 1766 /* Disable notification that the ring is IDLE. The GT 1767 * will then assume that it is busy and bring it out of rc6. 1768 */ 1769 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 1770 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1771 1772 /* Clear the context id. Here be magic! */ 1773 I915_WRITE64(GEN6_BSD_RNCID, 0x0); 1774 1775 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 1776 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & 1777 GEN6_BSD_SLEEP_INDICATOR) == 0, 1778 50)) 1779 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 1780 1781 /* Now that the ring is fully powered up, update the tail */ 1782 I915_WRITE_TAIL(ring, value); 1783 POSTING_READ(RING_TAIL(ring->mmio_base)); 1784 1785 /* Let the ring send IDLE messages to the GT again, 1786 * and so let it sleep to conserve power when idle. 1787 */ 1788 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 1789 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1790 } 1791 1792 static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, 1793 u32 invalidate, u32 flush) 1794 { 1795 uint32_t cmd; 1796 int ret; 1797 1798 ret = intel_ring_begin(ring, 4); 1799 if (ret) 1800 return ret; 1801 1802 cmd = MI_FLUSH_DW; 1803 if (INTEL_INFO(ring->dev)->gen >= 8) 1804 cmd += 1; 1805 /* 1806 * Bspec vol 1c.5 - video engine command streamer: 1807 * "If ENABLED, all TLBs will be invalidated once the flush 1808 * operation is complete. This bit is only valid when the 1809 * Post-Sync Operation field is a value of 1h or 3h." 
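 * We therefore pair the TLB invalidate below with a post-sync dword write
 * to the HWS scratch page (MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW).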
1810 */ 1811 if (invalidate & I915_GEM_GPU_DOMAINS) 1812 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | 1813 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1814 intel_ring_emit(ring, cmd); 1815 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1816 if (INTEL_INFO(ring->dev)->gen >= 8) { 1817 intel_ring_emit(ring, 0); /* upper addr */ 1818 intel_ring_emit(ring, 0); /* value */ 1819 } else { 1820 intel_ring_emit(ring, 0); 1821 intel_ring_emit(ring, MI_NOOP); 1822 } 1823 intel_ring_advance(ring); 1824 return 0; 1825 } 1826 1827 static int 1828 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 1829 u64 offset, u32 len, 1830 unsigned flags) 1831 { 1832 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1833 bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL && 1834 !(flags & I915_DISPATCH_SECURE); 1835 int ret; 1836 1837 ret = intel_ring_begin(ring, 4); 1838 if (ret) 1839 return ret; 1840 1841 /* FIXME(BDW): Address space and security selectors. */ 1842 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); 1843 intel_ring_emit(ring, lower_32_bits(offset)); 1844 intel_ring_emit(ring, upper_32_bits(offset)); 1845 intel_ring_emit(ring, MI_NOOP); 1846 intel_ring_advance(ring); 1847 1848 return 0; 1849 } 1850 1851 static int 1852 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 1853 u64 offset, u32 len, 1854 unsigned flags) 1855 { 1856 int ret; 1857 1858 ret = intel_ring_begin(ring, 2); 1859 if (ret) 1860 return ret; 1861 1862 intel_ring_emit(ring, 1863 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | 1864 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); 1865 /* bit0-7 is the length on GEN6+ */ 1866 intel_ring_emit(ring, offset); 1867 intel_ring_advance(ring); 1868 1869 return 0; 1870 } 1871 1872 static int 1873 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 1874 u64 offset, u32 len, 1875 unsigned flags) 1876 { 1877 int ret; 1878 1879 ret = intel_ring_begin(ring, 2); 1880 if (ret) 1881 return ret; 1882 1883 intel_ring_emit(ring, 1884 MI_BATCH_BUFFER_START | 1885 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); 1886 /* bit0-7 is the length on GEN6+ */ 1887 intel_ring_emit(ring, offset); 1888 intel_ring_advance(ring); 1889 1890 return 0; 1891 } 1892 1893 /* Blitter support (SandyBridge+) */ 1894 1895 static int gen6_ring_flush(struct intel_engine_cs *ring, 1896 u32 invalidate, u32 flush) 1897 { 1898 struct drm_device *dev = ring->dev; 1899 uint32_t cmd; 1900 int ret; 1901 1902 ret = intel_ring_begin(ring, 4); 1903 if (ret) 1904 return ret; 1905 1906 cmd = MI_FLUSH_DW; 1907 if (INTEL_INFO(ring->dev)->gen >= 8) 1908 cmd += 1; 1909 /* 1910 * Bspec vol 1c.3 - blitter engine command streamer: 1911 * "If ENABLED, all TLBs will be invalidated once the flush 1912 * operation is complete. This bit is only valid when the 1913 * Post-Sync Operation field is a value of 1h or 3h." 
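 * As in the BSD flush above, the invalidate path below adds a post-sync
 * store-dword to the HWS scratch page to satisfy this requirement.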
1914 */ 1915 if (invalidate & I915_GEM_DOMAIN_RENDER) 1916 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | 1917 MI_FLUSH_DW_OP_STOREDW; 1918 intel_ring_emit(ring, cmd); 1919 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1920 if (INTEL_INFO(ring->dev)->gen >= 8) { 1921 intel_ring_emit(ring, 0); /* upper addr */ 1922 intel_ring_emit(ring, 0); /* value */ 1923 } else { 1924 intel_ring_emit(ring, 0); 1925 intel_ring_emit(ring, MI_NOOP); 1926 } 1927 intel_ring_advance(ring); 1928 1929 if (IS_GEN7(dev) && !invalidate && flush) 1930 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); 1931 1932 return 0; 1933 } 1934 1935 int intel_init_render_ring_buffer(struct drm_device *dev) 1936 { 1937 struct drm_i915_private *dev_priv = dev->dev_private; 1938 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; 1939 1940 ring->name = "render ring"; 1941 ring->id = RCS; 1942 ring->mmio_base = RENDER_RING_BASE; 1943 1944 if (INTEL_INFO(dev)->gen >= 6) { 1945 ring->add_request = gen6_add_request; 1946 ring->flush = gen7_render_ring_flush; 1947 if (INTEL_INFO(dev)->gen == 6) 1948 ring->flush = gen6_render_ring_flush; 1949 if (INTEL_INFO(dev)->gen >= 8) { 1950 ring->flush = gen8_render_ring_flush; 1951 ring->irq_get = gen8_ring_get_irq; 1952 ring->irq_put = gen8_ring_put_irq; 1953 } else { 1954 ring->irq_get = gen6_ring_get_irq; 1955 ring->irq_put = gen6_ring_put_irq; 1956 } 1957 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 1958 ring->get_seqno = gen6_ring_get_seqno; 1959 ring->set_seqno = ring_set_seqno; 1960 ring->semaphore.sync_to = gen6_ring_sync; 1961 ring->semaphore.signal = gen6_signal; 1962 /* 1963 * The current semaphore is only applied on pre-gen8 platform. 1964 * And there is no VCS2 ring on the pre-gen8 platform. So the 1965 * semaphore between RCS and VCS2 is initialized as INVALID. 1966 * Gen8 will initialize the sema between VCS2 and RCS later. 
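 * (See the mbox.wait/mbox.signal tables filled in just below.)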
		 */
		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
		ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
		ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
		ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (IS_GEN8(dev))
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/*
	 * Workaround batchbuffer to combat CS tlb bug.
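	 *
	 * On 830/845 (HAS_BROKEN_CS_TLB) the dispatch path copies batches
	 * into this permanently pinned scratch object and executes them
	 * from there, which is why it is sized to I830_BATCH_LIMIT and
	 * pinned into the GGTT up front.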
	 */
	if (HAS_BROKEN_CS_TLB(dev)) {
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int ret;

	if (ringbuf == NULL) {
		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
		if (!ringbuf)
			return -ENOMEM;
		ring->buffer = ringbuf;
	}

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		ret = -ENODEV;
		goto err_ringbuf;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ringbuf->size = size;
	ringbuf->effective_size = ringbuf->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ringbuf->effective_size -= 2 * CACHELINE_BYTES;

	ringbuf->virtual_start = ioremap_wc(start, size);
	if (ringbuf->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		ret = -ENOMEM;
		goto err_ringbuf;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			goto err_vstart;
	}

	return 0;

err_vstart:
	pmap_unmapdev((vm_offset_t)ring->buffer->virtual_start, size);
err_ringbuf:
	kfree(ringbuf);
	ring->buffer = NULL;
	return ret;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->irq_enable_mask =
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
			ring->dispatch_execbuffer =
				gen8_ring_dispatch_execbuffer;
		} else {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
			ring->dispatch_execbuffer =
				gen6_ring_dispatch_execbuffer;
		}
		ring->semaphore.sync_to = gen6_ring_sync;
		ring->semaphore.signal = gen6_signal;
		/*
		 * The current semaphore scheme is only used on pre-gen8
		 * platforms, and there is no VCS2 ring on pre-gen8, so the
		 * semaphore between VCS and VCS2 is initialized as INVALID.
		 * Gen8 will initialize the semaphore between VCS2 and VCS
		 * later.
		 */
		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
		ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
		ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
		ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

/**
 * Initialize the second BSD ring for Broadwell GT3.
 * Note that this ring only exists on Broadwell GT3.
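 *
 * Apart from the mmio base, the interrupt shift and the semaphore
 * mailbox set-up, the programming matches the gen8 path of the first
 * BSD ring.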
 */
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	if (INTEL_INFO(dev)->gen != 8) {
		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
		return -EINVAL;
	}

	ring->name = "bsd2 ring";
	ring->id = VCS2;

	ring->write_tail = ring_write_tail;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->flush = gen6_bsd_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_get = gen8_ring_get_irq;
	ring->irq_put = gen8_ring_put_irq;
	ring->dispatch_execbuffer =
		gen8_ring_dispatch_execbuffer;
	ring->semaphore.sync_to = gen6_ring_sync;
	ring->semaphore.signal = gen6_signal;
	/*
	 * The current semaphore scheme is only used on pre-gen8 platforms,
	 * and there is no bsd2 ring on pre-gen8, so for now the semaphore
	 * registers between VCS2 and the other rings are initialized as
	 * INVALID.  Gen8 will initialize the semaphores between VCS2 and
	 * the other rings later.
	 */
	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;

	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	} else {
		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	}
	ring->semaphore.sync_to = gen6_ring_sync;
	ring->semaphore.signal = gen6_signal;
	/*
	 * The current semaphore scheme is only used on pre-gen8 platforms,
	 * and there is no VCS2 ring on pre-gen8, so the semaphore between
	 * BCS and VCS2 is initialized as INVALID.
	 * Gen8 will initialize the semaphore between BCS and VCS2 later.
	 */
	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
	ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;

	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	} else {
		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		ring->irq_get = hsw_vebox_get_irq;
		ring->irq_put = hsw_vebox_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	}
	ring->semaphore.sync_to = gen6_ring_sync;
	ring->semaphore.signal = gen6_signal;
	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
	ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}

void
intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret &&
	    !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	stop_ring(ring);
}