/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64

static inline int __ring_space(int head, int tail, int size)
{
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

static inline int ring_space(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
}

static bool intel_ring_stopped(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}

void __intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_ring_stopped(ring))
		return;
	ring->write_tail(ring, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct intel_engine_cs *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
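 *
 * Concretely (a summary of the code below, not additional PRM text):
 * the first PIPE_CONTROL carries only CS_STALL | STALL_AT_SCOREBOARD
 * with no post-sync op, and the second carries only a QW_WRITE
 * post-sync op targeting the ring's scratch page; each is emitted as
 * 6 dwords including a trailing MI_NOOP for alignment.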
192 */ 193 static int 194 intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring) 195 { 196 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 197 int ret; 198 199 200 ret = intel_ring_begin(ring, 6); 201 if (ret) 202 return ret; 203 204 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 205 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | 206 PIPE_CONTROL_STALL_AT_SCOREBOARD); 207 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ 208 intel_ring_emit(ring, 0); /* low dword */ 209 intel_ring_emit(ring, 0); /* high dword */ 210 intel_ring_emit(ring, MI_NOOP); 211 intel_ring_advance(ring); 212 213 ret = intel_ring_begin(ring, 6); 214 if (ret) 215 return ret; 216 217 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); 218 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); 219 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ 220 intel_ring_emit(ring, 0); 221 intel_ring_emit(ring, 0); 222 intel_ring_emit(ring, MI_NOOP); 223 intel_ring_advance(ring); 224 225 return 0; 226 } 227 228 static int 229 gen6_render_ring_flush(struct intel_engine_cs *ring, 230 u32 invalidate_domains, u32 flush_domains) 231 { 232 u32 flags = 0; 233 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 234 int ret; 235 236 /* Force SNB workarounds for PIPE_CONTROL flushes */ 237 ret = intel_emit_post_sync_nonzero_flush(ring); 238 if (ret) 239 return ret; 240 241 /* Just flush everything. Experiments have shown that reducing the 242 * number of bits based on the write domains has little performance 243 * impact. 244 */ 245 if (flush_domains) { 246 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 247 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 248 /* 249 * Ensure that any following seqno writes only happen 250 * when the render cache is indeed flushed. 251 */ 252 flags |= PIPE_CONTROL_CS_STALL; 253 } 254 if (invalidate_domains) { 255 flags |= PIPE_CONTROL_TLB_INVALIDATE; 256 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 257 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 258 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 259 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 260 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 261 /* 262 * TLB invalidate requires a post-sync write. 
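		 * The QW_WRITE added below provides that post-sync
		 * operation; it targets the same scratch page used by the
		 * workaround flush above, so the written value is never
		 * read back and only its ordering side effect matters.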
263 */ 264 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL; 265 } 266 267 ret = intel_ring_begin(ring, 4); 268 if (ret) 269 return ret; 270 271 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 272 intel_ring_emit(ring, flags); 273 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 274 intel_ring_emit(ring, 0); 275 intel_ring_advance(ring); 276 277 return 0; 278 } 279 280 static int 281 gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring) 282 { 283 int ret; 284 285 ret = intel_ring_begin(ring, 4); 286 if (ret) 287 return ret; 288 289 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 290 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | 291 PIPE_CONTROL_STALL_AT_SCOREBOARD); 292 intel_ring_emit(ring, 0); 293 intel_ring_emit(ring, 0); 294 intel_ring_advance(ring); 295 296 return 0; 297 } 298 299 static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value) 300 { 301 int ret; 302 303 if (!ring->fbc_dirty) 304 return 0; 305 306 ret = intel_ring_begin(ring, 6); 307 if (ret) 308 return ret; 309 /* WaFbcNukeOn3DBlt:ivb/hsw */ 310 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 311 intel_ring_emit(ring, MSG_FBC_REND_STATE); 312 intel_ring_emit(ring, value); 313 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT); 314 intel_ring_emit(ring, MSG_FBC_REND_STATE); 315 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 316 intel_ring_advance(ring); 317 318 ring->fbc_dirty = false; 319 return 0; 320 } 321 322 static int 323 gen7_render_ring_flush(struct intel_engine_cs *ring, 324 u32 invalidate_domains, u32 flush_domains) 325 { 326 u32 flags = 0; 327 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 328 int ret; 329 330 /* 331 * Ensure that any following seqno writes only happen when the render 332 * cache is indeed flushed. 333 * 334 * Workaround: 4th PIPE_CONTROL command (except the ones with only 335 * read-cache invalidate bits set) must have the CS_STALL bit set. We 336 * don't try to be clever and just set it unconditionally. 337 */ 338 flags |= PIPE_CONTROL_CS_STALL; 339 340 /* Just flush everything. Experiments have shown that reducing the 341 * number of bits based on the write domains has little performance 342 * impact. 343 */ 344 if (flush_domains) { 345 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 346 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 347 } 348 if (invalidate_domains) { 349 flags |= PIPE_CONTROL_TLB_INVALIDATE; 350 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 351 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 352 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 353 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 354 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 355 /* 356 * TLB invalidate requires a post-sync write. 357 */ 358 flags |= PIPE_CONTROL_QW_WRITE; 359 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 360 361 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD; 362 363 /* Workaround: we must issue a pipe_control with CS-stall bit 364 * set before a pipe_control command that has the state cache 365 * invalidate bit set. 
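		 * gen7_render_ring_cs_stall_wa() below emits exactly that:
		 * a 4-dword PIPE_CONTROL carrying only CS_STALL and
		 * STALL_AT_SCOREBOARD, with a zero address and value.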
*/ 366 gen7_render_ring_cs_stall_wa(ring); 367 } 368 369 ret = intel_ring_begin(ring, 4); 370 if (ret) 371 return ret; 372 373 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 374 intel_ring_emit(ring, flags); 375 intel_ring_emit(ring, scratch_addr); 376 intel_ring_emit(ring, 0); 377 intel_ring_advance(ring); 378 379 if (!invalidate_domains && flush_domains) 380 return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); 381 382 return 0; 383 } 384 385 static int 386 gen8_render_ring_flush(struct intel_engine_cs *ring, 387 u32 invalidate_domains, u32 flush_domains) 388 { 389 u32 flags = 0; 390 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 391 int ret; 392 393 flags |= PIPE_CONTROL_CS_STALL; 394 395 if (flush_domains) { 396 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; 397 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; 398 } 399 if (invalidate_domains) { 400 flags |= PIPE_CONTROL_TLB_INVALIDATE; 401 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; 402 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; 403 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; 404 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; 405 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; 406 flags |= PIPE_CONTROL_QW_WRITE; 407 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; 408 } 409 410 ret = intel_ring_begin(ring, 6); 411 if (ret) 412 return ret; 413 414 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); 415 intel_ring_emit(ring, flags); 416 intel_ring_emit(ring, scratch_addr); 417 intel_ring_emit(ring, 0); 418 intel_ring_emit(ring, 0); 419 intel_ring_emit(ring, 0); 420 intel_ring_advance(ring); 421 422 return 0; 423 424 } 425 426 static void ring_write_tail(struct intel_engine_cs *ring, 427 u32 value) 428 { 429 struct drm_i915_private *dev_priv = ring->dev->dev_private; 430 I915_WRITE_TAIL(ring, value); 431 } 432 433 u64 intel_ring_get_active_head(struct intel_engine_cs *ring) 434 { 435 struct drm_i915_private *dev_priv = ring->dev->dev_private; 436 u64 acthd; 437 438 if (INTEL_INFO(ring->dev)->gen >= 8) 439 acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base), 440 RING_ACTHD_UDW(ring->mmio_base)); 441 else if (INTEL_INFO(ring->dev)->gen >= 4) 442 acthd = I915_READ(RING_ACTHD(ring->mmio_base)); 443 else 444 acthd = I915_READ(ACTHD); 445 446 return acthd; 447 } 448 449 static void ring_setup_phys_status_page(struct intel_engine_cs *ring) 450 { 451 struct drm_i915_private *dev_priv = ring->dev->dev_private; 452 u32 addr; 453 454 addr = dev_priv->status_page_dmah->busaddr; 455 if (INTEL_INFO(ring->dev)->gen >= 4) 456 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 457 I915_WRITE(HWS_PGA, addr); 458 } 459 460 static bool stop_ring(struct intel_engine_cs *ring) 461 { 462 struct drm_i915_private *dev_priv = to_i915(ring->dev); 463 464 if (!IS_GEN2(ring->dev)) { 465 I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); 466 if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { 467 DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); 468 return false; 469 } 470 } 471 472 I915_WRITE_CTL(ring, 0); 473 I915_WRITE_HEAD(ring, 0); 474 ring->write_tail(ring, 0); 475 476 if (!IS_GEN2(ring->dev)) { 477 (void)I915_READ_CTL(ring); 478 I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); 479 } 480 481 return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; 482 } 483 484 static int init_ring_common(struct intel_engine_cs *ring) 485 { 486 struct drm_device *dev = ring->dev; 487 struct drm_i915_private *dev_priv = dev->dev_private; 488 struct intel_ringbuffer *ringbuf = ring->buffer; 489 struct drm_i915_gem_object *obj = 
ringbuf->obj; 490 int ret = 0; 491 492 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 493 494 if (!stop_ring(ring)) { 495 /* G45 ring initialization often fails to reset head to zero */ 496 DRM_DEBUG_KMS("%s head not reset to zero " 497 "ctl %08x head %08x tail %08x start %08x\n", 498 ring->name, 499 I915_READ_CTL(ring), 500 I915_READ_HEAD(ring), 501 I915_READ_TAIL(ring), 502 I915_READ_START(ring)); 503 504 if (!stop_ring(ring)) { 505 DRM_ERROR("failed to set %s head to zero " 506 "ctl %08x head %08x tail %08x start %08x\n", 507 ring->name, 508 I915_READ_CTL(ring), 509 I915_READ_HEAD(ring), 510 I915_READ_TAIL(ring), 511 I915_READ_START(ring)); 512 ret = -EIO; 513 goto out; 514 } 515 } 516 517 if (I915_NEED_GFX_HWS(dev)) 518 intel_ring_setup_status_page(ring); 519 else 520 ring_setup_phys_status_page(ring); 521 522 /* Enforce ordering by reading HEAD register back */ 523 I915_READ_HEAD(ring); 524 525 /* Initialize the ring. This must happen _after_ we've cleared the ring 526 * registers with the above sequence (the readback of the HEAD registers 527 * also enforces ordering), otherwise the hw might lose the new ring 528 * register values. */ 529 I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); 530 I915_WRITE_CTL(ring, 531 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) 532 | RING_VALID); 533 534 /* If the head is still not zero, the ring is dead */ 535 if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && 536 I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && 537 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { 538 DRM_ERROR("%s initialization failed " 539 "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n", 540 ring->name, 541 I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID, 542 I915_READ_HEAD(ring), I915_READ_TAIL(ring), 543 I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj)); 544 ret = -EIO; 545 goto out; 546 } 547 548 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 549 i915_kernel_lost_context(ring->dev); 550 else { 551 ringbuf->head = I915_READ_HEAD(ring); 552 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 553 ringbuf->space = ring_space(ring); 554 ringbuf->last_retired_head = -1; 555 } 556 557 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 558 559 out: 560 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL); 561 562 return ret; 563 } 564 565 static int 566 init_pipe_control(struct intel_engine_cs *ring) 567 { 568 int ret; 569 570 if (ring->scratch.obj) 571 return 0; 572 573 ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); 574 if (ring->scratch.obj == NULL) { 575 DRM_ERROR("Failed to allocate seqno page\n"); 576 ret = -ENOMEM; 577 goto err; 578 } 579 580 ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 581 if (ret) 582 goto err_unref; 583 584 ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0); 585 if (ret) 586 goto err_unref; 587 588 ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); 589 ring->scratch.cpu_page = kmap(ring->scratch.obj->pages[0]); 590 if (ring->scratch.cpu_page == NULL) { 591 ret = -ENOMEM; 592 goto err_unpin; 593 } 594 595 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 596 ring->name, ring->scratch.gtt_offset); 597 return 0; 598 599 err_unpin: 600 i915_gem_object_ggtt_unpin(ring->scratch.obj); 601 err_unref: 602 drm_gem_object_unreference(&ring->scratch.obj->base); 603 err: 604 return ret; 605 } 606 607 static int init_render_ring(struct intel_engine_cs *ring) 608 { 609 struct drm_device *dev = ring->dev; 610 struct 
drm_i915_private *dev_priv = dev->dev_private; 611 int ret = init_ring_common(ring); 612 613 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 614 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) 615 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 616 617 /* We need to disable the AsyncFlip performance optimisations in order 618 * to use MI_WAIT_FOR_EVENT within the CS. It should already be 619 * programmed to '1' on all products. 620 * 621 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv 622 */ 623 if (INTEL_INFO(dev)->gen >= 6) 624 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 625 626 /* Required for the hardware to program scanline values for waiting */ 627 /* WaEnableFlushTlbInvalidationMode:snb */ 628 if (INTEL_INFO(dev)->gen == 6) 629 I915_WRITE(GFX_MODE, 630 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 631 632 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 633 if (IS_GEN7(dev)) 634 I915_WRITE(GFX_MODE_GEN7, 635 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 636 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 637 638 if (INTEL_INFO(dev)->gen >= 5) { 639 ret = init_pipe_control(ring); 640 if (ret) 641 return ret; 642 } 643 644 if (IS_GEN6(dev)) { 645 /* From the Sandybridge PRM, volume 1 part 3, page 24: 646 * "If this bit is set, STCunit will have LRA as replacement 647 * policy. [...] This bit must be reset. LRA replacement 648 * policy is not supported." 649 */ 650 I915_WRITE(CACHE_MODE_0, 651 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 652 } 653 654 if (INTEL_INFO(dev)->gen >= 6) 655 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 656 657 if (HAS_L3_DPF(dev)) 658 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 659 660 return ret; 661 } 662 663 static void render_ring_cleanup(struct intel_engine_cs *ring) 664 { 665 struct drm_device *dev = ring->dev; 666 667 if (ring->scratch.obj == NULL) 668 return; 669 670 if (INTEL_INFO(dev)->gen >= 5) { 671 kunmap(ring->scratch.obj->pages[0]); 672 i915_gem_object_ggtt_unpin(ring->scratch.obj); 673 } 674 675 drm_gem_object_unreference(&ring->scratch.obj->base); 676 ring->scratch.obj = NULL; 677 } 678 679 static int gen6_signal(struct intel_engine_cs *signaller, 680 unsigned int num_dwords) 681 { 682 struct drm_device *dev = signaller->dev; 683 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct intel_engine_cs *useless; 685 int i, ret; 686 687 /* NB: In order to be able to do semaphore MBOX updates for varying 688 * number of rings, it's easiest if we round up each individual update 689 * to a multiple of 2 (since ring updates must always be a multiple of 690 * 2) even though the actual update only requires 3 dwords. 
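	 * As a concrete reading of the arithmetic below: each mailbox
	 * update is LOAD_REGISTER_IMM + register + seqno (3 dwords),
	 * padded with an MI_NOOP up to MBOX_UPDATE_DWORDS (4), and one
	 * such slot is reserved for every ring other than the signaller.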
	 */
#define MBOX_UPDATE_DWORDS 4
	if (i915_semaphore_is_enabled(dev))
		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS);
	else
		return intel_ring_begin(signaller, num_dwords);

	ret = intel_ring_begin(signaller, num_dwords);
	if (ret)
		return ret;
#undef MBOX_UPDATE_DWORDS

	for_each_ring(useless, dev_priv, i) {
		u32 mbox_reg = signaller->semaphore.mbox.signal[i];
		if (mbox_reg != GEN6_NOSYNC) {
			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit(signaller, mbox_reg);
			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno);
			intel_ring_emit(signaller, MI_NOOP);
		} else {
			intel_ring_emit(signaller, MI_NOOP);
			intel_ring_emit(signaller, MI_NOOP);
			intel_ring_emit(signaller, MI_NOOP);
			intel_ring_emit(signaller, MI_NOOP);
		}
	}

	return 0;
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring: ring that is adding a request
 * @seqno: return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_engine_cs *ring)
{
	int ret;

	ret = ring->semaphore.signal(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, ring->outstanding_lazy_seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter: ring that is waiting
 * @signaller: ring which has, or will signal
 * @seqno: seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_engine_cs *waiter,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
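	 *
	 * (Illustrative example, not from the spec: if GEM waits for
	 * seqno 42 to have been executed, the hardware "greater than"
	 * comparison must be armed with 41, which is what the decrement
	 * below achieves.)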
776 */ 777 seqno -= 1; 778 779 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID); 780 781 ret = intel_ring_begin(waiter, 4); 782 if (ret) 783 return ret; 784 785 /* If seqno wrap happened, omit the wait with no-ops */ 786 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { 787 intel_ring_emit(waiter, dw1 | wait_mbox); 788 intel_ring_emit(waiter, seqno); 789 intel_ring_emit(waiter, 0); 790 intel_ring_emit(waiter, MI_NOOP); 791 } else { 792 intel_ring_emit(waiter, MI_NOOP); 793 intel_ring_emit(waiter, MI_NOOP); 794 intel_ring_emit(waiter, MI_NOOP); 795 intel_ring_emit(waiter, MI_NOOP); 796 } 797 intel_ring_advance(waiter); 798 799 return 0; 800 } 801 802 #define PIPE_CONTROL_FLUSH(ring__, addr__) \ 803 do { \ 804 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ 805 PIPE_CONTROL_DEPTH_STALL); \ 806 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ 807 intel_ring_emit(ring__, 0); \ 808 intel_ring_emit(ring__, 0); \ 809 } while (0) 810 811 static int 812 pc_render_add_request(struct intel_engine_cs *ring) 813 { 814 u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; 815 int ret; 816 817 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently 818 * incoherent with writes to memory, i.e. completely fubar, 819 * so we need to use PIPE_NOTIFY instead. 820 * 821 * However, we also need to workaround the qword write 822 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to 823 * memory before requesting an interrupt. 824 */ 825 ret = intel_ring_begin(ring, 32); 826 if (ret) 827 return ret; 828 829 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 830 PIPE_CONTROL_WRITE_FLUSH | 831 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 832 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 833 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 834 intel_ring_emit(ring, 0); 835 PIPE_CONTROL_FLUSH(ring, scratch_addr); 836 scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ 837 PIPE_CONTROL_FLUSH(ring, scratch_addr); 838 scratch_addr += 2 * CACHELINE_BYTES; 839 PIPE_CONTROL_FLUSH(ring, scratch_addr); 840 scratch_addr += 2 * CACHELINE_BYTES; 841 PIPE_CONTROL_FLUSH(ring, scratch_addr); 842 scratch_addr += 2 * CACHELINE_BYTES; 843 PIPE_CONTROL_FLUSH(ring, scratch_addr); 844 scratch_addr += 2 * CACHELINE_BYTES; 845 PIPE_CONTROL_FLUSH(ring, scratch_addr); 846 847 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 848 PIPE_CONTROL_WRITE_FLUSH | 849 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 850 PIPE_CONTROL_NOTIFY); 851 intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 852 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 853 intel_ring_emit(ring, 0); 854 __intel_ring_advance(ring); 855 856 return 0; 857 } 858 859 static u32 860 gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 861 { 862 /* Workaround to force correct ordering between irq and seqno writes on 863 * ivb (and maybe also on snb) by reading from a CS register (like 864 * ACTHD) before reading the status page. 
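	 * The !lazy_coherency path below is that register read: a posted
	 * read of ACTHD forces the seqno write to land before we sample
	 * the status page.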
*/ 865 if (!lazy_coherency) { 866 struct drm_i915_private *dev_priv = ring->dev->dev_private; 867 POSTING_READ(RING_ACTHD(ring->mmio_base)); 868 } 869 870 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 871 } 872 873 static u32 874 ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 875 { 876 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 877 } 878 879 static void 880 ring_set_seqno(struct intel_engine_cs *ring, u32 seqno) 881 { 882 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); 883 } 884 885 static u32 886 pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) 887 { 888 return ring->scratch.cpu_page[0]; 889 } 890 891 static void 892 pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno) 893 { 894 ring->scratch.cpu_page[0] = seqno; 895 } 896 897 static bool 898 gen5_ring_get_irq(struct intel_engine_cs *ring) 899 { 900 struct drm_device *dev = ring->dev; 901 struct drm_i915_private *dev_priv = dev->dev_private; 902 903 if (!dev->irq_enabled) 904 return false; 905 906 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 907 if (ring->irq_refcount++ == 0) 908 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 909 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 910 911 return true; 912 } 913 914 static void 915 gen5_ring_put_irq(struct intel_engine_cs *ring) 916 { 917 struct drm_device *dev = ring->dev; 918 struct drm_i915_private *dev_priv = dev->dev_private; 919 920 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 921 if (--ring->irq_refcount == 0) 922 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 923 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 924 } 925 926 static bool 927 i9xx_ring_get_irq(struct intel_engine_cs *ring) 928 { 929 struct drm_device *dev = ring->dev; 930 struct drm_i915_private *dev_priv = dev->dev_private; 931 932 if (!dev->irq_enabled) 933 return false; 934 935 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 936 if (ring->irq_refcount++ == 0) { 937 dev_priv->irq_mask &= ~ring->irq_enable_mask; 938 I915_WRITE(IMR, dev_priv->irq_mask); 939 POSTING_READ(IMR); 940 } 941 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 942 943 return true; 944 } 945 946 static void 947 i9xx_ring_put_irq(struct intel_engine_cs *ring) 948 { 949 struct drm_device *dev = ring->dev; 950 struct drm_i915_private *dev_priv = dev->dev_private; 951 952 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 953 if (--ring->irq_refcount == 0) { 954 dev_priv->irq_mask |= ring->irq_enable_mask; 955 I915_WRITE(IMR, dev_priv->irq_mask); 956 POSTING_READ(IMR); 957 } 958 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 959 } 960 961 static bool 962 i8xx_ring_get_irq(struct intel_engine_cs *ring) 963 { 964 struct drm_device *dev = ring->dev; 965 struct drm_i915_private *dev_priv = dev->dev_private; 966 967 if (!dev->irq_enabled) 968 return false; 969 970 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 971 if (ring->irq_refcount++ == 0) { 972 dev_priv->irq_mask &= ~ring->irq_enable_mask; 973 I915_WRITE16(IMR, dev_priv->irq_mask); 974 POSTING_READ16(IMR); 975 } 976 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 977 978 return true; 979 } 980 981 static void 982 i8xx_ring_put_irq(struct intel_engine_cs *ring) 983 { 984 struct drm_device *dev = ring->dev; 985 struct drm_i915_private *dev_priv = dev->dev_private; 986 987 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 988 if (--ring->irq_refcount == 0) { 989 dev_priv->irq_mask |= ring->irq_enable_mask; 990 I915_WRITE16(IMR, dev_priv->irq_mask); 991 POSTING_READ16(IMR); 992 } 993 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 994 } 995 996 void 
intel_ring_setup_status_page(struct intel_engine_cs *ring) 997 { 998 struct drm_device *dev = ring->dev; 999 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1000 u32 mmio = 0; 1001 1002 /* The ring status page addresses are no longer next to the rest of 1003 * the ring registers as of gen7. 1004 */ 1005 if (IS_GEN7(dev)) { 1006 switch (ring->id) { 1007 case RCS: 1008 mmio = RENDER_HWS_PGA_GEN7; 1009 break; 1010 case BCS: 1011 mmio = BLT_HWS_PGA_GEN7; 1012 break; 1013 /* 1014 * VCS2 actually doesn't exist on Gen7. Only shut up 1015 * gcc switch check warning 1016 */ 1017 case VCS2: 1018 case VCS: 1019 mmio = BSD_HWS_PGA_GEN7; 1020 break; 1021 case VECS: 1022 mmio = VEBOX_HWS_PGA_GEN7; 1023 break; 1024 } 1025 } else if (IS_GEN6(ring->dev)) { 1026 mmio = RING_HWS_PGA_GEN6(ring->mmio_base); 1027 } else { 1028 /* XXX: gen8 returns to sanity */ 1029 mmio = RING_HWS_PGA(ring->mmio_base); 1030 } 1031 1032 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); 1033 POSTING_READ(mmio); 1034 1035 /* 1036 * Flush the TLB for this page 1037 * 1038 * FIXME: These two bits have disappeared on gen8, so a question 1039 * arises: do we still need this and if so how should we go about 1040 * invalidating the TLB? 1041 */ 1042 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 1043 u32 reg = RING_INSTPM(ring->mmio_base); 1044 1045 /* ring should be idle before issuing a sync flush*/ 1046 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); 1047 1048 I915_WRITE(reg, 1049 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 1050 INSTPM_SYNC_FLUSH)); 1051 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 1052 1000)) 1053 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 1054 ring->name); 1055 } 1056 } 1057 1058 static int 1059 bsd_ring_flush(struct intel_engine_cs *ring, 1060 u32 invalidate_domains, 1061 u32 flush_domains) 1062 { 1063 int ret; 1064 1065 ret = intel_ring_begin(ring, 2); 1066 if (ret) 1067 return ret; 1068 1069 intel_ring_emit(ring, MI_FLUSH); 1070 intel_ring_emit(ring, MI_NOOP); 1071 intel_ring_advance(ring); 1072 return 0; 1073 } 1074 1075 static int 1076 i9xx_add_request(struct intel_engine_cs *ring) 1077 { 1078 int ret; 1079 1080 ret = intel_ring_begin(ring, 4); 1081 if (ret) 1082 return ret; 1083 1084 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 1085 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1086 intel_ring_emit(ring, ring->outstanding_lazy_seqno); 1087 intel_ring_emit(ring, MI_USER_INTERRUPT); 1088 __intel_ring_advance(ring); 1089 1090 return 0; 1091 } 1092 1093 static bool 1094 gen6_ring_get_irq(struct intel_engine_cs *ring) 1095 { 1096 struct drm_device *dev = ring->dev; 1097 struct drm_i915_private *dev_priv = dev->dev_private; 1098 1099 if (!dev->irq_enabled) 1100 return false; 1101 1102 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1103 if (ring->irq_refcount++ == 0) { 1104 if (HAS_L3_DPF(dev) && ring->id == RCS) 1105 I915_WRITE_IMR(ring, 1106 ~(ring->irq_enable_mask | 1107 GT_PARITY_ERROR(dev))); 1108 else 1109 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1110 ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); 1111 } 1112 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1113 1114 return true; 1115 } 1116 1117 static void 1118 gen6_ring_put_irq(struct intel_engine_cs *ring) 1119 { 1120 struct drm_device *dev = ring->dev; 1121 struct drm_i915_private *dev_priv = dev->dev_private; 1122 1123 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1124 if (--ring->irq_refcount == 0) { 1125 if (HAS_L3_DPF(dev) && ring->id == RCS) 1126 
I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 1127 else 1128 I915_WRITE_IMR(ring, ~0); 1129 ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask); 1130 } 1131 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1132 } 1133 1134 static bool 1135 hsw_vebox_get_irq(struct intel_engine_cs *ring) 1136 { 1137 struct drm_device *dev = ring->dev; 1138 struct drm_i915_private *dev_priv = dev->dev_private; 1139 1140 if (!dev->irq_enabled) 1141 return false; 1142 1143 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1144 if (ring->irq_refcount++ == 0) { 1145 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1146 snb_enable_pm_irq(dev_priv, ring->irq_enable_mask); 1147 } 1148 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1149 1150 return true; 1151 } 1152 1153 static void 1154 hsw_vebox_put_irq(struct intel_engine_cs *ring) 1155 { 1156 struct drm_device *dev = ring->dev; 1157 struct drm_i915_private *dev_priv = dev->dev_private; 1158 1159 if (!dev->irq_enabled) 1160 return; 1161 1162 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1163 if (--ring->irq_refcount == 0) { 1164 I915_WRITE_IMR(ring, ~0); 1165 snb_disable_pm_irq(dev_priv, ring->irq_enable_mask); 1166 } 1167 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1168 } 1169 1170 static bool 1171 gen8_ring_get_irq(struct intel_engine_cs *ring) 1172 { 1173 struct drm_device *dev = ring->dev; 1174 struct drm_i915_private *dev_priv = dev->dev_private; 1175 1176 if (!dev->irq_enabled) 1177 return false; 1178 1179 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1180 if (ring->irq_refcount++ == 0) { 1181 if (HAS_L3_DPF(dev) && ring->id == RCS) { 1182 I915_WRITE_IMR(ring, 1183 ~(ring->irq_enable_mask | 1184 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); 1185 } else { 1186 I915_WRITE_IMR(ring, ~ring->irq_enable_mask); 1187 } 1188 POSTING_READ(RING_IMR(ring->mmio_base)); 1189 } 1190 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1191 1192 return true; 1193 } 1194 1195 static void 1196 gen8_ring_put_irq(struct intel_engine_cs *ring) 1197 { 1198 struct drm_device *dev = ring->dev; 1199 struct drm_i915_private *dev_priv = dev->dev_private; 1200 1201 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1202 if (--ring->irq_refcount == 0) { 1203 if (HAS_L3_DPF(dev) && ring->id == RCS) { 1204 I915_WRITE_IMR(ring, 1205 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); 1206 } else { 1207 I915_WRITE_IMR(ring, ~0); 1208 } 1209 POSTING_READ(RING_IMR(ring->mmio_base)); 1210 } 1211 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1212 } 1213 1214 static int 1215 i965_dispatch_execbuffer(struct intel_engine_cs *ring, 1216 u64 offset, u32 length, 1217 unsigned flags) 1218 { 1219 int ret; 1220 1221 ret = intel_ring_begin(ring, 2); 1222 if (ret) 1223 return ret; 1224 1225 intel_ring_emit(ring, 1226 MI_BATCH_BUFFER_START | 1227 MI_BATCH_GTT | 1228 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); 1229 intel_ring_emit(ring, offset); 1230 intel_ring_advance(ring); 1231 1232 return 0; 1233 } 1234 1235 /* Just userspace ABI convention to limit the wa batch bo to a resonable size */ 1236 #define I830_BATCH_LIMIT (256*1024) 1237 static int 1238 i830_dispatch_execbuffer(struct intel_engine_cs *ring, 1239 u64 offset, u32 len, 1240 unsigned flags) 1241 { 1242 int ret; 1243 1244 if (flags & I915_DISPATCH_PINNED) { 1245 ret = intel_ring_begin(ring, 4); 1246 if (ret) 1247 return ret; 1248 1249 intel_ring_emit(ring, MI_BATCH_BUFFER); 1250 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 
0 : MI_BATCH_NON_SECURE)); 1251 intel_ring_emit(ring, offset + len - 8); 1252 intel_ring_emit(ring, MI_NOOP); 1253 intel_ring_advance(ring); 1254 } else { 1255 u32 cs_offset = ring->scratch.gtt_offset; 1256 1257 if (len > I830_BATCH_LIMIT) 1258 return -ENOSPC; 1259 1260 ret = intel_ring_begin(ring, 9+3); 1261 if (ret) 1262 return ret; 1263 /* Blit the batch (which has now all relocs applied) to the stable batch 1264 * scratch bo area (so that the CS never stumbles over its tlb 1265 * invalidation bug) ... */ 1266 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | 1267 XY_SRC_COPY_BLT_WRITE_ALPHA | 1268 XY_SRC_COPY_BLT_WRITE_RGB); 1269 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); 1270 intel_ring_emit(ring, 0); 1271 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); 1272 intel_ring_emit(ring, cs_offset); 1273 intel_ring_emit(ring, 0); 1274 intel_ring_emit(ring, 4096); 1275 intel_ring_emit(ring, offset); 1276 intel_ring_emit(ring, MI_FLUSH); 1277 1278 /* ... and execute it. */ 1279 intel_ring_emit(ring, MI_BATCH_BUFFER); 1280 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 1281 intel_ring_emit(ring, cs_offset + len - 8); 1282 intel_ring_advance(ring); 1283 } 1284 1285 return 0; 1286 } 1287 1288 static int 1289 i915_dispatch_execbuffer(struct intel_engine_cs *ring, 1290 u64 offset, u32 len, 1291 unsigned flags) 1292 { 1293 int ret; 1294 1295 ret = intel_ring_begin(ring, 2); 1296 if (ret) 1297 return ret; 1298 1299 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 1300 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); 1301 intel_ring_advance(ring); 1302 1303 return 0; 1304 } 1305 1306 static void cleanup_status_page(struct intel_engine_cs *ring) 1307 { 1308 struct drm_i915_gem_object *obj; 1309 1310 obj = ring->status_page.obj; 1311 if (obj == NULL) 1312 return; 1313 1314 kunmap(obj->pages[0]); 1315 i915_gem_object_ggtt_unpin(obj); 1316 drm_gem_object_unreference(&obj->base); 1317 ring->status_page.obj = NULL; 1318 } 1319 1320 static int init_status_page(struct intel_engine_cs *ring) 1321 { 1322 struct drm_i915_gem_object *obj; 1323 1324 if ((obj = ring->status_page.obj) == NULL) { 1325 int ret; 1326 1327 obj = i915_gem_alloc_object(ring->dev, 4096); 1328 if (obj == NULL) { 1329 DRM_ERROR("Failed to allocate status page\n"); 1330 return -ENOMEM; 1331 } 1332 1333 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1334 if (ret) 1335 goto err_unref; 1336 1337 ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); 1338 if (ret) { 1339 err_unref: 1340 drm_gem_object_unreference(&obj->base); 1341 return ret; 1342 } 1343 1344 ring->status_page.obj = obj; 1345 } 1346 1347 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); 1348 ring->status_page.page_addr = kmap(obj->pages[0]); 1349 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1350 1351 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 1352 ring->name, ring->status_page.gfx_addr); 1353 1354 return 0; 1355 } 1356 1357 static int init_phys_status_page(struct intel_engine_cs *ring) 1358 { 1359 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1360 1361 if (!dev_priv->status_page_dmah) { 1362 dev_priv->status_page_dmah = 1363 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); 1364 if (!dev_priv->status_page_dmah) 1365 return -ENOMEM; 1366 } 1367 1368 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1369 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1370 1371 return 0; 1372 } 1373 1374 static int 
allocate_ring_buffer(struct intel_engine_cs *ring) 1375 { 1376 struct drm_device *dev = ring->dev; 1377 struct intel_ringbuffer *ringbuf = ring->buffer; 1378 struct drm_i915_gem_object *obj; 1379 int ret; 1380 1381 if (intel_ring_initialized(ring)) 1382 return 0; 1383 1384 obj = NULL; 1385 if (!HAS_LLC(dev)) 1386 obj = i915_gem_object_create_stolen(dev, ringbuf->size); 1387 if (obj == NULL) 1388 obj = i915_gem_alloc_object(dev, ringbuf->size); 1389 if (obj == NULL) 1390 return -ENOMEM; 1391 1392 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); 1393 if (ret) 1394 goto err_unref; 1395 1396 ret = i915_gem_object_set_to_gtt_domain(obj, true); 1397 if (ret) 1398 goto err_unpin; 1399 1400 ringbuf->virtual_start = 1401 ioremap_wc(dev->agp->base + i915_gem_obj_ggtt_offset(obj), 1402 ringbuf->size); 1403 if (ringbuf->virtual_start == NULL) { 1404 ret = -EINVAL; 1405 goto err_unpin; 1406 } 1407 1408 ringbuf->obj = obj; 1409 return 0; 1410 1411 err_unpin: 1412 i915_gem_object_ggtt_unpin(obj); 1413 err_unref: 1414 drm_gem_object_unreference(&obj->base); 1415 return ret; 1416 } 1417 1418 static int intel_init_ring_buffer(struct drm_device *dev, 1419 struct intel_engine_cs *ring) 1420 { 1421 struct intel_ringbuffer *ringbuf = ring->buffer; 1422 int ret; 1423 1424 if (ringbuf == NULL) { 1425 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); 1426 if (!ringbuf) 1427 return -ENOMEM; 1428 ring->buffer = ringbuf; 1429 } 1430 1431 ring->dev = dev; 1432 INIT_LIST_HEAD(&ring->active_list); 1433 INIT_LIST_HEAD(&ring->request_list); 1434 ringbuf->size = 32 * PAGE_SIZE; 1435 memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); 1436 1437 init_waitqueue_head(&ring->irq_queue); 1438 1439 if (I915_NEED_GFX_HWS(dev)) { 1440 ret = init_status_page(ring); 1441 if (ret) 1442 goto error; 1443 } else { 1444 BUG_ON(ring->id != RCS); 1445 ret = init_phys_status_page(ring); 1446 if (ret) 1447 goto error; 1448 } 1449 1450 ret = allocate_ring_buffer(ring); 1451 if (ret) { 1452 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); 1453 goto error; 1454 } 1455 1456 /* Workaround an erratum on the i830 which causes a hang if 1457 * the TAIL pointer points to within the last 2 cachelines 1458 * of the buffer. 
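	 * That is why effective_size is reduced by 2 * CACHELINE_BYTES
	 * (128 bytes) on i830/845 below: the wrap check in
	 * __intel_ring_prepare() then keeps TAIL out of that window.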
1459 */ 1460 ringbuf->effective_size = ringbuf->size; 1461 if (IS_I830(dev) || IS_845G(dev)) 1462 ringbuf->effective_size -= 2 * CACHELINE_BYTES; 1463 1464 ret = i915_cmd_parser_init_ring(ring); 1465 if (ret) 1466 goto error; 1467 1468 ret = ring->init(ring); 1469 if (ret) 1470 goto error; 1471 1472 return 0; 1473 1474 error: 1475 kfree(ringbuf); 1476 ring->buffer = NULL; 1477 return ret; 1478 } 1479 1480 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) 1481 { 1482 struct drm_i915_private *dev_priv = to_i915(ring->dev); 1483 struct intel_ringbuffer *ringbuf = ring->buffer; 1484 1485 if (!intel_ring_initialized(ring)) 1486 return; 1487 1488 intel_stop_ring_buffer(ring); 1489 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 1490 1491 pmap_unmapdev((vm_offset_t)ringbuf->virtual_start, ringbuf->size); 1492 1493 i915_gem_object_ggtt_unpin(ringbuf->obj); 1494 drm_gem_object_unreference(&ringbuf->obj->base); 1495 ringbuf->obj = NULL; 1496 ring->preallocated_lazy_request = NULL; 1497 ring->outstanding_lazy_seqno = 0; 1498 1499 if (ring->cleanup) 1500 ring->cleanup(ring); 1501 1502 cleanup_status_page(ring); 1503 1504 i915_cmd_parser_fini_ring(ring); 1505 1506 kfree(ringbuf); 1507 ring->buffer = NULL; 1508 } 1509 1510 static int intel_ring_wait_request(struct intel_engine_cs *ring, int n) 1511 { 1512 struct intel_ringbuffer *ringbuf = ring->buffer; 1513 struct drm_i915_gem_request *request; 1514 u32 seqno = 0; 1515 int ret; 1516 1517 if (ringbuf->last_retired_head != -1) { 1518 ringbuf->head = ringbuf->last_retired_head; 1519 ringbuf->last_retired_head = -1; 1520 1521 ringbuf->space = ring_space(ring); 1522 if (ringbuf->space >= n) 1523 return 0; 1524 } 1525 1526 list_for_each_entry(request, &ring->request_list, list) { 1527 if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) { 1528 seqno = request->seqno; 1529 break; 1530 } 1531 } 1532 1533 if (seqno == 0) 1534 return -ENOSPC; 1535 1536 ret = i915_wait_seqno(ring, seqno); 1537 if (ret) 1538 return ret; 1539 1540 i915_gem_retire_requests_ring(ring); 1541 ringbuf->head = ringbuf->last_retired_head; 1542 ringbuf->last_retired_head = -1; 1543 1544 ringbuf->space = ring_space(ring); 1545 return 0; 1546 } 1547 1548 static int ring_wait_for_space(struct intel_engine_cs *ring, int n) 1549 { 1550 struct drm_device *dev = ring->dev; 1551 struct drm_i915_private *dev_priv = dev->dev_private; 1552 struct intel_ringbuffer *ringbuf = ring->buffer; 1553 unsigned long end; 1554 int ret; 1555 1556 ret = intel_ring_wait_request(ring, n); 1557 if (ret != -ENOSPC) 1558 return ret; 1559 1560 /* force the tail write in case we have been skipping them */ 1561 __intel_ring_advance(ring); 1562 1563 /* With GEM the hangcheck timer should kick us out of the loop, 1564 * leaving it early runs the risk of corrupting GEM state (due 1565 * to running on almost untested codepaths). But on resume 1566 * timers don't work yet, so prevent a complete hang in that 1567 * case by choosing an insanely large timeout. 
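	 * Hence the insanely large value: the loop below polls HEAD
	 * roughly once per millisecond and only gives up with -EBUSY
	 * after 60 seconds.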
*/ 1568 end = jiffies + 60 * HZ; 1569 1570 trace_i915_ring_wait_begin(ring); 1571 do { 1572 ringbuf->head = I915_READ_HEAD(ring); 1573 ringbuf->space = ring_space(ring); 1574 if (ringbuf->space >= n) { 1575 ret = 0; 1576 break; 1577 } 1578 1579 #if 0 1580 if (!drm_core_check_feature(dev, DRIVER_MODESET) && 1581 dev->primary->master) { 1582 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1583 if (master_priv->sarea_priv) 1584 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1585 } 1586 #else 1587 if (dev_priv->sarea_priv) 1588 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1589 #endif 1590 1591 1592 msleep(1); 1593 1594 #if 0 1595 if (dev_priv->mm.interruptible && signal_pending(current)) { 1596 ret = -ERESTARTSYS; 1597 break; 1598 } 1599 #endif 1600 1601 ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1602 dev_priv->mm.interruptible); 1603 if (ret) 1604 break; 1605 1606 if (time_after(jiffies, end)) { 1607 ret = -EBUSY; 1608 break; 1609 } 1610 } while (1); 1611 trace_i915_ring_wait_end(ring); 1612 return ret; 1613 } 1614 1615 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring) 1616 { 1617 uint32_t __iomem *virt; 1618 struct intel_ringbuffer *ringbuf = ring->buffer; 1619 int rem = ringbuf->size - ringbuf->tail; 1620 1621 if (ringbuf->space < rem) { 1622 int ret = ring_wait_for_space(ring, rem); 1623 if (ret) 1624 return ret; 1625 } 1626 1627 virt = (unsigned int *)((char *)ringbuf->virtual_start + ringbuf->tail); 1628 rem /= 4; 1629 while (rem--) 1630 iowrite32(MI_NOOP, virt++); 1631 1632 ringbuf->tail = 0; 1633 ringbuf->space = ring_space(ring); 1634 1635 return 0; 1636 } 1637 1638 int intel_ring_idle(struct intel_engine_cs *ring) 1639 { 1640 u32 seqno; 1641 int ret; 1642 1643 /* We need to add any requests required to flush the objects and ring */ 1644 if (ring->outstanding_lazy_seqno) { 1645 ret = i915_add_request(ring, NULL); 1646 if (ret) 1647 return ret; 1648 } 1649 1650 /* Wait upon the last request to be completed */ 1651 if (list_empty(&ring->request_list)) 1652 return 0; 1653 1654 seqno = list_entry(ring->request_list.prev, 1655 struct drm_i915_gem_request, 1656 list)->seqno; 1657 1658 return i915_wait_seqno(ring, seqno); 1659 } 1660 1661 static int 1662 intel_ring_alloc_seqno(struct intel_engine_cs *ring) 1663 { 1664 if (ring->outstanding_lazy_seqno) 1665 return 0; 1666 1667 if (ring->preallocated_lazy_request == NULL) { 1668 struct drm_i915_gem_request *request; 1669 1670 request = kmalloc(sizeof(*request), M_DRM, M_WAITOK); 1671 if (request == NULL) 1672 return -ENOMEM; 1673 1674 ring->preallocated_lazy_request = request; 1675 } 1676 1677 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno); 1678 } 1679 1680 static int __intel_ring_prepare(struct intel_engine_cs *ring, 1681 int bytes) 1682 { 1683 struct intel_ringbuffer *ringbuf = ring->buffer; 1684 int ret; 1685 1686 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { 1687 ret = intel_wrap_ring_buffer(ring); 1688 if (unlikely(ret)) 1689 return ret; 1690 } 1691 1692 if (unlikely(ringbuf->space < bytes)) { 1693 ret = ring_wait_for_space(ring, bytes); 1694 if (unlikely(ret)) 1695 return ret; 1696 } 1697 1698 return 0; 1699 } 1700 1701 int intel_ring_begin(struct intel_engine_cs *ring, 1702 int num_dwords) 1703 { 1704 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1705 int ret; 1706 1707 ret = i915_gem_check_wedge(&dev_priv->gpu_error, 1708 dev_priv->mm.interruptible); 1709 if (ret) 1710 return ret; 1711 1712 ret = 
__intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); 1713 if (ret) 1714 return ret; 1715 1716 /* Preallocate the olr before touching the ring */ 1717 ret = intel_ring_alloc_seqno(ring); 1718 if (ret) 1719 return ret; 1720 1721 ring->buffer->space -= num_dwords * sizeof(uint32_t); 1722 return 0; 1723 } 1724 1725 /* Align the ring tail to a cacheline boundary */ 1726 int intel_ring_cacheline_align(struct intel_engine_cs *ring) 1727 { 1728 int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); 1729 int ret; 1730 1731 if (num_dwords == 0) 1732 return 0; 1733 1734 num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; 1735 ret = intel_ring_begin(ring, num_dwords); 1736 if (ret) 1737 return ret; 1738 1739 while (num_dwords--) 1740 intel_ring_emit(ring, MI_NOOP); 1741 1742 intel_ring_advance(ring); 1743 1744 return 0; 1745 } 1746 1747 void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) 1748 { 1749 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1750 1751 BUG_ON(ring->outstanding_lazy_seqno); 1752 1753 if (INTEL_INFO(ring->dev)->gen >= 6) { 1754 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); 1755 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); 1756 if (HAS_VEBOX(ring->dev)) 1757 I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); 1758 } 1759 1760 ring->set_seqno(ring, seqno); 1761 ring->hangcheck.seqno = seqno; 1762 } 1763 1764 static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, 1765 u32 value) 1766 { 1767 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1768 1769 /* Every tail move must follow the sequence below */ 1770 1771 /* Disable notification that the ring is IDLE. The GT 1772 * will then assume that it is busy and bring it out of rc6. 1773 */ 1774 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 1775 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1776 1777 /* Clear the context id. Here be magic! */ 1778 I915_WRITE64(GEN6_BSD_RNCID, 0x0); 1779 1780 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 1781 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & 1782 GEN6_BSD_SLEEP_INDICATOR) == 0, 1783 50)) 1784 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 1785 1786 /* Now that the ring is fully powered up, update the tail */ 1787 I915_WRITE_TAIL(ring, value); 1788 POSTING_READ(RING_TAIL(ring->mmio_base)); 1789 1790 /* Let the ring send IDLE messages to the GT again, 1791 * and so let it sleep to conserve power when idle. 1792 */ 1793 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 1794 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 1795 } 1796 1797 static int gen6_bsd_ring_flush(struct intel_engine_cs *ring, 1798 u32 invalidate, u32 flush) 1799 { 1800 uint32_t cmd; 1801 int ret; 1802 1803 ret = intel_ring_begin(ring, 4); 1804 if (ret) 1805 return ret; 1806 1807 cmd = MI_FLUSH_DW; 1808 if (INTEL_INFO(ring->dev)->gen >= 8) 1809 cmd += 1; 1810 /* 1811 * Bspec vol 1c.5 - video engine command streamer: 1812 * "If ENABLED, all TLBs will be invalidated once the flush 1813 * operation is complete. This bit is only valid when the 1814 * Post-Sync Operation field is a value of 1h or 3h." 
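	 * That is why MI_FLUSH_DW_OP_STOREDW (the dword-write post-sync
	 * op) is set alongside MI_INVALIDATE_TLB below; the dummy store
	 * goes to the HWS scratch slot via MI_FLUSH_DW_STORE_INDEX.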
1815 */ 1816 if (invalidate & I915_GEM_GPU_DOMAINS) 1817 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD | 1818 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; 1819 intel_ring_emit(ring, cmd); 1820 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1821 if (INTEL_INFO(ring->dev)->gen >= 8) { 1822 intel_ring_emit(ring, 0); /* upper addr */ 1823 intel_ring_emit(ring, 0); /* value */ 1824 } else { 1825 intel_ring_emit(ring, 0); 1826 intel_ring_emit(ring, MI_NOOP); 1827 } 1828 intel_ring_advance(ring); 1829 return 0; 1830 } 1831 1832 static int 1833 gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 1834 u64 offset, u32 len, 1835 unsigned flags) 1836 { 1837 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1838 bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL && 1839 !(flags & I915_DISPATCH_SECURE); 1840 int ret; 1841 1842 ret = intel_ring_begin(ring, 4); 1843 if (ret) 1844 return ret; 1845 1846 /* FIXME(BDW): Address space and security selectors. */ 1847 intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); 1848 intel_ring_emit(ring, lower_32_bits(offset)); 1849 intel_ring_emit(ring, upper_32_bits(offset)); 1850 intel_ring_emit(ring, MI_NOOP); 1851 intel_ring_advance(ring); 1852 1853 return 0; 1854 } 1855 1856 static int 1857 hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 1858 u64 offset, u32 len, 1859 unsigned flags) 1860 { 1861 int ret; 1862 1863 ret = intel_ring_begin(ring, 2); 1864 if (ret) 1865 return ret; 1866 1867 intel_ring_emit(ring, 1868 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW | 1869 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW)); 1870 /* bit0-7 is the length on GEN6+ */ 1871 intel_ring_emit(ring, offset); 1872 intel_ring_advance(ring); 1873 1874 return 0; 1875 } 1876 1877 static int 1878 gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring, 1879 u64 offset, u32 len, 1880 unsigned flags) 1881 { 1882 int ret; 1883 1884 ret = intel_ring_begin(ring, 2); 1885 if (ret) 1886 return ret; 1887 1888 intel_ring_emit(ring, 1889 MI_BATCH_BUFFER_START | 1890 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); 1891 /* bit0-7 is the length on GEN6+ */ 1892 intel_ring_emit(ring, offset); 1893 intel_ring_advance(ring); 1894 1895 return 0; 1896 } 1897 1898 /* Blitter support (SandyBridge+) */ 1899 1900 static int gen6_ring_flush(struct intel_engine_cs *ring, 1901 u32 invalidate, u32 flush) 1902 { 1903 struct drm_device *dev = ring->dev; 1904 uint32_t cmd; 1905 int ret; 1906 1907 ret = intel_ring_begin(ring, 4); 1908 if (ret) 1909 return ret; 1910 1911 cmd = MI_FLUSH_DW; 1912 if (INTEL_INFO(ring->dev)->gen >= 8) 1913 cmd += 1; 1914 /* 1915 * Bspec vol 1c.3 - blitter engine command streamer: 1916 * "If ENABLED, all TLBs will be invalidated once the flush 1917 * operation is complete. This bit is only valid when the 1918 * Post-Sync Operation field is a value of 1h or 3h." 
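	 * As on the video ring, the STORE_DW post-sync op below satisfies
	 * that requirement; note that the blitter only keys the TLB
	 * invalidate off I915_GEM_DOMAIN_RENDER rather than all GPU
	 * domains.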
1919 */ 1920 if (invalidate & I915_GEM_DOMAIN_RENDER) 1921 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX | 1922 MI_FLUSH_DW_OP_STOREDW; 1923 intel_ring_emit(ring, cmd); 1924 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 1925 if (INTEL_INFO(ring->dev)->gen >= 8) { 1926 intel_ring_emit(ring, 0); /* upper addr */ 1927 intel_ring_emit(ring, 0); /* value */ 1928 } else { 1929 intel_ring_emit(ring, 0); 1930 intel_ring_emit(ring, MI_NOOP); 1931 } 1932 intel_ring_advance(ring); 1933 1934 if (IS_GEN7(dev) && !invalidate && flush) 1935 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); 1936 1937 return 0; 1938 } 1939 1940 int intel_init_render_ring_buffer(struct drm_device *dev) 1941 { 1942 struct drm_i915_private *dev_priv = dev->dev_private; 1943 struct intel_engine_cs *ring = &dev_priv->ring[RCS]; 1944 1945 ring->name = "render ring"; 1946 ring->id = RCS; 1947 ring->mmio_base = RENDER_RING_BASE; 1948 1949 if (INTEL_INFO(dev)->gen >= 6) { 1950 ring->add_request = gen6_add_request; 1951 ring->flush = gen7_render_ring_flush; 1952 if (INTEL_INFO(dev)->gen == 6) 1953 ring->flush = gen6_render_ring_flush; 1954 if (INTEL_INFO(dev)->gen >= 8) { 1955 ring->flush = gen8_render_ring_flush; 1956 ring->irq_get = gen8_ring_get_irq; 1957 ring->irq_put = gen8_ring_put_irq; 1958 } else { 1959 ring->irq_get = gen6_ring_get_irq; 1960 ring->irq_put = gen6_ring_put_irq; 1961 } 1962 ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; 1963 ring->get_seqno = gen6_ring_get_seqno; 1964 ring->set_seqno = ring_set_seqno; 1965 ring->semaphore.sync_to = gen6_ring_sync; 1966 ring->semaphore.signal = gen6_signal; 1967 /* 1968 * The current semaphore is only applied on pre-gen8 platform. 1969 * And there is no VCS2 ring on the pre-gen8 platform. So the 1970 * semaphore between RCS and VCS2 is initialized as INVALID. 1971 * Gen8 will initialize the sema between VCS2 and RCS later. 
int intel_init_render_ring_buffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[RCS];

        ring->name = "render ring";
        ring->id = RCS;
        ring->mmio_base = RENDER_RING_BASE;

        if (INTEL_INFO(dev)->gen >= 6) {
                ring->add_request = gen6_add_request;
                ring->flush = gen7_render_ring_flush;
                if (INTEL_INFO(dev)->gen == 6)
                        ring->flush = gen6_render_ring_flush;
                if (INTEL_INFO(dev)->gen >= 8) {
                        ring->flush = gen8_render_ring_flush;
                        ring->irq_get = gen8_ring_get_irq;
                        ring->irq_put = gen8_ring_put_irq;
                } else {
                        ring->irq_get = gen6_ring_get_irq;
                        ring->irq_put = gen6_ring_put_irq;
                }
                ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
                ring->get_seqno = gen6_ring_get_seqno;
                ring->set_seqno = ring_set_seqno;
                ring->semaphore.sync_to = gen6_ring_sync;
                ring->semaphore.signal = gen6_signal;
                /*
                 * The current semaphore is only applied on pre-gen8 platform.
                 * And there is no VCS2 ring on the pre-gen8 platform. So the
                 * semaphore between RCS and VCS2 is initialized as INVALID.
                 * Gen8 will initialize the sema between VCS2 and RCS later.
                 */
                ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
                ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
                ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
                ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
                ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
                ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
                ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
                ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
                ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
                ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->flush = gen4_render_ring_flush;
                ring->get_seqno = pc_render_get_seqno;
                ring->set_seqno = pc_render_set_seqno;
                ring->irq_get = gen5_ring_get_irq;
                ring->irq_put = gen5_ring_put_irq;
                ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
                                        GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
        } else {
                ring->add_request = i9xx_add_request;
                if (INTEL_INFO(dev)->gen < 4)
                        ring->flush = gen2_render_ring_flush;
                else
                        ring->flush = gen4_render_ring_flush;
                ring->get_seqno = ring_get_seqno;
                ring->set_seqno = ring_set_seqno;
                if (IS_GEN2(dev)) {
                        ring->irq_get = i8xx_ring_get_irq;
                        ring->irq_put = i8xx_ring_put_irq;
                } else {
                        ring->irq_get = i9xx_ring_get_irq;
                        ring->irq_put = i9xx_ring_put_irq;
                }
                ring->irq_enable_mask = I915_USER_INTERRUPT;
        }
        ring->write_tail = ring_write_tail;
        if (IS_HASWELL(dev))
                ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
        else if (IS_GEN8(dev))
                ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
        else if (INTEL_INFO(dev)->gen >= 6)
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        else if (INTEL_INFO(dev)->gen >= 4)
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
        else if (IS_I830(dev) || IS_845G(dev))
                ring->dispatch_execbuffer = i830_dispatch_execbuffer;
        else
                ring->dispatch_execbuffer = i915_dispatch_execbuffer;
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;

        /* Workaround batchbuffer to combat CS tlb bug. */
        if (HAS_BROKEN_CS_TLB(dev)) {
                struct drm_i915_gem_object *obj;
                int ret;

                obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
                if (obj == NULL) {
                        DRM_ERROR("Failed to allocate batch bo\n");
                        return -ENOMEM;
                }

                ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
                if (ret != 0) {
                        drm_gem_object_unreference(&obj->base);
                        DRM_ERROR("Failed to pin batch bo\n");
                        return ret;
                }

                ring->scratch.obj = obj;
                ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
        }

        return intel_init_ring_buffer(dev, ring);
}
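/*
 * Illustrative sketch only: how the mailbox tables initialized above are
 * meant to be consumed.  signal[] is indexed by the id of the ring being
 * woken and names the MMIO mailbox this ring writes its seqno into
 * (gen6_signal); wait[] is indexed by the id of the signalling ring and
 * selects the MI_SEMAPHORE_SYNC_* encoding used when waiting
 * (gen6_ring_sync).  GEN6_NOSYNC / MI_SEMAPHORE_SYNC_INVALID mark pairs
 * that never synchronize (a ring with itself, or VCS2 on pre-gen8).  The
 * helpers below are hypothetical and exist purely to document that
 * indexing convention.
 */
static inline u32
example_signal_mbox(struct intel_engine_cs *signaller,
                    struct intel_engine_cs *waiter)
{
        return signaller->semaphore.mbox.signal[waiter->id];
}

static inline u32
example_wait_mbox(struct intel_engine_cs *waiter,
                  struct intel_engine_cs *signaller)
{
        return waiter->semaphore.mbox.wait[signaller->id];
}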
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[RCS];
        struct intel_ringbuffer *ringbuf = ring->buffer;
        int ret;

        if (ringbuf == NULL) {
                ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
                if (!ringbuf)
                        return -ENOMEM;
                ring->buffer = ringbuf;
        }

        ring->name = "render ring";
        ring->id = RCS;
        ring->mmio_base = RENDER_RING_BASE;

        if (INTEL_INFO(dev)->gen >= 6) {
                /* non-kms not supported on gen6+ */
                ret = -ENODEV;
                goto err_ringbuf;
        }

        /* Note: gem is not supported on gen5/ilk without kms (the corresponding
         * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
         * the special gen5 functions. */
        ring->add_request = i9xx_add_request;
        if (INTEL_INFO(dev)->gen < 4)
                ring->flush = gen2_render_ring_flush;
        else
                ring->flush = gen4_render_ring_flush;
        ring->get_seqno = ring_get_seqno;
        ring->set_seqno = ring_set_seqno;
        if (IS_GEN2(dev)) {
                ring->irq_get = i8xx_ring_get_irq;
                ring->irq_put = i8xx_ring_put_irq;
        } else {
                ring->irq_get = i9xx_ring_get_irq;
                ring->irq_put = i9xx_ring_put_irq;
        }
        ring->irq_enable_mask = I915_USER_INTERRUPT;
        ring->write_tail = ring_write_tail;
        if (INTEL_INFO(dev)->gen >= 4)
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
        else if (IS_I830(dev) || IS_845G(dev))
                ring->dispatch_execbuffer = i830_dispatch_execbuffer;
        else
                ring->dispatch_execbuffer = i915_dispatch_execbuffer;
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);

        ringbuf->size = size;
        ringbuf->effective_size = ringbuf->size;
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ringbuf->effective_size -= 2 * CACHELINE_BYTES;

        ringbuf->virtual_start = ioremap_wc(start, size);
        if (ringbuf->virtual_start == NULL) {
                DRM_ERROR("cannot ioremap virtual address for"
                          " ring buffer\n");
                ret = -ENOMEM;
                goto err_ringbuf;
        }

        if (!I915_NEED_GFX_HWS(dev)) {
                ret = init_phys_status_page(ring);
                if (ret)
                        goto err_vstart;
        }

        return 0;

err_vstart:
        pmap_unmapdev((vm_offset_t)ring->buffer->virtual_start, size);
err_ringbuf:
        kfree(ringbuf);
        ring->buffer = NULL;
        return ret;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VCS];

        ring->name = "bsd ring";
        ring->id = VCS;

        ring->write_tail = ring_write_tail;
        if (INTEL_INFO(dev)->gen >= 6) {
                ring->mmio_base = GEN6_BSD_RING_BASE;
                /* gen6 bsd needs a special wa for tail updates */
                if (IS_GEN6(dev))
                        ring->write_tail = gen6_bsd_ring_write_tail;
                ring->flush = gen6_bsd_ring_flush;
                ring->add_request = gen6_add_request;
                ring->get_seqno = gen6_ring_get_seqno;
                ring->set_seqno = ring_set_seqno;
                if (INTEL_INFO(dev)->gen >= 8) {
                        ring->irq_enable_mask =
                                GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
                        ring->irq_get = gen8_ring_get_irq;
                        ring->irq_put = gen8_ring_put_irq;
                        ring->dispatch_execbuffer =
                                gen8_ring_dispatch_execbuffer;
                } else {
                        ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
                        ring->irq_get = gen6_ring_get_irq;
                        ring->irq_put = gen6_ring_put_irq;
                        ring->dispatch_execbuffer =
                                gen6_ring_dispatch_execbuffer;
                }
                ring->semaphore.sync_to = gen6_ring_sync;
                ring->semaphore.signal = gen6_signal;
                /*
                 * The current semaphore is only applied on pre-gen8 platform.
                 * And there is no VCS2 ring on the pre-gen8 platform. So the
                 * semaphore between VCS and VCS2 is initialized as INVALID.
                 * Gen8 will initialize the sema between VCS2 and VCS later.
                 */
                ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
                ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
                ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
                ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
                ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
                ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
                ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
                ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
                ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
                ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
        } else {
                ring->mmio_base = BSD_RING_BASE;
                ring->flush = bsd_ring_flush;
                ring->add_request = i9xx_add_request;
                ring->get_seqno = ring_get_seqno;
                ring->set_seqno = ring_set_seqno;
                if (IS_GEN5(dev)) {
                        ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
                        ring->irq_get = gen5_ring_get_irq;
                        ring->irq_put = gen5_ring_put_irq;
                } else {
                        ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
                        ring->irq_get = i9xx_ring_get_irq;
                        ring->irq_put = i9xx_ring_put_irq;
                }
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
        }
        ring->init = init_ring_common;

        return intel_init_ring_buffer(dev, ring);
}
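/*
 * Illustrative sketch only: on gen8 every engine reuses the same
 * user-interrupt bit, shifted into a per-engine field of the interrupt
 * registers, which is why the init functions in this file build
 * irq_enable_mask as GT_RENDER_USER_INTERRUPT << <engine shift> (and the
 * render ring simply uses the unshifted bit).  The hypothetical helper
 * below just restates that pattern for the shifts used here; it is not
 * called by the driver.
 */
static inline u32
example_gen8_user_irq_mask(struct intel_engine_cs *ring)
{
        switch (ring->id) {
        case VCS:
                return GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
        case VCS2:
                return GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
        case BCS:
                return GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
        case VECS:
                return GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
        case RCS:
        default:
                return GT_RENDER_USER_INTERRUPT;
        }
}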
/**
 * Initialize the second BSD ring for Broadwell GT3.
 * Note that this ring only exists on Broadwell GT3.
 */
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

        if (INTEL_INFO(dev)->gen != 8) {
                DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
                return -EINVAL;
        }

        ring->name = "bsd2 ring";
        ring->id = VCS2;

        ring->write_tail = ring_write_tail;
        ring->mmio_base = GEN8_BSD2_RING_BASE;
        ring->flush = gen6_bsd_ring_flush;
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->set_seqno = ring_set_seqno;
        ring->irq_enable_mask =
                GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
        ring->irq_get = gen8_ring_get_irq;
        ring->irq_put = gen8_ring_put_irq;
        ring->dispatch_execbuffer =
                gen8_ring_dispatch_execbuffer;
        ring->semaphore.sync_to = gen6_ring_sync;
        ring->semaphore.signal = gen6_signal;
        /*
         * The current semaphores are only used on pre-gen8 platforms, and
         * there is no bsd2 ring on pre-gen8. So the semaphore registers
         * between VCS2 and the other rings are initialized as invalid.
         * Gen8 will initialize the semaphores between VCS2 and the other
         * rings later.
         */
        ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
        ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
        ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
        ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
        ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;

        ring->init = init_ring_common;

        return intel_init_ring_buffer(dev, ring);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[BCS];

        ring->name = "blitter ring";
        ring->id = BCS;

        ring->mmio_base = BLT_RING_BASE;
        ring->write_tail = ring_write_tail;
        ring->flush = gen6_ring_flush;
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->set_seqno = ring_set_seqno;
        if (INTEL_INFO(dev)->gen >= 8) {
                ring->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
                ring->irq_get = gen8_ring_get_irq;
                ring->irq_put = gen8_ring_put_irq;
                ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
        } else {
                ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
                ring->irq_get = gen6_ring_get_irq;
                ring->irq_put = gen6_ring_put_irq;
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        }
        ring->semaphore.sync_to = gen6_ring_sync;
        ring->semaphore.signal = gen6_signal;
        /*
         * The current semaphore is only applied on pre-gen8 platform. And
         * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
         * between BCS and VCS2 is initialized as INVALID.
         * Gen8 will initialize the sema between BCS and VCS2 later.
         */
        ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
        ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
        ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
        ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
        ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
        ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
        ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
        ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
        ring->init = init_ring_common;

        return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VECS];

        ring->name = "video enhancement ring";
        ring->id = VECS;

        ring->mmio_base = VEBOX_RING_BASE;
        ring->write_tail = ring_write_tail;
        ring->flush = gen6_ring_flush;
        ring->add_request = gen6_add_request;
        ring->get_seqno = gen6_ring_get_seqno;
        ring->set_seqno = ring_set_seqno;

        if (INTEL_INFO(dev)->gen >= 8) {
                ring->irq_enable_mask =
                        GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
                ring->irq_get = gen8_ring_get_irq;
                ring->irq_put = gen8_ring_put_irq;
                ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
        } else {
                ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
                ring->irq_get = hsw_vebox_get_irq;
                ring->irq_put = hsw_vebox_put_irq;
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        }
        ring->semaphore.sync_to = gen6_ring_sync;
        ring->semaphore.signal = gen6_signal;
        ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
        ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
        ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
        ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
        ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
        ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
        ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
        ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
        ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
        ring->init = init_ring_common;

        return intel_init_ring_buffer(dev, ring);
}

int
intel_ring_flush_all_caches(struct intel_engine_cs *ring)
{
        int ret;

        if (!ring->gpu_caches_dirty)
                return 0;

        ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
        if (ret)
                return ret;

        trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

        ring->gpu_caches_dirty = false;
        return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
{
        uint32_t flush_domains;
        int ret;

        flush_domains = 0;
        if (ring->gpu_caches_dirty)
                flush_domains = I915_GEM_GPU_DOMAINS;

        ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
        if (ret)
                return ret;

        trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

        ring->gpu_caches_dirty = false;
        return 0;
}
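/*
 * Illustrative sketch only: roughly how the two helpers above are meant
 * to be paired by a submission path.  ring->gpu_caches_dirty is the
 * driver's "the GPU may have written through its caches since the last
 * flush" latch; the real call sites live in the GEM execbuffer and
 * request code, so treat this hypothetical helper as a simplified
 * sequence, not the driver's actual submission routine.
 */
static inline int
example_submit_batch(struct intel_engine_cs *ring,
                     u64 batch_offset, u32 batch_len,
                     unsigned dispatch_flags)
{
        int ret;

        /* Make sure stale read caches/TLBs do not serve old data. */
        ret = intel_ring_invalidate_all_caches(ring);
        if (ret)
                return ret;

        ret = ring->dispatch_execbuffer(ring, batch_offset, batch_len,
                                        dispatch_flags);
        if (ret)
                return ret;

        /* The batch may have dirtied GPU caches; flush before signalling. */
        ring->gpu_caches_dirty = true;
        return intel_ring_flush_all_caches(ring);
}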
void
intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
        int ret;

        if (!intel_ring_initialized(ring))
                return;

        ret = intel_ring_idle(ring);
        if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        stop_ring(ring);
}