/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <linux/log2.h>
#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"

int __intel_ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space - I915_RING_FREE_SPACE;
}

void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
{
	if (ringbuf->last_retired_head != -1) {
		ringbuf->head = ringbuf->last_retired_head;
		ringbuf->last_retired_head = -1;
	}

	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
					    ringbuf->tail, ringbuf->size);
}

int intel_ring_space(struct intel_ringbuffer *ringbuf)
{
	intel_ring_update_space(ringbuf);
	return ringbuf->space;
}
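
/*
 * Worked example for the space calculation above (illustrative values only):
 * with size = 0x4000, head = 0x1000 and tail = 0x3000, head - tail comes out
 * negative, so the ring size is added back, leaving 0x2000 bytes between the
 * tail software writes at and the head the GPU consumes from; the
 * I915_RING_FREE_SPACE reservation is then subtracted from that figure.
 */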

bool intel_ring_stopped(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
}

static void __intel_ring_advance(struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	ringbuf->tail &= ringbuf->size - 1;
	if (intel_ring_stopped(ring))
		return;
	ring->write_tail(ring, ringbuf->tail);
}

static int
gen2_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen4_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(req);
	if (ret)
		return ret;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen7_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	u32 flags = 0;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything.  Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(req);
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_emit_pipe_control(struct drm_i915_gem_request *req,
		       u32 flags, u32 scratch_addr)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
gen8_render_ring_flush(struct drm_i915_gem_request *req,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	flags |= PIPE_CONTROL_CS_STALL;

	if (flush_domains) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (invalidate_domains) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
		ret = gen8_emit_pipe_control(req,
					     PIPE_CONTROL_CS_STALL |
					     PIPE_CONTROL_STALL_AT_SCOREBOARD,
					     0);
		if (ret)
			return ret;
	}

	return gen8_emit_pipe_control(req, flags, scratch_addr);
}

static void ring_write_tail(struct intel_engine_cs *ring,
			    u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u64 intel_ring_get_active_head(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u64 acthd;

	if (INTEL_INFO(ring->dev)->gen >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base),
					 RING_ACTHD_UDW(ring->mmio_base));
	else if (INTEL_INFO(ring->dev)->gen >= 4)
		acthd = I915_READ(RING_ACTHD(ring->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

static void ring_setup_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(ring->dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}

static void intel_ring_setup_status_page(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	i915_reg_t mmio;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		/*
		 * VCS2 actually doesn't exist on Gen7. Only shut up
		 * gcc switch check warning
		 */
		case VCS2:
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		case VECS:
			mmio = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		/* XXX: gen8 returns to sanity */
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);

	/*
	 * Flush the TLB for this page
	 *
	 * FIXME: These two bits have disappeared on gen8, so a question
	 * arises: do we still need this and if so how should we go about
	 * invalidating the TLB?
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
		i915_reg_t reg = RING_INSTPM(ring->mmio_base);

		/* ring should be idle before issuing a sync flush */
		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);

		I915_WRITE(reg,
			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					      INSTPM_SYNC_FLUSH));
		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
			     1000))
			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
				  ring->name);
	}
}

static bool stop_ring(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ring->dev);

	if (!IS_GEN2(ring->dev)) {
		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
				return false;
		}
	}

	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	if (!IS_GEN2(ring->dev)) {
		(void)I915_READ_CTL(ring);
		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
	}

	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
}

static int init_ring_common(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret = 0;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (!stop_ring(ring)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		if (!stop_ring(ring)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
			ret = -EIO;
			goto out;
		}
	}

	if (I915_NEED_GFX_HWS(dev))
		intel_ring_setup_status_page(ring);
	else
		ring_setup_phys_status_page(ring);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(ring);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values. */
	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(ring))
		DRM_DEBUG("%s initialization failed [head=%08x], fudging\n",
			  ring->name, I915_READ_HEAD(ring));
	I915_WRITE_HEAD(ring, 0);
	(void)I915_READ_HEAD(ring);

	I915_WRITE_CTL(ring,
			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n",
			  ring->name,
			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID,
			  I915_READ_HEAD(ring), I915_READ_TAIL(ring),
			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));
		ret = -EIO;
		goto out;
	}

	ringbuf->last_retired_head = -1;
	ringbuf->head = I915_READ_HEAD(ring);
	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	intel_ring_update_space(ringbuf);

	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	return ret;
}

void
intel_fini_pipe_control(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;

	if (ring->scratch.obj == NULL)
		return;

	if (INTEL_INFO(dev)->gen >= 5) {
		kunmap(sg_page(ring->scratch.obj->pages->sgl));
		i915_gem_object_ggtt_unpin(ring->scratch.obj);
	}

	drm_gem_object_unreference(&ring->scratch.obj->base);
	ring->scratch.obj = NULL;
}

int
intel_init_pipe_control(struct intel_engine_cs *ring)
{
	int ret;

	WARN_ON(ring->scratch.obj);

	ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
	if (ring->scratch.obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
	if (ret)
		goto err_unref;

	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
	if (ret)
		goto err_unref;

	ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
	ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
	if (ring->scratch.cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
			 ring->name, ring->scratch.gtt_offset);
	return 0;

err_unpin:
	i915_gem_object_ggtt_unpin(ring->scratch.obj);
err_unref:
	drm_gem_object_unreference(&ring->scratch.obj->base);
err:
	return ret;
}
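
/*
 * Note: the 4096-byte scratch object set up above is the target of the
 * PIPE_CONTROL post-sync writes used by the gen6/gen7/gen8 flush functions
 * and by pc_render_add_request() in this file (they write at
 * scratch.gtt_offset + 2 * CACHELINE_BYTES), so it is kept pinned until
 * intel_fini_pipe_control() releases it.
 */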

static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
{
	int ret, i;
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_workarounds *w = &dev_priv->workarounds;

	if (w->count == 0)
		return 0;

	ring->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, (w->count * 2 + 2));
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
	for (i = 0; i < w->count; i++) {
		intel_ring_emit_reg(ring, w->reg[i].addr);
		intel_ring_emit(ring, w->reg[i].value);
	}
	intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	ring->gpu_caches_dirty = true;
	ret = intel_ring_flush_all_caches(req);
	if (ret)
		return ret;

	DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);

	return 0;
}

static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
	int ret;

	ret = intel_ring_workarounds_emit(req);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_init(req);
	if (ret)
		DRM_ERROR("init render state: %d\n", ret);

	return ret;
}

static int wa_add(struct drm_i915_private *dev_priv,
		  i915_reg_t addr,
		  const u32 mask, const u32 val)
{
	const u32 idx = dev_priv->workarounds.count;

	if (WARN_ON(idx >= I915_MAX_WA_REGS))
		return -ENOSPC;

	dev_priv->workarounds.reg[idx].addr = addr;
	dev_priv->workarounds.reg[idx].value = val;
	dev_priv->workarounds.reg[idx].mask = mask;

	dev_priv->workarounds.count++;

	return 0;
}

#define WA_REG(addr, mask, val) do { \
		const int r = wa_add(dev_priv, (addr), (mask), (val)); \
		if (r) \
			return r; \
	} while (0)

#define WA_SET_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))

#define WA_CLR_BIT_MASKED(addr, mask) \
	WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))

#define WA_SET_FIELD_MASKED(addr, mask, value) \
	WA_REG(addr, mask, _MASKED_FIELD(mask, value))

#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))

#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
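
/*
 * For reference: these macros only record register/value pairs in
 * dev_priv->workarounds; the actual writes are emitted per request by
 * intel_ring_workarounds_emit() above as a single MI_LOAD_REGISTER_IMM
 * sequence.  The *_MASKED variants rely on the masked-register convention
 * (write-enable mask in the upper half, value in the lower half), which is
 * what the _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE helpers are expected to
 * encode.
 */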

static int gen8_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  HDC_DONOT_FETCH_MEM_WHEN_MASKED |
			  HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 *  polygons in the same 8x4 pixel/sample area to be processed without
	 *  stalling waiting for the earlier ones to write to Hierarchical Z
	 *  buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);

	return 0;
}

static int bdw_init_workarounds(struct intel_engine_cs *ring)
{
	int ret;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen8_init_workarounds(ring);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw */
	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
			  DOP_CLOCK_GATING_DISABLE);

	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN8_SAMPLER_POWER_BYPASS_DIS);

	WA_SET_BIT_MASKED(HDC_CHICKEN0,
			  /* WaForceContextSaveRestoreNonCoherent:bdw */
			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
			  /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
			  (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));

	return 0;
}

static int chv_init_workarounds(struct intel_engine_cs *ring)
{
	int ret;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen8_init_workarounds(ring);
	if (ret)
		return ret;

	/* WaDisableThreadStallDopClockGating:chv */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);

	return 0;
}

static int gen9_init_workarounds(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* WaEnableLbsSlaRetryTimerDecrement:skl */
	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);

	/* WaDisableKillLogic:bxt,skl */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
		   ECOCHK_DIS_TLB);

	/* WaDisablePartialInstShootdown:skl,bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Syncing dependencies between camera and graphics:skl,bxt */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);

	/* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
				  GEN9_DG_MIRROR_FIX_ENABLE);

	/* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
				  GEN9_RHWO_OPTIMIZATION_DISABLE);
		/*
		 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
		 * but we do that in per ctx batchbuffer as there is an issue
		 * with this register not getting restored on ctx restore
		 */
	}

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER) || IS_BROXTON(dev))
		WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
				  GEN9_ENABLE_YV12_BUGFIX);

	/* Wa4x4STCOptimizationDisable:skl,bxt */
	/* WaDisablePartialResolveInVc:skl,bxt */
	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));

	/* WaCcsTlbPrefetchDisable:skl,bxt */
	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaDisableMaskBasedCammingInRCC:skl,bxt */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) ||
	    IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
				  PIXEL_MASK_CAMMING_DISABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
				  GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt */
	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	return 0;
}

static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(dev_priv->info.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(dev_priv->info.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return 0;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	WA_SET_FIELD_MASKED(GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));

	return 0;
}

static int skl_init_workarounds(struct intel_engine_cs *ring)
{
	int ret;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen9_init_workarounds(ring);
	if (ret)
		return ret;

	if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
		I915_WRITE(FF_SLICE_CS_CHICKEN2,
			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
	}

	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
	 * involving this register should also be added to WA batch as required.
	 */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_E0))
		/* WaDisableLSQCROPERFforOCL:skl */
		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
			   GEN8_LQSC_RO_PERF_DIS);

	/* WaEnableGapsTsvCreditFix:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) {
		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
					   GEN9_GAPS_TSV_CREDIT_DISABLE));
	}

	/* WaDisablePowerCompilerClockGating:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0))
		WA_SET_BIT_MASKED(HIZ_CHICKEN,
				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);

	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
		/*
		 * Use Force Non-Coherent whenever executing a 3D context. This
		 * is a workaround for a possible hang in the unlikely event
		 * a TLB invalidation occurs during a PSD flush.
		 */
		/* WaForceEnableNonCoherent:skl */
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FORCE_NON_COHERENT);

		/* WaDisableHDCInvalidation:skl */
		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
			   BDW_DISABLE_HDC_INVALIDATION);
	}

	/* WaBarrierPerformanceFixDisable:skl */
	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
		WA_SET_BIT_MASKED(HDC_CHICKEN0,
				  HDC_FENCE_DEST_SLM_DISABLE |
				  HDC_BARRIER_PERFORMANCE_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:skl */
	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0))
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);

	return skl_tune_iz_hashing(ring);
}

static int bxt_init_workarounds(struct intel_engine_cs *ring)
{
	int ret;
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	ret = gen9_init_workarounds(ring);
	if (ret)
		return ret;

	/* WaStoreMultiplePTEenable:bxt */
	/* This is a requirement according to Hardware specification */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
		I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);

	/* WaSetClckGatingDisableMedia:bxt */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
		I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
					    ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
	}

	/* WaDisableThreadStallDopClockGating:bxt */
	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
			  STALL_DOP_GATING_DISABLE);

	/* WaDisableSbeCacheDispatchPortSharing:bxt */
	if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) {
		WA_SET_BIT_MASKED(
			GEN7_HALF_SLICE_CHICKEN1,
			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
	}

	return 0;
}

int init_workarounds_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(ring->id != RCS);

	dev_priv->workarounds.count = 0;

	if (IS_BROADWELL(dev))
		return bdw_init_workarounds(ring);

	if (IS_CHERRYVIEW(dev))
		return chv_init_workarounds(ring);

	if (IS_SKYLAKE(dev))
		return skl_init_workarounds(ring);

	if (IS_BROXTON(dev))
		return bxt_init_workarounds(ring);

	return 0;
}

static int init_render_ring(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (INTEL_INFO(dev)->gen == 6)
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN7(dev))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (HAS_L3_DPF(dev))
		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));

	return init_workarounds_ring(ring);
}

static void render_ring_cleanup(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->semaphore_obj) {
		i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
		drm_gem_object_unreference(&dev_priv->semaphore_obj->base);
		dev_priv->semaphore_obj = NULL;
	}

	intel_fini_pipe_control(ring);
}

static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 8
	struct intel_engine_cs *signaller = signaller_req->ring;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(signaller_req);
		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
					   PIPE_CONTROL_QW_WRITE |
					   PIPE_CONTROL_FLUSH_ENABLE);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, 0);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
			   unsigned int num_dwords)
{
#define MBOX_UPDATE_DWORDS 6
	struct intel_engine_cs *signaller = signaller_req->ring;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *waiter;
	int i, ret, num_rings;

	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_ring(waiter, dev_priv, i) {
		u32 seqno;
		u64 gtt_offset = signaller->semaphore.signal_ggtt[i];
		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
			continue;

		seqno = i915_gem_request_get_seqno(signaller_req);
		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
					   MI_FLUSH_DW_OP_STOREDW);
		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
					   MI_FLUSH_DW_USE_GTT);
		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
		intel_ring_emit(signaller, seqno);
		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
					   MI_SEMAPHORE_TARGET(waiter->id));
		intel_ring_emit(signaller, 0);
	}

	return 0;
}

static int gen6_signal(struct drm_i915_gem_request *signaller_req,
		       unsigned int num_dwords)
{
	struct intel_engine_cs *signaller = signaller_req->ring;
	struct drm_device *dev = signaller->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *useless;
	int i, ret, num_rings;

#define MBOX_UPDATE_DWORDS 3
	num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
#undef MBOX_UPDATE_DWORDS

	ret = intel_ring_begin(signaller_req, num_dwords);
	if (ret)
		return ret;

	for_each_ring(useless, dev_priv, i) {
		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i];

		if (i915_mmio_reg_valid(mbox_reg)) {
			u32 seqno = i915_gem_request_get_seqno(signaller_req);

			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
			intel_ring_emit_reg(signaller, mbox_reg);
			intel_ring_emit(signaller, seqno);
		}
	}

	/* If num_dwords was rounded, make sure the tail pointer is correct */
	if (num_rings % 2 == 0)
		intel_ring_emit(signaller, MI_NOOP);

	return 0;
}
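
/*
 * Rounding example for gen6_signal() (illustrative): with four rings, the
 * three other mailboxes need 3 * 3 = 9 dwords, which round_up() pads to 10,
 * presumably to keep the ring tail qword aligned; the spare slot is then
 * filled by the MI_NOOP emitted above when num_rings is even.
 */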

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @request - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (ring->semaphore.signal)
		ret = ring->semaphore.signal(req, 4);
	else
		ret = intel_ring_begin(req, 4);

	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
					      u32 seqno)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return dev_priv->last_seqno < seqno;
}

/**
 * intel_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */

static int
gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->ring;
	struct drm_i915_private *dev_priv = waiter->dev->dev_private;
	int ret;

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
				MI_SEMAPHORE_GLOBAL_GTT |
				MI_SEMAPHORE_POLL |
				MI_SEMAPHORE_SAD_GTE_SDD);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter,
			lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_emit(waiter,
			upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
	intel_ring_advance(waiter);
	return 0;
}

static int
gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
	       struct intel_engine_cs *signaller,
	       u32 seqno)
{
	struct intel_engine_cs *waiter = waiter_req->ring;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;
	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
	int ret;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter_req, 4);
	if (ret)
		return ret;

	/* If seqno wrap happened, omit the wait with no-ops */
	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
		intel_ring_emit(waiter, dw1 | wait_mbox);
		intel_ring_emit(waiter, seqno);
		intel_ring_emit(waiter, 0);
		intel_ring_emit(waiter, MI_NOOP);
	} else {
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
		intel_ring_emit(waiter, MI_NOOP);
	}
	intel_ring_advance(waiter);

	return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
			PIPE_CONTROL_DEPTH_STALL); \
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
	intel_ring_emit(ring__, 0); \
	intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(req, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 2 * CACHELINE_BYTES;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	intel_ring_emit(ring, 0);
	__intel_ring_advance(ring);

	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page.
	 */
	if (!lazy_coherency) {
		struct drm_i915_private *dev_priv = ring->dev->dev_private;
		POSTING_READ(RING_ACTHD(ring->mmio_base));
	}

	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)
{
	return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	ring->scratch.cpu_page[0] = seqno;
}

static bool
gen5_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0)
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!intel_irqs_enabled(dev_priv))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
bsd_ring_flush(struct drm_i915_gem_request *req,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	__intel_ring_advance(ring);

	return 0;
}

static bool
gen6_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_PARITY_ERROR(dev)));
		else
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS)
			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
		else
			I915_WRITE_IMR(ring, ~0);
		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
hsw_vebox_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
hsw_vebox_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
gen8_ring_get_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~(ring->irq_enable_mask |
					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
		} else {
			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen8_ring_put_irq(struct intel_engine_cs *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		if (HAS_L3_DPF(dev) && ring->id == RCS) {
			I915_WRITE_IMR(ring,
				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
		} else {
			I915_WRITE_IMR(ring, ~0);
		}
		POSTING_READ(RING_IMR(ring->mmio_base));
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 length,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
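
/*
 * For reference: with the values above, I830_WA_SIZE evaluates to 256 KiB,
 * since the batch limit dominates the I830_TLB_ENTRIES * 4096 bytes used to
 * prime the TLB entries in i830_dispatch_execbuffer() below.
 */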
		 */
		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
		intel_ring_emit(ring, cs_offset);
		intel_ring_emit(ring, 4096);
		intel_ring_emit(ring, offset);

		intel_ring_emit(ring, MI_FLUSH);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		/* ... and execute it. */
		offset = cs_offset;
	}

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
			 u64 offset, u32 len,
			 unsigned dispatch_flags)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
					0 : MI_BATCH_NON_SECURE));
	intel_ring_advance(ring);

	return 0;
}

static void cleanup_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(sg_page(obj->pages->sgl));
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_object *obj;

	if ((obj = ring->status_page.obj) == NULL) {
		unsigned flags;
		int ret;

		obj = i915_gem_alloc_object(ring->dev, 4096);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate status page\n");
			return -ENOMEM;
		}

		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
		if (ret)
			goto err_unref;

		flags = 0;
		if (!HAS_LLC(ring->dev))
			/* On g33, we cannot place HWS above 256MiB, so
			 * restrict its pinning to the low mappable arena.
			 * Though this restriction is not documented for
			 * gen4, gen5, or byt, they also behave similarly
			 * and hang if the HWS is placed at the top of the
			 * GTT. To generalise, it appears that all !llc
			 * platforms have issues with us placing the HWS
			 * above the mappable region (even though we never
			 * actually map it).
			 */
			flags |= PIN_MAPPABLE;
		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
		if (ret) {
err_unref:
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->status_page.obj = obj;
	}

	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;
}

static int init_phys_status_page(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	if (!dev_priv->status_page_dmah) {
		dev_priv->status_page_dmah =
			drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
		if (!dev_priv->status_page_dmah)
			return -ENOMEM;
	}

	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	return 0;
}

void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
		vunmap(ringbuf->virtual_start);
	else
		iounmap(ringbuf->virtual_start);
	ringbuf->virtual_start = NULL;
	i915_gem_object_ggtt_unpin(ringbuf->obj);
}

static u32 *vmap_obj(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	struct vm_page **pages;
	void *addr;
	int i;

	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		return NULL;

	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	addr = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);

	return addr;
}

int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret;

	if (HAS_LLC(dev_priv) && !obj->stolen) {
		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
		if (ret)
			return ret;

		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret) {
			i915_gem_object_ggtt_unpin(obj);
			return ret;
		}

		ringbuf->virtual_start = (char *)vmap_obj(obj);
		if (ringbuf->virtual_start == NULL) {
			i915_gem_object_ggtt_unpin(obj);
			return -ENOMEM;
		}
	} else {
		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
		if (ret)
			return ret;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret) {
			i915_gem_object_ggtt_unpin(obj);
			return ret;
		}

		ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
						    i915_gem_obj_ggtt_offset(obj),
						    ringbuf->size);
		if (ringbuf->virtual_start == NULL) {
			i915_gem_object_ggtt_unpin(obj);
			return -EINVAL;
		}
	}

	return 0;
}

static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
{
	drm_gem_object_unreference(&ringbuf->obj->base);
	ringbuf->obj = NULL;
}
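
/* Descriptive note (added): on !LLC platforms the ring is only ever written
 * through the GTT aperture (see intel_pin_and_map_ringbuffer_obj above), so
 * we prefer to carve the ring out of stolen memory first and only fall back
 * to a regular GEM object if that allocation is unavailable.
 */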
static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
				      struct intel_ringbuffer *ringbuf)
{
	struct drm_i915_gem_object *obj;

	obj = NULL;
	if (!HAS_LLC(dev))
		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, ringbuf->size);
	if (obj == NULL)
		return -ENOMEM;

	/* mark ring buffers as read-only from GPU side by default */
	obj->gt_ro = 1;

	ringbuf->obj = obj;

	return 0;
}

struct intel_ringbuffer *
intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
{
	struct intel_ringbuffer *ring;
	int ret;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (ring == NULL) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
				 engine->name);
		return ERR_PTR(-ENOMEM);
	}

	ring->ring = engine;
	list_add(&ring->link, &engine->buffers);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->dev) || IS_845G(engine->dev))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	ring->last_retired_head = -1;
	intel_ring_update_space(ring);

	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
	if (ret) {
		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
				 engine->name, ret);
		list_del(&ring->link);
		kfree(ring);
		return ERR_PTR(ret);
	}

	return ring;
}

void
intel_ringbuffer_free(struct intel_ringbuffer *ring)
{
	intel_destroy_ringbuffer_obj(ring);
	list_del(&ring->link);
	kfree(ring);
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_engine_cs *ring)
{
	struct intel_ringbuffer *ringbuf;
	int ret;

	WARN_ON(ring->buffer);

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->execlist_queue);
	INIT_LIST_HEAD(&ring->buffers);
	i915_gem_batch_pool_init(dev, &ring->batch_pool);
	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));

	init_waitqueue_head(&ring->irq_queue);

	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
	if (IS_ERR(ringbuf)) {
		ret = PTR_ERR(ringbuf);
		goto error;
	}
	ring->buffer = ringbuf;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			goto error;
	} else {
		BUG_ON(ring->id != RCS);
		ret = init_phys_status_page(ring);
		if (ret)
			goto error;
	}

	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
	if (ret) {
		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
			  ring->name, ret);
		intel_destroy_ringbuffer_obj(ringbuf);
		goto error;
	}

	ret = i915_cmd_parser_init_ring(ring);
	if (ret)
		goto error;

	return 0;

error:
	intel_cleanup_ring_buffer(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv;

	if (!intel_ring_initialized(ring))
		return;

	dev_priv = to_i915(ring->dev);

	if (ring->buffer) {
		intel_stop_ring_buffer(ring);
		WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);

		intel_unpin_ringbuffer_obj(ring->buffer);
		intel_ringbuffer_free(ring->buffer);
		ring->buffer = NULL;
	}

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);

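	/* Descriptive note (added): finally release the remaining software
	 * state, i.e. the command parser tables and the engine's batch pool.
	 */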
	i915_cmd_parser_fini_ring(ring);
	i915_gem_batch_pool_fini(&ring->batch_pool);
	ring->dev = NULL;
}

static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	struct drm_i915_gem_request *request;
	unsigned space;
	int ret;

	if (intel_ring_space(ringbuf) >= n)
		return 0;

	/* The whole point of reserving space is to not wait! */
	WARN_ON(ringbuf->reserved_in_use);

	list_for_each_entry(request, &ring->request_list, list) {
		space = __intel_ring_space(request->postfix, ringbuf->tail,
					   ringbuf->size);
		if (space >= n)
			break;
	}

	if (WARN_ON(&request->list == &ring->request_list))
		return -ENOSPC;

	ret = i915_wait_request(request);
	if (ret)
		return ret;

	ringbuf->space = space;
	return 0;
}

static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
{
	uint32_t __iomem *virt;
	int rem = ringbuf->size - ringbuf->tail;

	virt = (unsigned int *)((char *)ringbuf->virtual_start + ringbuf->tail);
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ringbuf->tail = 0;
	intel_ring_update_space(ringbuf);
}

int intel_ring_idle(struct intel_engine_cs *ring)
{
	struct drm_i915_gem_request *req;

	/* Wait upon the last request to be completed */
	if (list_empty(&ring->request_list))
		return 0;

	req = list_entry(ring->request_list.prev,
			 struct drm_i915_gem_request,
			 list);

	/* Make sure we do not trigger any retires */
	return __i915_wait_request(req,
				   atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
				   to_i915(ring->dev)->mm.interruptible,
				   NULL, NULL);
}

int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
	request->ringbuf = request->ring->buffer;
	return 0;
}

int intel_ring_reserve_space(struct drm_i915_gem_request *request)
{
	/*
	 * The first call merely notes the reserve request and is common for
	 * all back ends. The subsequent localised _begin() call actually
	 * ensures that the reservation is available. Without the begin, if
	 * the request creator immediately submitted the request without
	 * adding any commands to it then there might not actually be
	 * sufficient room for the submission commands.
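	 *
	 * Added sketch of the typical reservation lifecycle (rough outline
	 * only; the exact call sites live in the GEM request code):
	 *
	 *   intel_ring_reserve_space(request);
	 *       ... the caller emits its own commands ...
	 *   intel_ring_reserved_space_use(request->ringbuf);
	 *       ... the add-request commands are emitted from the reserve ...
	 *   intel_ring_reserved_space_end(request->ringbuf);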
	 */
	intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);

	return intel_ring_begin(request, 0);
}

void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
{
	WARN_ON(ringbuf->reserved_size);
	WARN_ON(ringbuf->reserved_in_use);

	ringbuf->reserved_size = size;
}

void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
{
	WARN_ON(ringbuf->reserved_in_use);

	ringbuf->reserved_size = 0;
	ringbuf->reserved_in_use = false;
}

void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
{
	WARN_ON(ringbuf->reserved_in_use);

	ringbuf->reserved_in_use = true;
	ringbuf->reserved_tail = ringbuf->tail;
}

void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
{
	WARN_ON(!ringbuf->reserved_in_use);
	if (ringbuf->tail > ringbuf->reserved_tail) {
		WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
		     "request reserved size too small: %d vs %d!\n",
		     ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
	} else {
		/*
		 * The ring was wrapped while the reserved space was in use.
		 * That means that some unknown amount of the ring tail was
		 * no-op filled and skipped. Thus simply adding the ring size
		 * to the tail and doing the above space check will not work.
		 * Rather than attempt to track how much tail was skipped,
		 * it is much simpler to say that also skipping the sanity
		 * check every once in a while is not a big issue.
		 */
	}

	ringbuf->reserved_size = 0;
	ringbuf->reserved_in_use = false;
}

static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
{
	struct intel_ringbuffer *ringbuf = ring->buffer;
	int remain_usable = ringbuf->effective_size - ringbuf->tail;
	int remain_actual = ringbuf->size - ringbuf->tail;
	int ret, total_bytes, wait_bytes = 0;
	bool need_wrap = false;

	if (ringbuf->reserved_in_use)
		total_bytes = bytes;
	else
		total_bytes = bytes + ringbuf->reserved_size;

	if (unlikely(bytes > remain_usable)) {
		/*
		 * Not enough space for the basic request. So need to flush
		 * out the remainder and then wait for base + reserved.
		 */
		wait_bytes = remain_actual + total_bytes;
		need_wrap = true;
	} else {
		if (unlikely(total_bytes > remain_usable)) {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So only need to wait for the
			 * reserved size after flushing out the remainder.
			 */
			wait_bytes = remain_actual + ringbuf->reserved_size;
			need_wrap = true;
		} else if (total_bytes > ringbuf->space) {
			/* No wrapping required, just waiting.
			 */
			wait_bytes = total_bytes;
		}
	}

	if (wait_bytes) {
		ret = ring_wait_for_space(ring, wait_bytes);
		if (unlikely(ret))
			return ret;

		if (need_wrap)
			__wrap_ring_buffer(ringbuf);
	}

	return 0;
}

int intel_ring_begin(struct drm_i915_gem_request *req,
		     int num_dwords)
{
	struct intel_engine_cs *ring;
	struct drm_i915_private *dev_priv;
	int ret;

	WARN_ON(req == NULL);
	ring = req->ring;
	dev_priv = ring->dev->dev_private;

	ret = i915_gem_check_wedge(&dev_priv->gpu_error,
				   dev_priv->mm.interruptible);
	if (ret)
		return ret;

	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t));
	if (ret)
		return ret;

	ring->buffer->space -= num_dwords * sizeof(uint32_t);
	return 0;
}

/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
	int ret;

	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
	ret = intel_ring_begin(req, num_dwords);
	if (ret)
		return ret;

	while (num_dwords--)
		intel_ring_emit(ring, MI_NOOP);

	intel_ring_advance(ring);

	return 0;
}

void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) {
		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
		if (HAS_VEBOX(dev))
			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
	}

	ring->set_seqno(ring, seqno);
	ring->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
				     u32 value)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	I915_WRITE64(GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	I915_WRITE_TAIL(ring, value);
	POSTING_READ(RING_TAIL(ring->mmio_base));

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}

static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
			       u32 invalidate, u32 flush)
{
	struct intel_engine_cs *ring = req->ring;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(ring->dev)->gen >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);
	return 0;
}

static int
gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned dispatch_flags)
{
	struct intel_engine_cs *ring = req->ring;
	bool ppgtt = USES_PPGTT(ring->dev) &&
			!(dispatch_flags & I915_DISPATCH_SECURE);
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* FIXME(BDW): Address space and security selectors. */
	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	intel_ring_emit(ring, lower_32_bits(offset));
	intel_ring_emit(ring, upper_32_bits(offset));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int
hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			     u64 offset, u32 len,
			     unsigned dispatch_flags)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
			(dispatch_flags & I915_DISPATCH_RS ?
			 MI_BATCH_RESOURCE_STREAMER : 0));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
			      u64 offset, u32 len,
			      unsigned dispatch_flags)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(dispatch_flags & I915_DISPATCH_SECURE ?
			 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct drm_i915_gem_request *req,
			   u32 invalidate, u32 flush)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (INTEL_INFO(dev)->gen >= 8)
		cmd += 1;

	/* We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	if (INTEL_INFO(dev)->gen >= 8) {
		intel_ring_emit(ring, 0); /* upper addr */
		intel_ring_emit(ring, 0); /* value */
	} else {
		intel_ring_emit(ring, 0);
		intel_ring_emit(ring, MI_NOOP);
	}
	intel_ring_advance(ring);

	return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	struct drm_i915_gem_object *obj;
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 8) {
		if (i915_semaphore_is_enabled(dev)) {
			obj = i915_gem_alloc_object(dev, 4096);
			if (obj == NULL) {
				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
				i915.semaphores = 0;
			} else {
				i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
				ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
				if (ret != 0) {
					drm_gem_object_unreference(&obj->base);
					DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
					i915.semaphores = 0;
				} else
					dev_priv->semaphore_obj = obj;
			}
		}

		ring->init_context = intel_rcs_ctx_init;
		ring->add_request = gen6_add_request;
		ring->flush = gen8_render_ring_flush;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (i915_semaphore_is_enabled(dev)) {
			WARN_ON(!dev_priv->semaphore_obj);
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_rcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else if (INTEL_INFO(dev)->gen >= 6) {
		ring->init_context = intel_rcs_ctx_init;
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen6_ring_sync;
			ring->semaphore.signal = gen6_signal;
			/*
			 * The current semaphore is only applied on pre-gen8
			 * platform. And there is no VCS2 ring on the pre-gen8
			 * platform. So the semaphore between RCS and VCS2 is
			 * initialized as INVALID. Gen8 will initialize the
			 * sema between VCS2 and RCS later.
			 */
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;

	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (IS_GEN8(dev))
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init_hw = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	ret = intel_init_ring_buffer(dev, ring);
	if (ret)
		return ret;

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = intel_init_pipe_control(ring);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (INTEL_INFO(dev)->gen >= 8) {
			ring->irq_enable_mask =
				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
			ring->irq_get = gen8_ring_get_irq;
			ring->irq_put = gen8_ring_put_irq;
			ring->dispatch_execbuffer =
				gen8_ring_dispatch_execbuffer;
			if (i915_semaphore_is_enabled(dev)) {
				ring->semaphore.sync_to = gen8_ring_sync;
				ring->semaphore.signal = gen8_xcs_signal;
				GEN8_RING_SEMAPHORE_INIT;
			}
		} else {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen6_ring_get_irq;
			ring->irq_put = gen6_ring_put_irq;
			ring->dispatch_execbuffer =
				gen6_ring_dispatch_execbuffer;
			if (i915_semaphore_is_enabled(dev)) {
				ring->semaphore.sync_to = gen6_ring_sync;
				ring->semaphore.signal = gen6_signal;
				ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
				ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
				ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
				ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
				ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
				ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
				ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
				ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
				ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
				ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
			}
		}
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init_hw = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

/**
 * Initialize the second BSD ring (e.g. Broadwell GT3, Skylake GT3)
 */
int intel_init_bsd2_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];

	ring->name = "bsd2 ring";
	ring->id = VCS2;

	ring->write_tail = ring_write_tail;
	ring->mmio_base = GEN8_BSD2_RING_BASE;
	ring->flush = gen6_bsd_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask =
		GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
	ring->irq_get = gen8_ring_get_irq;
	ring->irq_put = gen8_ring_put_irq;
	ring->dispatch_execbuffer =
		gen8_ring_dispatch_execbuffer;
	if (i915_semaphore_is_enabled(dev)) {
		ring->semaphore.sync_to = gen8_ring_sync;
		ring->semaphore.signal = gen8_xcs_signal;
		GEN8_RING_SEMAPHORE_INIT;
	}
	ring->init_hw = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_xcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else {
		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.signal = gen6_signal;
			ring->semaphore.sync_to = gen6_ring_sync;
			/*
			 * The current semaphore is only applied on pre-gen8
			 * platform. And there is no VCS2 ring on the pre-gen8
			 * platform. So the semaphore between BCS and VCS2 is
			 * initialized as INVALID. Gen8 will initialize the
			 * sema between BCS and VCS2 later.
			 */
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	}
	ring->init_hw = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;

	if (INTEL_INFO(dev)->gen >= 8) {
		ring->irq_enable_mask =
			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
		ring->irq_get = gen8_ring_get_irq;
		ring->irq_put = gen8_ring_put_irq;
		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen8_ring_sync;
			ring->semaphore.signal = gen8_xcs_signal;
			GEN8_RING_SEMAPHORE_INIT;
		}
	} else {
		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
		ring->irq_get = hsw_vebox_get_irq;
		ring->irq_put = hsw_vebox_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		if (i915_semaphore_is_enabled(dev)) {
			ring->semaphore.sync_to = gen6_ring_sync;
			ring->semaphore.signal = gen6_signal;
			ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
			ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
			ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
			ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
			ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
			ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
			ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
			ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
			ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
		}
	}
	ring->init_hw = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int
intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *ring = req->ring;
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}

void
intel_stop_ring_buffer(struct intel_engine_cs *ring)
{
	int ret;

	if (!intel_ring_initialized(ring))
		return;

	ret = intel_ring_idle(ring);
	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	stop_ring(ring);
}