// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include <linux/string_helpers.h>

#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_internal.h"
#include "gt/intel_gt_regs.h"

#include "i915_cmd_parser.h"
#include "i915_drv.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_engine_user.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_lrc_reg.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "uc/intel_guc_submission.h"

/* Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

#define MAX_MMIO_BASES 3
struct engine_info {
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse graphics_ver order */
	struct engine_mmio_base {
		u32 graphics_ver : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS0] = {
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS0] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 6, .base = BLT_RING_BASE }
		},
	},
	[BCS1] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS1_RING_BASE }
		},
	},
	[BCS2] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS2_RING_BASE }
		},
	},
	[BCS3] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS3_RING_BASE }
		},
	},
	[BCS4] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 4,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS4_RING_BASE }
		},
	},
	[BCS5] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 5,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS5_RING_BASE }
		},
	},
	[BCS6] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 6,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS6_RING_BASE }
		},
	},
	[BCS7] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 7,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS7_RING_BASE }
		},
	},
	[BCS8] = {
		.class = COPY_ENGINE_CLASS,
		.instance = 8,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHPC_BCS8_RING_BASE }
		},
	},
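	/*
	 * For engines whose MMIO base moved between generations, the table
	 * carries one entry per base, newest first; __engine_mmio_base()
	 * below picks the first entry whose graphics_ver does not exceed
	 * the platform's. E.g. VCS0 resolves to GEN6_BSD_RING_BASE on a
	 * graphics version 9 platform (9 >= 6, but 9 < 11).
	 */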
	[VCS0] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD_RING_BASE },
			{ .graphics_ver = 6, .base = GEN6_BSD_RING_BASE },
			{ .graphics_ver = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS1] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD2_RING_BASE },
			{ .graphics_ver = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS2] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS3] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VCS4] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 4,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
		},
	},
	[VCS5] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 5,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
		},
	},
	[VCS6] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 6,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
		},
	},
	[VCS7] = {
		.class = VIDEO_DECODE_CLASS,
		.instance = 7,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
		},
	},
	[VECS0] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .graphics_ver = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS1] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
	[VECS2] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
		},
	},
	[VECS3] = {
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
		},
	},
	[CCS0] = {
		.class = COMPUTE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = GEN12_COMPUTE0_RING_BASE }
		}
	},
	[CCS1] = {
		.class = COMPUTE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = GEN12_COMPUTE1_RING_BASE }
		}
	},
	[CCS2] = {
		.class = COMPUTE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = GEN12_COMPUTE2_RING_BASE }
		}
	},
	[CCS3] = {
		.class = COMPUTE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .graphics_ver = 12, .base = GEN12_COMPUTE3_RING_BASE }
		}
	},
};

/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @gt: the gt
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case COMPUTE_CLASS:
		fallthrough;
	case RENDER_CLASS:
		switch (GRAPHICS_VER(gt->i915)) {
		default:
			MISSING_CASE(GRAPHICS_VER(gt->i915));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(gt->i915))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			drm_dbg(&gt->i915->drm,
				"graphics_ver = %d CXT_SIZE = %d bytes [0x%08x]\n",
				GRAPHICS_VER(gt->i915), cxt_size * 64,
				cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (GRAPHICS_VER(gt->i915) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (GRAPHICS_VER(i915) >= bases[i].graphics_ver)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
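	/*
	 * HWSTAM is a write mask: set bits block the corresponding status
	 * writes into the HWSP, so ~0u silences everything (see
	 * intel_engine_sanitize_mmio() below) and 0 allows everything.
	 */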
	if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
		return;

	if (GRAPHICS_VER(engine->i915) >= 3)
		ENGINE_WRITE(engine, RING_HWSTAM, mask);
	else
		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}

static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	GEM_DEBUG_WARN_ON(iir);
}

static u32 get_reset_domain(u8 ver, enum intel_engine_id id)
{
	u32 reset_domain;

	if (ver >= 11) {
		static const u32 engine_reset_domains[] = {
			[RCS0]  = GEN11_GRDOM_RENDER,
			[BCS0]  = GEN11_GRDOM_BLT,
			[BCS1]  = XEHPC_GRDOM_BLT1,
			[BCS2]  = XEHPC_GRDOM_BLT2,
			[BCS3]  = XEHPC_GRDOM_BLT3,
			[BCS4]  = XEHPC_GRDOM_BLT4,
			[BCS5]  = XEHPC_GRDOM_BLT5,
			[BCS6]  = XEHPC_GRDOM_BLT6,
			[BCS7]  = XEHPC_GRDOM_BLT7,
			[BCS8]  = XEHPC_GRDOM_BLT8,
			[VCS0]  = GEN11_GRDOM_MEDIA,
			[VCS1]  = GEN11_GRDOM_MEDIA2,
			[VCS2]  = GEN11_GRDOM_MEDIA3,
			[VCS3]  = GEN11_GRDOM_MEDIA4,
			[VCS4]  = GEN11_GRDOM_MEDIA5,
			[VCS5]  = GEN11_GRDOM_MEDIA6,
			[VCS6]  = GEN11_GRDOM_MEDIA7,
			[VCS7]  = GEN11_GRDOM_MEDIA8,
			[VECS0] = GEN11_GRDOM_VECS,
			[VECS1] = GEN11_GRDOM_VECS2,
			[VECS2] = GEN11_GRDOM_VECS3,
			[VECS3] = GEN11_GRDOM_VECS4,
			[CCS0]  = GEN11_GRDOM_RENDER,
			[CCS1]  = GEN11_GRDOM_RENDER,
			[CCS2]  = GEN11_GRDOM_RENDER,
			[CCS3]  = GEN11_GRDOM_RENDER,
		};
		GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
			   !engine_reset_domains[id]);
		reset_domain = engine_reset_domains[id];
	} else {
		static const u32 engine_reset_domains[] = {
			[RCS0]  = GEN6_GRDOM_RENDER,
			[BCS0]  = GEN6_GRDOM_BLT,
			[VCS0]  = GEN6_GRDOM_MEDIA,
			[VCS1]  = GEN8_GRDOM_MEDIA2,
			[VECS0] = GEN6_GRDOM_VECS,
		};
		GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) ||
			   !engine_reset_domains[id]);
		reset_domain = engine_reset_domains[id];
	}

	return reset_domain;
}

static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
			      u8 logical_instance)
{
	const struct engine_info *info = &intel_engines[id];
	struct drm_i915_private *i915 = gt->i915;
	struct intel_engine_cs *engine;
	u8 guc_class;

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
	BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
	BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));

	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
		return -EINVAL;

	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);

	INIT_LIST_HEAD(&engine->pinned_contexts_list);
	engine->id = id;
	engine->legacy_idx = INVALID_ENGINE;
	engine->mask = BIT(id);
	engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915),
						id);
	engine->i915 = i915;
	engine->gt = gt;
	engine->uncore = gt->uncore;
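	/*
	 * Note: the GuC numbers engine classes differently from i915,
	 * hence the conversion below before packing (class, instance)
	 * into the GuC engine id.
	 */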
	guc_class = engine_class_to_guc_class(info->class);
	engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
	engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);

	engine->irq_handler = nop_irq_handler;

	engine->class = info->class;
	engine->instance = info->instance;
	engine->logical_mask = BIT(logical_instance);
	__sprint_engine_name(engine);

	if ((engine->class == COMPUTE_CLASS && !RCS_MASK(engine->gt) &&
	     __ffs(CCS_MASK(engine->gt)) == engine->instance) ||
	     engine->class == RENDER_CLASS)
		engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;

	/* features common between engines sharing EUs */
	if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
		engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
		engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
	}

	engine->props.heartbeat_interval_ms =
		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
	engine->props.max_busywait_duration_ns =
		CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
	engine->props.preempt_timeout_ms =
		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
	engine->props.stop_timeout_ms =
		CONFIG_DRM_I915_STOP_TIMEOUT;
	engine->props.timeslice_duration_ms =
		CONFIG_DRM_I915_TIMESLICE_DURATION;

	/* Override to uninterruptible for OpenCL workloads. */
	if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
		engine->props.preempt_timeout_ms = 0;

	/* Cap properties according to any system limits */
#define CLAMP_PROP(field) \
	do { \
		u64 clamp = intel_clamp_##field(engine, engine->props.field); \
		if (clamp != engine->props.field) { \
			drm_notice(&engine->i915->drm, \
				   "Warning, clamping %s to %lld to prevent overflow\n", \
				   #field, clamp); \
			engine->props.field = clamp; \
		} \
	} while (0)

	CLAMP_PROP(heartbeat_interval_ms);
	CLAMP_PROP(max_busywait_duration_ns);
	CLAMP_PROP(preempt_timeout_ms);
	CLAMP_PROP(stop_timeout_ms);
	CLAMP_PROP(timeslice_duration_ms);

#undef CLAMP_PROP

	engine->defaults = engine->props; /* never to change again */

	engine->context_size = intel_engine_context_size(gt, engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(i915)->has_logical_contexts = true;

	ewma__engine_latency_init(&engine->latency);
	seqcount_init(&engine->stats.execlists.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	gt->engine_class[info->class][info->instance] = engine;
	gt->engine[id] = engine;

	return 0;
}

u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
{
	value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));

	return value;
}

u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
{
	value = min(value, jiffies_to_nsecs(2));

	return value;
}

u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
{
	/*
	 * NB: The GuC API only supports 32bit values. However, the limit is further
	 * reduced due to internal calculations which would otherwise overflow.
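	 * Hence, for GuC submission, clamp to the GuC policy limit below in
	 * addition to the kernel's jiffies-based maximum.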
	 */
	if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
		value = min_t(u64, value, guc_policy_max_preempt_timeout_ms());

	value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));

	return value;
}

u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
{
	value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));

	return value;
}

u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
{
	/*
	 * NB: The GuC API only supports 32bit values. However, the limit is further
	 * reduced due to internal calculations which would otherwise overflow.
	 */
	if (intel_guc_submission_is_wanted(&engine->gt->uc.guc))
		value = min_t(u64, value, guc_policy_max_exec_quantum_ms());

	value = min_t(u64, value, jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT));

	return value;
}

static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class == VIDEO_DECODE_CLASS) {
		/*
		 * HEVC support is present on first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
		if (GRAPHICS_VER(i915) >= 11 ||
		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_CLASS_CAPABILITY_HEVC;

		/*
		 * SFC block is present only on even logical engine
		 * instances.
		 */
		if ((GRAPHICS_VER(i915) >= 11 &&
		     (engine->gt->info.vdbox_sfc_access &
		      BIT(engine->instance))) ||
		    (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
		if (GRAPHICS_VER(i915) >= 9 &&
		    engine->gt->info.sfc_mask & BIT(engine->instance))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	}
}

static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		__setup_engine_capabilities(engine);
}

/**
 * intel_engines_release() - free the resources allocated for Command Streamers
 * @gt: pointer to struct intel_gt
 */
void intel_engines_release(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * Before we release the resources held by engine, we must be certain
	 * that the HW is no longer accessing them -- having the GPU scribble
	 * to or read from a page being used for something else causes no end
	 * of fun.
	 *
	 * The GPU should be reset by this point, but assume the worst just
	 * in case we aborted before completely initialising the engines.
	 */
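	/*
	 * The GT must already be parked here; releasing engines while a
	 * wakeref is still held would let the HW race our teardown.
	 */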
	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	/* Decouple the backend; but keep the layout for late GPU resets */
	for_each_engine(engine, gt, id) {
		if (!engine->release)
			continue;

		intel_wakeref_wait_for_idle(&engine->wakeref);
		GEM_BUG_ON(intel_engine_pm_is_awake(engine));

		engine->release(engine);
		engine->release = NULL;

		memset(&engine->reset, 0, sizeof(engine->reset));
	}
}

void intel_engine_free_request_pool(struct intel_engine_cs *engine)
{
	if (!engine->request_pool)
		return;

#ifdef __linux__
	kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
#else
	pool_put(i915_request_slab_cache(), engine->request_pool);
#endif
}

void intel_engines_free(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Free the requests! dma-resv keeps fences around for an eternity */
	rcu_barrier();

	for_each_engine(engine, gt, id) {
		intel_engine_free_request_pool(engine);
		kfree(engine);
		gt->engine[id] = NULL;
	}
}

static
bool gen11_vdbox_has_sfc(struct intel_gt *gt,
			 unsigned int physical_vdbox,
			 unsigned int logical_vdbox, u16 vdbox_mask)
{
	struct drm_i915_private *i915 = gt->i915;

	/*
	 * In Gen11, only even numbered logical VDBOXes are hooked
	 * up to an SFC (Scaler & Format Converter) unit.
	 * In Gen12, even numbered physical instances are always connected
	 * to an SFC. Odd numbered physical instances have SFC only if the
	 * previous even instance is fused off.
	 *
	 * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
	 * in the fuse register that tells us whether a specific SFC is present.
	 */
	if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
		return false;
	else if (MEDIA_VER(i915) >= 12)
		return (physical_vdbox % 2 == 0) ||
			!(BIT(physical_vdbox - 1) & vdbox_mask);
	else if (MEDIA_VER(i915) == 11)
		return logical_vdbox % 2 == 0;

	return false;
}

static void engine_mask_apply_media_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	unsigned int logical_vdbox = 0;
	unsigned int i;
	u32 media_fuse, fuse1;
	u16 vdbox_mask;
	u16 vebox_mask;

	if (MEDIA_VER(gt->i915) < 11)
		return;

	/*
	 * On newer platforms the fusing register is called 'enable' and has
	 * enable semantics, while on older platforms it is called 'disable'
	 * and bits have disable semantics.
	 */
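	/*
	 * E.g. before version 12.50 a set bit means "engine fused off";
	 * inverting the value below lets the rest of this function treat
	 * every platform's register as an enable mask.
	 */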
	media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
	if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
		media_fuse = ~media_fuse;

	vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
	vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
		      GEN11_GT_VEBOX_DISABLE_SHIFT;

	if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
		fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
		gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
	} else {
		gt->info.sfc_mask = ~0;
	}

	for (i = 0; i < I915_MAX_VCS; i++) {
		if (!HAS_ENGINE(gt, _VCS(i))) {
			vdbox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vdbox_mask)) {
			gt->info.engine_mask &= ~BIT(_VCS(i));
			drm_dbg(&i915->drm, "vcs%u fused off\n", i);
			continue;
		}

		if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
			gt->info.vdbox_sfc_access |= BIT(i);
		logical_vdbox++;
	}
	drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
		vdbox_mask, VDBOX_MASK(gt));
	GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));

	for (i = 0; i < I915_MAX_VECS; i++) {
		if (!HAS_ENGINE(gt, _VECS(i))) {
			vebox_mask &= ~BIT(i);
			continue;
		}

		if (!(BIT(i) & vebox_mask)) {
			gt->info.engine_mask &= ~BIT(_VECS(i));
			drm_dbg(&i915->drm, "vecs%u fused off\n", i);
		}
	}
	drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
		vebox_mask, VEBOX_MASK(gt));
	GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
}

static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS;
	unsigned long ccs_mask;
	unsigned int i;

	if (GRAPHICS_VER(i915) < 11)
		return;

	if (hweight32(CCS_MASK(gt)) <= 1)
		return;

	ccs_mask = intel_slicemask_from_xehp_dssmask(info->sseu.compute_subslice_mask,
						     ss_per_ccs);
	/*
	 * If all DSS in a quadrant are fused off, the corresponding CCS
	 * engine is not available for use.
	 */
	for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
		info->engine_mask &= ~BIT(_CCS(i));
		drm_dbg(&i915->drm, "ccs%u fused off\n", i);
	}
}

static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_gt_info *info = &gt->info;
	unsigned long meml3_mask;
	unsigned long quad;

	if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
	      GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
		return;

	meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
	meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);

	/*
	 * Link Copy engines may be fused off according to meml3_mask. Each
	 * bit is a quad that houses two Link Copy and two Sub Copy engines.
	 */
	for_each_clear_bit(quad, &meml3_mask, GEN12_MAX_MSLICES) {
		unsigned int instance = quad * 2 + 1;
		intel_engine_mask_t mask = GENMASK(_BCS(instance + 1),
						   _BCS(instance));

		if (mask & info->engine_mask) {
			drm_dbg(&i915->drm, "bcs%u fused off\n", instance);
			drm_dbg(&i915->drm, "bcs%u fused off\n", instance + 1);

			info->engine_mask &= ~mask;
		}
	}
}

/*
 * Determine which engines are fused off in our particular hardware.
 * Note that we have a catch-22 situation where we need to be able to access
 * the blitter forcewake domain to read the engine fuses, but at the same time
 * we need to know which engines are available on the system to know which
 * forcewake domains are present. We solve this by initializing the forcewake
 * domains based on the full engine mask in the platform capabilities before
 * calling this function and pruning the domains for fused-off engines
 * afterwards.
 */
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
	struct intel_gt_info *info = &gt->info;

	GEM_BUG_ON(!info->engine_mask);

	engine_mask_apply_media_fuses(gt);
	engine_mask_apply_compute_fuses(gt);
	engine_mask_apply_copy_fuses(gt);

	return info->engine_mask;
}

static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
				 u8 class, const u8 *map, u8 num_instances)
{
	int i, j;
	u8 current_logical_id = 0;

	for (j = 0; j < num_instances; ++j) {
		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			if (!HAS_ENGINE(gt, i) ||
			    intel_engines[i].class != class)
				continue;

			if (intel_engines[i].instance == map[j]) {
				logical_ids[intel_engines[i].instance] =
					current_logical_id++;
				break;
			}
		}
	}
}

static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
{
	/*
	 * Logical to physical mapping is needed to properly support
	 * the split-frame feature.
	 */
	if (MEDIA_VER(gt->i915) >= 11 && class == VIDEO_DECODE_CLASS) {
		const u8 map[] = { 0, 2, 4, 6, 1, 3, 5, 7 };

		populate_logical_ids(gt, logical_ids, class,
				     map, ARRAY_SIZE(map));
	} else {
		int i;
		u8 map[MAX_ENGINE_INSTANCE + 1];

		for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
			map[i] = i;
		populate_logical_ids(gt, logical_ids, class,
				     map, ARRAY_SIZE(map));
	}
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	const unsigned int engine_mask = init_engine_mask(gt);
	unsigned int mask = 0;
	unsigned int i, class;
	u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
	int err;

	drm_WARN_ON(&i915->drm, engine_mask == 0);
	drm_WARN_ON(&i915->drm, engine_mask &
		    GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
		setup_logical_ids(gt, logical_ids, class);

		for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
			u8 instance = intel_engines[i].instance;

			if (intel_engines[i].class != class ||
			    !HAS_ENGINE(gt, i))
				continue;

			err = intel_engine_setup(gt, i,
						 logical_ids[instance]);
			if (err)
				goto cleanup;

			mask |= BIT(i);
		}
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver, by warning and disabling the forgotten
	 * engines.
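	 * Any engine present in the fuse-derived mask but not successfully
	 * set up above is pruned from gt->info.engine_mask here.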
	 */
	if (drm_WARN_ON(&i915->drm, mask != engine_mask))
		gt->info.engine_mask = mask;

	gt->info.num_engines = hweight32(mask);

	intel_gt_check_and_clear_faults(gt);

	intel_setup_engine_capabilities(gt);

	intel_uncore_prune_engine_fw_domains(gt->uncore, gt);

	return 0;

cleanup:
	intel_engines_free(gt);
	return err;
}

void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	memset(execlists->pending, 0, sizeof(execlists->pending));
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	i915_gem_object_put(vma->obj);
}

static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_gem_ww_ctx *ww,
				struct i915_vma *vma)
{
	unsigned int flags;

	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags = PIN_MAPPABLE;
	else
		flags = PIN_HIGH;

	return i915_ggtt_pin(vma, ww, 0, flags);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	INIT_LIST_HEAD(&engine->status_page.timelines);

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		drm_err(&engine->i915->drm,
			"Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_put;
	}

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
		ret = pin_ggtt_status_page(engine, &ww, vma);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;

err_unpin:
	if (ret)
		i915_vma_unpin(vma);
err:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
err_put:
	if (ret)
		i915_gem_object_put(obj);
	return ret;
}

static int engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	init_llist_head(&engine->barrier_tasks);

	err = init_status_page(engine);
	if (err)
		return err;

	engine->breadcrumbs = intel_breadcrumbs_create(engine);
	if (!engine->breadcrumbs) {
		err = -ENOMEM;
		goto err_status;
	}

	engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
	if (!engine->sched_engine) {
		err = -ENOMEM;
		goto err_sched_engine;
	}
	engine->sched_engine->private_data = engine;

	err = intel_engine_init_cmd_parser(engine);
	if (err)
		goto err_cmd_parser;

	intel_engine_init_execlists(engine);
	intel_engine_init__pm(engine);
	intel_engine_init_retire(engine);

	/* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&engine->gt->info.sseu);

	intel_engine_init_workarounds(engine);
	intel_engine_init_whitelist(engine);
	intel_engine_init_ctx_wa(engine);

	if (GRAPHICS_VER(engine->i915) >= 12)
		engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;

	return 0;

err_cmd_parser:
	i915_sched_engine_put(engine->sched_engine);
err_sched_engine:
	intel_breadcrumbs_put(engine->breadcrumbs);
err_status:
	cleanup_status_page(engine);
	return err;
}

struct measure_breadcrumb {
	struct i915_request rq;
	struct intel_ring ring;
	u32 cs[2048];
};

static int measure_breadcrumb_dw(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct measure_breadcrumb *frame;
	int dw;

	GEM_BUG_ON(!engine->gt->scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->rq.engine = engine;
	frame->rq.context = ce;
	rcu_assign_pointer(frame->rq.timeline, ce->timeline);
	frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;

	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.wrap =
		BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);
	frame->rq.ring = &frame->ring;

	mutex_lock(&ce->timeline->mutex);
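	/*
	 * Take the locks a real request would hold while emitting its
	 * breadcrumb, as the emitter may rely on them being held.
	 */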
	spin_lock_irq(&engine->sched_engine->lock);

	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;

	spin_unlock_irq(&engine->sched_engine->lock);
	mutex_unlock(&ce->timeline->mutex);

	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	kfree(frame);
	return dw;
}

struct intel_context *
intel_engine_create_pinned_context(struct intel_engine_cs *engine,
				   struct i915_address_space *vm,
				   unsigned int ring_size,
				   unsigned int hwsp,
				   struct lock_class_key *key,
				   const char *name)
{
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
	ce->timeline = page_pack_bits(NULL, hwsp);
	ce->ring = NULL;
	ce->ring_size = ring_size;

	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);

	err = intel_context_pin(ce); /* perma-pin so it is always available */
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);

	/*
	 * Give our perma-pinned kernel timelines a separate lockdep class,
	 * so that we can use them from within the normal user timelines
	 * should we need to inject GPU operations during their request
	 * construction.
	 */
	lockdep_set_class_and_name(&ce->timeline->mutex, key, name);

	return ce;
}

void intel_engine_destroy_pinned_context(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_vma *hwsp = engine->status_page.vma;

	GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);

	mutex_lock(&hwsp->vm->mutex);
	list_del(&ce->timeline->engine_link);
	mutex_unlock(&hwsp->vm->mutex);

	list_del(&ce->pinned_contexts_link);
	intel_context_unpin(ce);
	intel_context_put(ce);
}

static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;

	return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						  I915_GEM_HWS_SEQNO_ADDR,
						  &kernel, "kernel_context");
}

/**
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
static int engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/*
	 * We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	ret = measure_breadcrumb_dw(ce);
	if (ret < 0)
		goto err_context;

	engine->emit_fini_breadcrumb_dw = ret;
	engine->kernel_context = ce;

	return 0;

err_context:
	intel_engine_destroy_pinned_context(ce);
	return ret;
}

int intel_engines_init(struct intel_gt *gt)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (intel_uc_uses_guc_submission(&gt->uc)) {
		gt->submission_method = INTEL_SUBMISSION_GUC;
		setup = intel_guc_submission_setup;
	} else if (HAS_EXECLISTS(gt->i915)) {
		gt->submission_method = INTEL_SUBMISSION_ELSP;
		setup = intel_execlists_submission_setup;
	} else {
		gt->submission_method = INTEL_SUBMISSION_RING;
		setup = intel_ring_submission_setup;
	}

	for_each_engine(engine, gt, id) {
		err = engine_setup_common(engine);
		if (err)
			return err;

		err = setup(engine);
		if (err)
			return err;

		err = engine_init_common(engine);
		if (err)
			return err;

		intel_engine_add_user(engine);
	}

	return 0;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));

	i915_sched_engine_put(engine->sched_engine);
	intel_breadcrumbs_put(engine->breadcrumbs);

	intel_engine_fini_retire(engine);
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		uao_detach(engine->default_state);

	if (engine->kernel_context)
		intel_engine_destroy_pinned_context(engine->kernel_context);

	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
	cleanup_status_page(engine);

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

/**
 * intel_engine_resume - re-initializes the HW state of the engine
 * @engine: Engine to resume.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_resume(struct intel_engine_cs *engine)
{
	intel_engine_apply_workarounds(engine);
	intel_engine_apply_whitelist(engine);

	return engine->resume(engine);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	u64 acthd;

	if (GRAPHICS_VER(i915) >= 8)
		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
	else if (GRAPHICS_VER(i915) >= 4)
		acthd = ENGINE_READ(engine, RING_ACTHD);
	else
		acthd = ENGINE_READ(engine, ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	u64 bbaddr;

	if (GRAPHICS_VER(engine->i915) >= 8)
		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
	else
		bbaddr = ENGINE_READ(engine, RING_BBADDR);

	return bbaddr;
}

static unsigned long stop_timeout(const struct intel_engine_cs *engine)
{
	if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
		return 0;

	/*
	 * If we are doing a normal GPU reset, we can take our time and allow
	 * the engine to quiesce. We've stopped submission to the engine, and
	 * if we wait long enough an innocent context should complete and
	 * leave the engine idle. So they should not be caught unaware by
	 * the forthcoming GPU reset (which usually follows the stop_cs)!
	 */
	return READ_ONCE(engine->props.stop_timeout_ms);
}

static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
				  int fast_timeout_us,
				  int slow_timeout_ms)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
	int err;

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));

	/*
	 * Wa_22011802037: gen11, gen12: prior to doing a reset, ensure CS is
	 * stopped; set the ring stop bit and the prefetch disable bit to halt CS.
	 */
	if (IS_GRAPHICS_VER(engine->i915, 11, 12))
		intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
				      _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));

	err = __intel_wait_for_register_fw(engine->uncore, mode,
					   MODE_IDLE, MODE_IDLE,
					   fast_timeout_us,
					   slow_timeout_ms,
					   NULL);

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	intel_uncore_posting_read_fw(uncore, mode);
	return err;
}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	int err = 0;

	if (GRAPHICS_VER(engine->i915) < 3)
		return -ENODEV;

	ENGINE_TRACE(engine, "\n");
	/*
	 * TODO: Find out why occasionally stopping the CS times out. Seen
	 * especially with gem_eio tests.
	 *
	 * Occasionally trying to stop the cs times out, but does not adversely
	 * affect functionality. The timeout is set as a config parameter that
	 * defaults to 100ms. In most cases the follow up operation is to wait
	 * for pending MI_FORCE_WAKES. The assumption is that this timeout is
	 * sufficient for any pending MI_FORCEWAKEs to complete. Once the root
	 * cause is found, the caller must check and handle the return from
	 * this function.
	 */
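	/* Busy-wait up to 1000us before sleeping for up to stop_timeout_ms. */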
	if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
		ENGINE_TRACE(engine,
			     "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n",
			     ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
			     ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);

		/*
		 * Sometimes we observe that the idle flag is not
		 * set even though the ring is empty. So double
		 * check before giving up.
		 */
		if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
		    (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
			err = -ETIMEDOUT;
	}

	return err;
}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	ENGINE_TRACE(engine, "\n");

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}

static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
{
	static const i915_reg_t _reg[I915_NUM_ENGINES] = {
		[RCS0] = MSG_IDLE_CS,
		[BCS0] = MSG_IDLE_BCS,
		[VCS0] = MSG_IDLE_VCS0,
		[VCS1] = MSG_IDLE_VCS1,
		[VCS2] = MSG_IDLE_VCS2,
		[VCS3] = MSG_IDLE_VCS3,
		[VCS4] = MSG_IDLE_VCS4,
		[VCS5] = MSG_IDLE_VCS5,
		[VCS6] = MSG_IDLE_VCS6,
		[VCS7] = MSG_IDLE_VCS7,
		[VECS0] = MSG_IDLE_VECS0,
		[VECS1] = MSG_IDLE_VECS1,
		[VECS2] = MSG_IDLE_VECS2,
		[VECS3] = MSG_IDLE_VECS3,
		[CCS0] = MSG_IDLE_CS,
		[CCS1] = MSG_IDLE_CS,
		[CCS2] = MSG_IDLE_CS,
		[CCS3] = MSG_IDLE_CS,
	};
	u32 val;

	if (!_reg[engine->id].reg) {
		drm_err(&engine->i915->drm,
			"MSG IDLE undefined for engine id %u\n", engine->id);
		return 0;
	}

	val = intel_uncore_read(engine->uncore, _reg[engine->id]);

	/* bits[29:25] & bits[13:9] >> shift */
	return (val & (val >> 16) & MSG_IDLE_FW_MASK) >> MSG_IDLE_FW_SHIFT;
}

static void __gpm_wait_for_fw_complete(struct intel_gt *gt, u32 fw_mask)
{
	int ret;

	/* Ensure GPM receives fw up/down after CS is stopped */
	udelay(1);

	/* Wait for forcewake request to complete in GPM */
	ret = __intel_wait_for_register_fw(gt->uncore,
					   GEN9_PWRGT_DOMAIN_STATUS,
					   fw_mask, fw_mask, 5000, 0, NULL);

	/* Ensure CS receives fw ack from GPM */
	udelay(1);

	if (ret)
		GT_TRACE(gt, "Failed to complete pending forcewake %d\n", ret);
}

/*
 * Wa_22011802037:gen12: In addition to stopping the cs, we need to wait for any
 * pending MI_FORCE_WAKEUP requests that the CS has initiated to complete. The
 * pending status is indicated by bits[13:9] (masked by bits[29:25]) in the
 * MSG_IDLE register. There's one MSG_IDLE register per reset domain. Since we
 * are concerned only with the gt reset here, we use a logical OR of pending
 * forcewakeups from all reset domains and then wait for them to complete by
 * querying PWRGT_DOMAIN_STATUS.
 */
void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
{
	u32 fw_pending = __cs_pending_mi_force_wakes(engine);

	if (fw_pending)
		__gpm_wait_for_fw_complete(engine->gt, fw_pending);
}

/* NB: please notice the memset */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;
	int iter;

	memset(instdone, 0, sizeof(*instdone));

	if (GRAPHICS_VER(i915) >= 8) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			return;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		if (GRAPHICS_VER(i915) >= 12) {
			instdone->slice_common_extra[0] =
				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
			instdone->slice_common_extra[1] =
				intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
		}

		for_each_ss_steering(iter, engine->gt, slice, subslice) {
			instdone->sampler[slice][subslice] =
				intel_gt_mcr_read(engine->gt,
						  GEN7_SAMPLER_INSTDONE,
						  slice, subslice);
			instdone->row[slice][subslice] =
				intel_gt_mcr_read(engine->gt,
						  GEN7_ROW_INSTDONE,
						  slice, subslice);
		}

		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
			for_each_ss_steering(iter, engine->gt, slice, subslice)
				instdone->geom_svg[slice][subslice] =
					intel_gt_mcr_read(engine->gt,
							  XEHPG_INSTDONE_GEOM_SVG,
							  slice, subslice);
		}
	} else if (GRAPHICS_VER(i915) >= 7) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			return;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		instdone->sampler[0][0] =
			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] =
			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
	} else if (GRAPHICS_VER(i915) >= 4) {
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
		if (engine->id == RCS0)
			/* HACK: Using the wrong struct member */
			instdone->slice_common =
				intel_uncore_read(uncore, GEN4_INSTDONE1);
	} else {
		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	if (!intel_engine_pm_get_if_awake(engine))
		return true;

	/* First check that no commands are left in the ring */
	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (GRAPHICS_VER(engine->i915) > 2 &&
	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
		idle = false;

	intel_engine_pm_put(engine);

	return idle;
}

void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
{
	struct tasklet_struct *t = &engine->sched_engine->tasklet;

	if (!t->callback)
		return;

	local_bh_disable();
	if (tasklet_trylock(t)) {
		/* Must wait for any GPU reset in progress. */
		if (__tasklet_is_enabled(t))
			t->callback(t);
		tasklet_unlock(t);
	}
	local_bh_enable();

	/* Synchronise and wait for the tasklet on another CPU */
	if (sync)
		tasklet_unlock_wait(t);
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	/* More white lies, if wedged, hw state is inconsistent */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	if (!intel_engine_pm_is_awake(engine))
		return true;

	/* Waiting to drain ELSP? */
	intel_synchronize_hardirq(engine->i915);
	intel_engine_flush_submission(engine);

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!i915_sched_engine_is_empty(engine->sched_engine))
		return false;

	/* Ring stopped? */
	return ring_is_idle(engine);
}

bool intel_engines_are_idle(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (intel_gt_is_wedged(gt))
		return true;

	/* Already parked (and passed an idleness test); must still be idle */
	if (!READ_ONCE(gt->awake))
		return true;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

bool intel_engine_irq_enable(struct intel_engine_cs *engine)
{
	if (!engine->irq_enable)
		return false;

	/* Caller disables interrupts */
	spin_lock(engine->gt->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(engine->gt->irq_lock);

	return true;
}

void intel_engine_irq_disable(struct intel_engine_cs *engine)
{
	if (!engine->irq_disable)
		return;

	/* Caller disables interrupts */
	spin_lock(engine->gt->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(engine->gt->irq_lock);
}

void intel_engines_reset_default_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		if (engine->sanitize)
			engine->sanitize(engine);

		engine->set_default_submission(engine);
	}
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (GRAPHICS_VER(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 4:
		return !IS_I965G(engine->i915); /* who knows! */
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

static struct intel_timeline *get_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	/*
	 * Even though we are holding the engine->sched_engine->lock here, there
	 * is no control over the submission queue per-se and we are
	 * inspecting the active state at a random point in time, with an
	 * unknown queue.
	 * Play safe and make sure the timeline remains valid.
	 * (Only being used for pretty printing, one extra kref shouldn't
	 * cause a camel stampede!)
	 */
	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();

	return tl;
}

static int print_ring(char *buf, int sz, struct i915_request *rq)
{
	int len = 0;

	if (!i915_request_signaled(rq)) {
		struct intel_timeline *tl = get_timeline(rq);

		len = scnprintf(buf, sz,
				"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
				i915_ggtt_offset(rq->ring->vma),
				tl ? tl->hwsp_offset : 0,
				hwsp_seqno(rq),
				DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
						      1000 * 1000));

		if (tl)
			intel_timeline_put(tl);
	}

	return len;
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	STUB();
#ifdef notyet
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
#endif
}

static const char *repr_timer(const struct timeout *t)
{
	if (!READ_ONCE(t->to_time))
		return "inactive";

	if (timer_pending(t))
		return "active";

	return "expired";
}

static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	u64 addr;

	if (engine->id == RENDER_CLASS && IS_GRAPHICS_VER(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
	if (HAS_EXECLISTS(dev_priv)) {
		drm_printf(m, "\tEL_STAT_HI: 0x%08x\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
		drm_printf(m, "\tEL_STAT_LO: 0x%08x\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
	}
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   ENGINE_READ(engine, RING_START));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   ENGINE_READ(engine, RING_CTL),
		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (GRAPHICS_VER(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   ENGINE_READ(engine, RING_MI_MODE),
			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ?
" [idle]" : ""); 1936 } 1937 1938 if (GRAPHICS_VER(dev_priv) >= 6) { 1939 drm_printf(m, "\tRING_IMR: 0x%08x\n", 1940 ENGINE_READ(engine, RING_IMR)); 1941 drm_printf(m, "\tRING_ESR: 0x%08x\n", 1942 ENGINE_READ(engine, RING_ESR)); 1943 drm_printf(m, "\tRING_EMR: 0x%08x\n", 1944 ENGINE_READ(engine, RING_EMR)); 1945 drm_printf(m, "\tRING_EIR: 0x%08x\n", 1946 ENGINE_READ(engine, RING_EIR)); 1947 } 1948 1949 addr = intel_engine_get_active_head(engine); 1950 drm_printf(m, "\tACTHD: 0x%08x_%08x\n", 1951 upper_32_bits(addr), lower_32_bits(addr)); 1952 addr = intel_engine_get_last_batch_head(engine); 1953 drm_printf(m, "\tBBADDR: 0x%08x_%08x\n", 1954 upper_32_bits(addr), lower_32_bits(addr)); 1955 if (GRAPHICS_VER(dev_priv) >= 8) 1956 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW); 1957 else if (GRAPHICS_VER(dev_priv) >= 4) 1958 addr = ENGINE_READ(engine, RING_DMA_FADD); 1959 else 1960 addr = ENGINE_READ(engine, DMA_FADD_I8XX); 1961 drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n", 1962 upper_32_bits(addr), lower_32_bits(addr)); 1963 if (GRAPHICS_VER(dev_priv) >= 4) { 1964 drm_printf(m, "\tIPEIR: 0x%08x\n", 1965 ENGINE_READ(engine, RING_IPEIR)); 1966 drm_printf(m, "\tIPEHR: 0x%08x\n", 1967 ENGINE_READ(engine, RING_IPEHR)); 1968 } else { 1969 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR)); 1970 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); 1971 } 1972 1973 if (HAS_EXECLISTS(dev_priv) && !intel_engine_uses_guc(engine)) { 1974 struct i915_request * const *port, *rq; 1975 const u32 *hws = 1976 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; 1977 const u8 num_entries = execlists->csb_size; 1978 unsigned int idx; 1979 u8 read, write; 1980 1981 drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n", 1982 str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)), 1983 str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)), 1984 repr_timer(&engine->execlists.preempt), 1985 repr_timer(&engine->execlists.timer)); 1986 1987 read = execlists->csb_head; 1988 write = READ_ONCE(*execlists->csb_write); 1989 1990 drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n", 1991 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), 1992 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI), 1993 read, write, num_entries); 1994 1995 if (read >= num_entries) 1996 read = 0; 1997 if (write >= num_entries) 1998 write = 0; 1999 if (read > write) 2000 write += num_entries; 2001 while (read < write) { 2002 idx = ++read % num_entries; 2003 drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n", 2004 idx, hws[idx * 2], hws[idx * 2 + 1]); 2005 } 2006 2007 i915_sched_engine_active_lock_bh(engine->sched_engine); 2008 rcu_read_lock(); 2009 for (port = execlists->active; (rq = *port); port++) { 2010 char hdr[160]; 2011 int len; 2012 2013 len = scnprintf(hdr, sizeof(hdr), 2014 "\t\tActive[%d]: ccid:%08x%s%s, ", 2015 (int)(port - execlists->active), 2016 rq->context->lrc.ccid, 2017 intel_context_is_closed(rq->context) ? "!" : "", 2018 intel_context_is_banned(rq->context) ? 
"*" : ""); 2019 len += print_ring(hdr + len, sizeof(hdr) - len, rq); 2020 scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); 2021 i915_request_show(m, rq, hdr, 0); 2022 } 2023 for (port = execlists->pending; (rq = *port); port++) { 2024 char hdr[160]; 2025 int len; 2026 2027 len = scnprintf(hdr, sizeof(hdr), 2028 "\t\tPending[%d]: ccid:%08x%s%s, ", 2029 (int)(port - execlists->pending), 2030 rq->context->lrc.ccid, 2031 intel_context_is_closed(rq->context) ? "!" : "", 2032 intel_context_is_banned(rq->context) ? "*" : ""); 2033 len += print_ring(hdr + len, sizeof(hdr) - len, rq); 2034 scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); 2035 i915_request_show(m, rq, hdr, 0); 2036 } 2037 rcu_read_unlock(); 2038 i915_sched_engine_active_unlock_bh(engine->sched_engine); 2039 } else if (GRAPHICS_VER(dev_priv) > 6) { 2040 drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", 2041 ENGINE_READ(engine, RING_PP_DIR_BASE)); 2042 drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n", 2043 ENGINE_READ(engine, RING_PP_DIR_BASE_READ)); 2044 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", 2045 ENGINE_READ(engine, RING_PP_DIR_DCLV)); 2046 } 2047 } 2048 2049 static void print_request_ring(struct drm_printer *m, struct i915_request *rq) 2050 { 2051 struct i915_vma_resource *vma_res = rq->batch_res; 2052 void *ring; 2053 int size; 2054 2055 drm_printf(m, 2056 "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", 2057 rq->head, rq->postfix, rq->tail, 2058 vma_res ? upper_32_bits(vma_res->start) : ~0u, 2059 vma_res ? lower_32_bits(vma_res->start) : ~0u); 2060 2061 size = rq->tail - rq->head; 2062 if (rq->tail < rq->head) 2063 size += rq->ring->size; 2064 2065 ring = kmalloc(size, GFP_ATOMIC); 2066 if (ring) { 2067 const void *vaddr = rq->ring->vaddr; 2068 unsigned int head = rq->head; 2069 unsigned int len = 0; 2070 2071 if (rq->tail < head) { 2072 len = rq->ring->size - head; 2073 memcpy(ring, vaddr + head, len); 2074 head = 0; 2075 } 2076 memcpy(ring + len, vaddr + head, size - len); 2077 2078 hexdump(m, ring, size); 2079 kfree(ring); 2080 } 2081 } 2082 2083 static unsigned long list_count(struct list_head *list) 2084 { 2085 struct list_head *pos; 2086 unsigned long count = 0; 2087 2088 list_for_each(pos, list) 2089 count++; 2090 2091 return count; 2092 } 2093 2094 static unsigned long read_ul(void *p, size_t x) 2095 { 2096 return *(unsigned long *)(p + x); 2097 } 2098 2099 static void print_properties(struct intel_engine_cs *engine, 2100 struct drm_printer *m) 2101 { 2102 static const struct pmap { 2103 size_t offset; 2104 const char *name; 2105 } props[] = { 2106 #define P(x) { \ 2107 .offset = offsetof(typeof(engine->props), x), \ 2108 .name = #x \ 2109 } 2110 P(heartbeat_interval_ms), 2111 P(max_busywait_duration_ns), 2112 P(preempt_timeout_ms), 2113 P(stop_timeout_ms), 2114 P(timeslice_duration_ms), 2115 2116 {}, 2117 #undef P 2118 }; 2119 const struct pmap *p; 2120 2121 drm_printf(m, "\tProperties:\n"); 2122 for (p = props; p->name; p++) 2123 drm_printf(m, "\t\t%s: %lu [default %lu]\n", 2124 p->name, 2125 read_ul(&engine->props, p->offset), 2126 read_ul(&engine->defaults, p->offset)); 2127 } 2128 2129 static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg) 2130 { 2131 struct intel_timeline *tl = get_timeline(rq); 2132 2133 i915_request_show(m, rq, msg, 0); 2134 2135 drm_printf(m, "\t\tring->start: 0x%08x\n", 2136 i915_ggtt_offset(rq->ring->vma)); 2137 drm_printf(m, "\t\tring->head: 0x%08x\n", 2138 rq->ring->head); 2139 drm_printf(m, "\t\tring->tail: 0x%08x\n", 2140 rq->ring->tail); 2141 
static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
{
	struct intel_timeline *tl = get_timeline(rq);

	i915_request_show(m, rq, msg, 0);

	drm_printf(m, "\t\tring->start: 0x%08x\n",
		   i915_ggtt_offset(rq->ring->vma));
	drm_printf(m, "\t\tring->head: 0x%08x\n",
		   rq->ring->head);
	drm_printf(m, "\t\tring->tail: 0x%08x\n",
		   rq->ring->tail);
	drm_printf(m, "\t\tring->emit: 0x%08x\n",
		   rq->ring->emit);
	drm_printf(m, "\t\tring->space: 0x%08x\n",
		   rq->ring->space);

	if (tl) {
		drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
			   tl->hwsp_offset);
		intel_timeline_put(tl);
	}

	print_request_ring(m, rq);

	if (rq->context->lrc_reg_state) {
		drm_printf(m, "Logical Ring Context:\n");
		hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
	}
}

void intel_engine_dump_active_requests(struct list_head *requests,
				       struct i915_request *hung_rq,
				       struct drm_printer *m)
{
	struct i915_request *rq;
	const char *msg;
	enum i915_request_state state;

	list_for_each_entry(rq, requests, sched.link) {
		if (rq == hung_rq)
			continue;

		state = i915_test_request_state(rq);
		if (state < I915_REQUEST_QUEUED)
			continue;

		if (state == I915_REQUEST_ACTIVE)
			msg = "\t\tactive on engine";
		else
			msg = "\t\tactive in queue";

		engine_dump_request(rq, m, msg);
	}
}

static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
{
	struct i915_request *hung_rq = NULL;
	struct intel_context *ce;
	bool guc;

	/*
	 * No need for an engine->irq_seqno_barrier() before the seqno reads.
	 * The GPU is still running so requests are still executing and any
	 * hardware reads will be out of date by the time they are reported.
	 * But the intention here is just to report an instantaneous snapshot
	 * so that's fine.
	 */
	lockdep_assert_held(&engine->sched_engine->lock);

	drm_printf(m, "\tRequests:\n");

	guc = intel_uc_uses_guc_submission(&engine->gt->uc);
	if (guc) {
		ce = intel_engine_get_hung_context(engine);
		if (ce)
			hung_rq = intel_context_find_active_request(ce);
	} else {
		hung_rq = intel_engine_execlist_find_hung_request(engine);
	}

	if (hung_rq)
		engine_dump_request(hung_rq, m, "\t\thung");

	if (guc)
		intel_guc_dump_active_requests(engine, hung_rq, m);
	else
		intel_engine_dump_active_requests(&engine->sched_engine->requests,
						  hung_rq, m);
}
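/*
 * Sketch of a typical caller (hypothetical, for illustration): the debugfs
 * and error-capture paths wrap the dump in a drm_printer, e.g.
 *
 *	struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
 *
 *	intel_engine_dump(engine, &p, "%s\n", engine->name);
 *
 * No engine lock is required on entry; intel_engine_dump() takes the
 * sched_engine lock itself around the request dump.
 */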
%d\n", atomic_read(&engine->wakeref.count)); 2243 drm_printf(m, "\tBarriers?: %s\n", 2244 str_yes_no(!llist_empty(&engine->barrier_tasks))); 2245 drm_printf(m, "\tLatency: %luus\n", 2246 ewma__engine_latency_read(&engine->latency)); 2247 if (intel_engine_supports_stats(engine)) 2248 drm_printf(m, "\tRuntime: %llums\n", 2249 ktime_to_ms(intel_engine_get_busy_time(engine, 2250 &dummy))); 2251 drm_printf(m, "\tForcewake: %x domains, %d active\n", 2252 engine->fw_domain, READ_ONCE(engine->fw_active)); 2253 2254 rcu_read_lock(); 2255 rq = READ_ONCE(engine->heartbeat.systole); 2256 if (rq) 2257 drm_printf(m, "\tHeartbeat: %d ms ago\n", 2258 jiffies_to_msecs(jiffies - rq->emitted_jiffies)); 2259 rcu_read_unlock(); 2260 drm_printf(m, "\tReset count: %d (global %d)\n", 2261 i915_reset_engine_count(error, engine), 2262 i915_reset_count(error)); 2263 print_properties(engine, m); 2264 2265 spin_lock_irqsave(&engine->sched_engine->lock, flags); 2266 engine_dump_active_requests(engine, m); 2267 2268 drm_printf(m, "\tOn hold?: %lu\n", 2269 list_count(&engine->sched_engine->hold)); 2270 spin_unlock_irqrestore(&engine->sched_engine->lock, flags); 2271 2272 drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base); 2273 wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm); 2274 if (wakeref) { 2275 intel_engine_print_registers(engine, m); 2276 intel_runtime_pm_put(engine->uncore->rpm, wakeref); 2277 } else { 2278 drm_printf(m, "\tDevice is asleep; skipping register dump\n"); 2279 } 2280 2281 intel_execlists_show_requests(engine, m, i915_request_show, 8); 2282 2283 drm_printf(m, "HWSP:\n"); 2284 hexdump(m, engine->status_page.addr, PAGE_SIZE); 2285 2286 drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine))); 2287 2288 intel_engine_print_breadcrumbs(engine, m); 2289 } 2290 2291 /** 2292 * intel_engine_get_busy_time() - Return current accumulated engine busyness 2293 * @engine: engine to report on 2294 * @now: monotonic timestamp of sampling 2295 * 2296 * Returns accumulated time @engine was busy since engine stats were enabled. 2297 */ 2298 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) 2299 { 2300 return engine->busyness(engine, now); 2301 } 2302 2303 struct intel_context * 2304 intel_engine_create_virtual(struct intel_engine_cs **siblings, 2305 unsigned int count, unsigned long flags) 2306 { 2307 if (count == 0) 2308 return ERR_PTR(-EINVAL); 2309 2310 if (count == 1 && !(flags & FORCE_VIRTUAL)) 2311 return intel_context_create(siblings[0]); 2312 2313 GEM_BUG_ON(!siblings[0]->cops->create_virtual); 2314 return siblings[0]->cops->create_virtual(siblings, count, flags); 2315 } 2316 2317 struct i915_request * 2318 intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine) 2319 { 2320 struct i915_request *request, *active = NULL; 2321 2322 /* 2323 * This search does not work in GuC submission mode. However, the GuC 2324 * will report the hanging context directly to the driver itself. So 2325 * the driver should never get here when in GuC mode. 2326 */ 2327 GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc)); 2328 2329 /* 2330 * We are called by the error capture, reset and to dump engine 2331 * state at random points in time. In particular, note that neither is 2332 * crucially ordered with an interrupt. After a hang, the GPU is dead 2333 * and we assume that no more writes can happen (we waited long enough 2334 * for all writes that were in transaction to be flushed) - adding an 2335 * extra delay for a recent interrupt is pointless. 
struct i915_request *
intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;

	/*
	 * This search does not work in GuC submission mode. However, the GuC
	 * will report the hanging context directly to the driver itself. So
	 * the driver should never get here when in GuC mode.
	 */
	GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that none of
	 * these callers is crucially ordered with an interrupt. After a hang,
	 * the GPU is dead and we assume that no more writes can happen (we
	 * waited long enough for all writes that were in transit to be
	 * flushed) - adding an extra delay for a recent interrupt is
	 * pointless. Hence, we do not need an engine->irq_seqno_barrier()
	 * before the seqno reads. At all other times, we must assume the GPU
	 * is still running, but we only care about the snapshot of this
	 * moment.
	 */
	lockdep_assert_held(&engine->sched_engine->lock);

	rcu_read_lock();
	request = execlists_active(&engine->execlists);
	if (request) {
		struct intel_timeline *tl = request->context->timeline;

		/* Walk back to the oldest incomplete request on the timeline */
		list_for_each_entry_from_reverse(request, &tl->requests, link) {
			if (__i915_request_is_complete(request))
				break;

			active = request;
		}
	}
	rcu_read_unlock();
	if (active)
		return active;

	/* Otherwise, fall back to the first request still marked active */
	list_for_each_entry(request, &engine->sched_engine->requests,
			    sched.link) {
		if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
			continue;

		active = request;
		break;
	}

	return active;
}

void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
	/*
	 * If there are any non-fused-off CCS engines, we need to enable CCS
	 * support in the RCU_MODE register. This only needs to be done once,
	 * so for simplicity we'll take care of this in the RCS engine's
	 * resume handler; since the RCS and all CCS engines belong to the
	 * same reset domain and are reset together, this will also take care
	 * of re-applying the setting after i915-triggered resets.
	 */
	if (!CCS_MASK(engine->gt))
		return;

	intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
			   _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif