/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/hrtimer.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include "i915_vma_resource.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
struct i915_deps;
struct i915_request;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
struct i915_capture_list {
	struct i915_vma_resource *vma_res;
	struct i915_capture_list *next;
};

void i915_request_free_capture_list(struct i915_capture_list *capture);
#else
#define i915_request_free_capture_list(_a) do {} while (0)
#endif

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
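
/*
 * A hedged usage sketch (the message string here is hypothetical):
 *
 *	RQ_TRACE(rq, "submitted\n");
 *
 * expands to an ENGINE_TRACE() on rq->engine, prefixing the message with
 * the request's fence id (context:seqno) and the breadcrumb currently
 * visible in the HW status page.
 */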

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is
	 * put into the priority queue, and removed from that queue when
	 * transferred to the HW runlists. We want to track its membership
	 * within the priority queue so that we can easily check before
	 * rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_INITIAL_BREADCRUMB - this request has the initial
	 * breadcrumb that marks the end of semaphore waits and the start of
	 * the user payload.
	 */
	I915_FENCE_FLAG_INITIAL_BREADCRUMB,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the GPU for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking
	 * the GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,

	/*
	 * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a
	 * parent-child relationship (parallel submission, multi-lrc) should
	 * trigger a submission to the GuC rather than just moving the context
	 * tail.
	 */
	I915_FENCE_FLAG_SUBMIT_PARALLEL,

	/*
	 * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a
	 * parent-child relationship (parallel submission, multi-lrc) that
	 * hit an error while generating requests in the execbuf IOCTL.
	 * Indicates this request should be skipped as another request in
	 * the submission / relationship encountered an error.
	 */
	I915_FENCE_FLAG_SKIP_PARALLEL,

	/*
	 * I915_FENCE_FLAG_COMPOSITE - Indicates the fence is part of a
	 * composite fence (dma_fence_array) that i915 generated for parallel
	 * submission.
	 */
	I915_FENCE_FLAG_COMPOSITE,
};
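
/*
 * A minimal sketch, mirroring the helpers further below: these flags occupy
 * the driver-private bits of rq->fence.flags, starting at
 * DMA_FENCE_FLAG_USER_BITS, and are queried and updated atomically, e.g.
 * (hypothetical caller)
 *
 *	if (test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags))
 *		return;
 */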

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee
	 * that it persists while any request is linked to it. Requests
	 * themselves are also refcounted, so the request will only be freed
	 * when the last reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;

	struct list_head signal_link;
	struct llist_node signal_node;

	/*
	 * The rcu epoch of when this request was allocated. Used to
	 * judiciously apply backpressure on future allocations to ensure
	 * that under mempressure there are sufficient RCU ticks for us to
	 * reclaim our RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct llist_head execute_cb;
	struct i915_sw_fence semaphore;
	/**
	 * @submit_work: complete the submit fence from an IRQ if needed for
	 * locking hierarchy reasons.
	 */
	struct irq_work submit_work;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;
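
	/*
	 * A rough sketch, inferred from the offset fields documented below,
	 * of how a single request is laid out within its ring: [head, infix)
	 * holds whatever is emitted ahead of the user payload (e.g. semaphore
	 * waits and the initial breadcrumb, when used), [infix, postfix) the
	 * user packets, [postfix, tail) the closing breadcrumb, and
	 * [tail, wa_tail) any workarounds appended after the end of the
	 * request proper.
	 */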

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting this request */
	u32 reserved_space;

	/** Batch buffer pointer for selftest internal use. */
	I915_SELFTEST_DECLARE(struct i915_vma *batch);

	struct i915_vma_resource *batch_res;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;
#endif

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	/** Watchdog support fields. */
	struct i915_request_watchdog {
		struct llist_node link;
		struct timeout timer;
	} watchdog;

	/**
	 * @guc_fence_link: Requests may need to be stalled when using GuC
	 * submission, waiting for certain GuC operations to complete. If that
	 * is the case, stalled requests are added to a per-context list of
	 * stalled requests. The below list_head is the link in that list.
	 * Protected by ce->guc_state.lock.
	 */
	struct list_head guc_fence_link;

	/**
	 * @guc_prio: Priority level while the request is in flight. Differs
	 * from i915 scheduler priority. See comment above
	 * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
	 * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
	 * GUC_PRIO_FINI) outside the GuC priority range are used to indicate
	 * whether the priority has not been initialized yet or whether no
	 * more updates are possible because the request has completed.
	 */
#define GUC_PRIO_INIT	0xff
#define GUC_PRIO_FINI	0xfe
	u8 guc_prio;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

#ifdef __linux__
struct kmem_cache *i915_request_slab_cache(void);
#else
struct pool *i915_request_slab_cache(void);
#endif

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

void __i915_request_skip(struct i915_request *rq);
bool i915_request_set_error_once(struct i915_request *rq, int error);
struct i915_request *i915_request_mark_eio(struct i915_request *rq);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);
void __i915_request_queue_bh(struct i915_request *rq);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence);

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

void i915_request_cancel(struct i915_request *rq, int error);

long i915_request_wait_timeout(struct i915_request *rq,
			       unsigned int flags,
			       long timeout)
	__attribute__((nonnull(1)));

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */

void i915_request_show(struct drm_printer *m,
		       const struct i915_request *rq,
		       const char *prefix,
		       int indent);
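
/*
 * A minimal, hedged lifecycle sketch using the API above; error handling
 * is elided and "prereq" stands in for a hypothetical fence the request
 * must be ordered behind:
 *
 *	struct i915_request *rq = i915_request_create(ce);
 *	if (!IS_ERR(rq)) {
 *		err = i915_request_await_dma_fence(rq, prereq);
 *		rq = i915_request_get(rq);
 *		i915_request_add(rq);
 *		i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				  MAX_SCHEDULE_TIMEOUT);
 *		i915_request_put(rq);
 *	}
 *
 * Note the extra reference taken before i915_request_add(): once added,
 * the request may be retired (and freed) at any time, so a caller that
 * intends to wait must hold its own reference.
 */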

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

static inline bool
i915_request_has_initial_breadcrumb(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
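
/*
 * The signed subtraction above makes the comparison robust against u32
 * wraparound. A worked example: with seq1 = 2 and seq2 = 0xfffffffd,
 * seq1 - seq2 = 5, so (s32)5 >= 0 and seq1 is correctly deemed later,
 * even though it is numerically smaller. This holds as long as the two
 * seqnos are within 2^31 of each other.
 */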

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);

	return READ_ONCE(*hwsp);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started, as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so is no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		/* Remember: started but may have since been preempted! */
		result = __i915_request_has_started(rq);
	rcu_read_unlock();

	return result;
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	bool result;

	if (!i915_request_is_active(rq))
		return false;

	rcu_read_lock();
	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
	rcu_read_unlock();

	return result;
}
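
/*
 * A rough summary of how the predicates above and below relate, assuming
 * the common case of a timeline with initial breadcrumbs:
 *
 *	is_active:  submitted to HW and not yet unsubmitted/retired
 *	started:    the initial breadcrumb has been passed (or completed)
 *	is_running: is_active && started
 *	completed:  the final breadcrumb (rq->fence.seqno) has been passed
 *	signaled:   DMA_FENCE_FLAG_SIGNALED_BIT observed on the fence
 *
 * Each is a racy snapshot; the state may change as soon as it is sampled.
 */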

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In
 * practice, this is controlled with a mixture of interrupts and
 * semaphores. Once the submit fence is completed, the backend scheduler
 * will place the request into its queue and from there submit it for
 * execution. So we can detect when a request is eligible for execution
 * (and is under control of the scheduler) by querying where it is in any
 * of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool __i915_request_is_complete(const struct i915_request *rq)
{
	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	bool result;

	if (i915_request_signaled(rq))
		return true;

	result = true;
	rcu_read_lock(); /* the HWSP may be freed at runtime */
	if (likely(!i915_request_signaled(rq)))
		result = __i915_request_is_complete(rq);
	rcu_read_unlock();

	return result;
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
		   (u32 *)&rq->fence.seqno);
}
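
/*
 * One consequence worth spelling out: after i915_request_mark_complete(),
 * rq->hwsp_seqno points at rq->fence.seqno itself, so
 * __i915_request_is_complete() compares the seqno against itself, and
 * i915_seqno_passed(x, x) is trivially true. The request therefore reads
 * as complete forever after, independent of the (possibly freed) HWSP.
 */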

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
					 test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
}

static inline struct i915_gem_context *
i915_request_gem_context(const struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(const struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->sched_engine->lock));
}

static inline u32
i915_request_active_seqno(const struct i915_request *rq)
{
	u32 hwsp_phys_base =
		page_mask_bits(i915_request_active_timeline(rq)->hwsp_offset);
	u32 hwsp_relative_offset = offset_in_page(rq->hwsp_seqno);

	/*
	 * Because of wraparound, we cannot simply take tl->hwsp_offset,
	 * but instead use the fact that the in-page offset of the seqno is
	 * the same for the CPU address in rq->hwsp_seqno as for hwsp_offset.
	 * Take the top bits from tl->hwsp_offset and combine them with the
	 * relative offset in rq->hwsp_seqno.
	 *
	 * As rq->hwsp_seqno is rewritten when signaled, this only works
	 * while the request isn't signaled yet, but at that point you
	 * no longer need the offset.
	 */

	return hwsp_phys_base + hwsp_relative_offset;
}

bool
i915_request_active_engine(struct i915_request *rq,
			   struct intel_engine_cs **active);

void i915_request_notify_execute_cb_imm(struct i915_request *rq);

enum i915_request_state {
	I915_REQUEST_UNKNOWN = 0,
	I915_REQUEST_COMPLETE,
	I915_REQUEST_PENDING,
	I915_REQUEST_QUEUED,
	I915_REQUEST_ACTIVE,
};

enum i915_request_state i915_test_request_state(struct i915_request *rq);

void i915_request_module_exit(void);
int i915_request_module_init(void);

#endif /* I915_REQUEST_H */