/*	$NetBSD: intel_timeline_types.h,v 1.2 2021/12/18 23:45:30 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_TIMELINE_TYPES_H__
#define __I915_TIMELINE_TYPES_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#include "i915_active_types.h"

struct i915_vma;
struct i915_syncmap;
struct intel_gt;
struct intel_timeline_hwsp;

struct intel_timeline {
	u64 fence_context;
	u32 seqno;

	struct mutex mutex; /* protects the flow of requests */

	/*
	 * pin_count and active_count track essentially the same thing:
	 * how many requests are in flight or may be under construction.
	 *
	 * We need two distinct counters so that we can assign different
	 * lifetimes to the events for different use-cases. For example,
	 * we want to keep the timeline permanently pinned for the kernel
	 * context so that we can issue requests at any time without having
	 * to acquire space in the GGTT. However, we also want to keep
	 * tracking the activity along that permanently pinned timeline (to
	 * be able to detect when we become idle), and so we end up
	 * requiring two counters.
	 *
	 * Note that the active_count is protected by the intel_timeline.mutex,
	 * but the pin_count is protected by a combination of serialisation
	 * from the intel_context caller plus internal atomicity.
	 */
	atomic_t pin_count;
	atomic_t active_count;

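	/*
	 * A rough sketch of the intended split between the two counters;
	 * intel_timeline_pin()/intel_timeline_unpin() are the real entry
	 * points, while the request construction step is illustrative only:
	 *
	 *	intel_timeline_pin(tl);		// pin_count: keep HWSP in GGTT
	 *	rq = construct_request(tl);	// active_count follows requests
	 *	...
	 *	// active_count may drop to zero (idle) while the kernel
	 *	// context holds pin_count elevated indefinitely.
	 *	intel_timeline_unpin(tl);
	 */
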
	const u32 *hwsp_seqno;		/* CPU pointer to our seqno in the HWSP */
	struct i915_vma *hwsp_ggtt;	/* GGTT vma of the hardware status page */
	u32 hwsp_offset;		/* byte offset of our seqno within the HWSP */

	struct intel_timeline_cacheline *hwsp_cacheline; /* shared-HWSP cacheline, if any */

	bool has_initial_breadcrumb;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head requests;

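	/*
	 * A hedged sketch of walking the outstanding requests; entries are
	 * linked through i915_request.link, and the list is protected by
	 * the timeline mutex above:
	 *
	 *	struct i915_request *rq, *rn;
	 *
	 *	mutex_lock(&tl->mutex);
	 *	list_for_each_entry_safe(rq, rn, &tl->requests, link)
	 *		...retire rq once its fence has signaled...
	 *	mutex_unlock(&tl->mutex);
	 */
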
	/*
	 * Contains an RCU-guarded pointer to the last request. No reference
	 * is held to the request; users must carefully acquire a reference
	 * to the request using i915_active_fence_get(), or manage the RCU
	 * protection themselves (cf. the i915_active_fence API).
	 */
	struct i915_active_fence last_request;

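	/*
	 * A minimal sketch of safely sampling last_request from unlocked
	 * context, using the i915_active_fence API named above:
	 *
	 *	struct dma_fence *fence;
	 *
	 *	fence = i915_active_fence_get(&tl->last_request);
	 *	if (fence) {
	 *		...inspect or wait on the fence...
	 *		dma_fence_put(fence);
	 *	}
	 *
	 * i915_active_fence_get() takes its reference under the RCU read
	 * lock, so the caller owns a full reference on return and must
	 * drop it with dma_fence_put().
	 */
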
	/** A chain of completed timelines ready for early retirement. */
	struct intel_timeline *retire;

	/**
	 * We track the most recent seqno that we wait on in every context so
	 * that we only have to emit a new await and dependency on a more
	 * recent sync point. As the contexts may be executed out-of-order, we
	 * have to track each individually and cannot rely on an absolute
	 * global_seqno. When we know that all tracked fences are completed
	 * (i.e. when the driver is idle), we know that the syncmap is
	 * redundant and we can discard it without loss of generality.
	 */
	struct i915_syncmap *sync;

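	/*
	 * A hedged sketch of the await short-circuit the syncmap enables,
	 * using the intel_timeline_sync_is_later()/intel_timeline_sync_set()
	 * helpers from intel_timeline.h (emit_await() is hypothetical):
	 *
	 *	if (intel_timeline_sync_is_later(tl, fence))
	 *		return 0;	// already ordered after this point
	 *	err = emit_await(rq, fence);
	 *	if (err == 0)
	 *		err = intel_timeline_sync_set(tl, fence);
	 */
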
	struct list_head link;		/* entry in the owning gt's timeline list */
	struct intel_gt *gt;

	struct kref kref;
	struct rcu_head rcu;
};

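/*
 * A minimal sketch of how the breadcrumb is read back through
 * hwsp_seqno: the GPU writes the status page, so the CPU side needs
 * only a READ_ONCE() to observe the latest value (hypothetical
 * helper, mirroring hwsp_seqno() in i915_request.h):
 *
 *	static inline u32 timeline_seqno(const struct intel_timeline *tl)
 *	{
 *		return READ_ONCE(*tl->hwsp_seqno);
 *	}
 */
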
struct intel_timeline_cacheline {
	struct i915_active active;	/* tracks pending users of this cacheline */

	struct intel_timeline_hwsp *hwsp;	/* page this cacheline was carved from */
	void *vaddr;	/* CPU mapping, with the cacheline index packed into the low bits */

	struct rcu_head rcu;
};

#endif /* __I915_TIMELINE_TYPES_H__ */