/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
	/**
	 * The sg mask of the pages' sg_table, i.e. the mask of
	 * the lengths for each sg entry.
	 */
	unsigned int phys;

	/**
	 * The gtt page sizes we are allowed to use given the
	 * sg mask and the supported page sizes. This will
	 * express the smallest unit we can use for the whole
	 * object, as well as the larger sizes we may be able
	 * to use opportunistically.
	 */
	unsigned int sg;
};

/**
 * struct i915_vma_bindinfo - Information needed for async bind
 * only but that can be dropped after the bind has taken place.
 * Consider making this a separate argument to the bind_vma
 * op, coalescing with other arguments like vm, stash, cache_level
 * and flags.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Refcounted sg-table when delayed object destruction
 * is supported. May be NULL.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 */
struct i915_vma_bindinfo {
	struct sg_table *pages;
	struct i915_page_sizes page_sizes;
	struct i915_refct_sgt *pages_rsgt;
	bool readonly:1;
	bool lmem:1;
};

/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until unbind is scheduled, and as such it
 * is illegal to access this fence before the unbind is scheduled, other
 * than for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself is keeping a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: Pointer to struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @wakeref: Wakeref taken when the unbind is scheduled, see @needs_wakeref.
 * @vm: Non-refcounted pointer to the vm. This is for internal use only and
 * this member is cleared after vm_resource unbind.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start. Note that
 * this is after any padding that might have been allocated.
 * @node_size: Size of the allocated range manager node with padding
 * subtracted.
 * @vma_size: Bind size.
 * @guard: The size of the guard area preceding and trailing the bind.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
 * take a wakeref in the dma-fence signalling critical path, it needs to be
 * taken when the unbind is scheduled.
 * @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
 * needs to be skipped for unbind.
 * @tlb: Pointer to obj->mm.tlb if the unbind is async; otherwise NULL.
 *
 * The lifetime of a struct i915_vma_resource is from a binding request
 * until the actual, possibly asynchronous, unbind has completed.
 */
struct i915_vma_resource {
	struct dma_fence unbind_fence;
	/* See above for description of the lock. */
	spinlock_t lock;
	refcount_t hold_count;
	struct work_struct work;
	struct i915_sw_fence chain;
	struct rb_node rb;
	u64 __subtree_last;
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/**
	 * @bi: Information needed for async bind only but that can be dropped
	 * after the bind has taken place.
	 *
	 * Consider making this a separate argument to the bind_vma op,
	 * coalescing with other arguments like vm, stash, cache_level and
	 * flags.
	 */
	struct i915_vma_bindinfo bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct intel_memory_region *mr;
#endif
	const struct i915_vma_ops *ops;
	void *private;
	u64 start;
	u64 node_size;
	u64 vma_size;
	u32 guard;
	u32 page_sizes_gtt;

	u32 bound_flags;
	bool allocated:1;
	bool immediate_unbind:1;
	bool needs_wakeref:1;
	bool skip_pte_rewrite:1;

	u32 *tlb;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie);
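
/*
 * Minimal usage sketch, for illustration only (not code from the driver):
 * take a hold on a vma resource so that its unbind fence cannot signal,
 * inspect the snapshotted backing store, then release the hold. The helper
 * name below is hypothetical.
 */
static inline unsigned int
i915_vma_resource_example_count_sg(struct i915_vma_resource *vma_res)
{
	unsigned int nents = 0;
	bool lockdep_cookie;

	if (!i915_vma_resource_hold(vma_res, &lockdep_cookie))
		return 0;	/* The unbind fence has already signaled. */

	/* While the hold is kept, the bind information may be inspected. */
	if (vma_res->bi.pages)
		nents = vma_res->bi.pages->nents;

	i915_vma_resource_unhold(vma_res, lockdep_cookie);
	return nents;
}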

struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
					   u32 *tlb);

void __i915_vma_resource_init(struct i915_vma_resource *vma_res);

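/*
 * Minimal usage sketch, for illustration only (not code from the driver):
 * schedule an unbind and wait for it synchronously. Scheduling the unbind
 * publishes the unbind fence, which may then be waited upon. NULL is passed
 * for the TLB invalidation pointer, as the @tlb documentation above allows,
 * and the sketch assumes the returned fence carries a reference owned by
 * the caller. The helper name is hypothetical.
 */
static inline long
i915_vma_resource_example_unbind_sync(struct i915_vma_resource *vma_res)
{
	struct dma_fence *fence;
	long ret;

	fence = i915_vma_resource_unbind(vma_res, NULL);

	/* Interruptible wait; 0 on success, negative error code otherwise. */
	ret = dma_fence_wait(fence, true);
	dma_fence_put(fence);

	return ret;
}
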
/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
	dma_fence_get(&vma_res->unbind_fence);
	return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
	dma_fence_put(&vma_res->unbind_fence);
}

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start after padding.
 * @node_size: Size of the allocated range manager node minus padding.
 * @size: Bind size.
 * @guard: The size of the guard area preceding and trailing the bind.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocate and initialize functions is that
 * initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
					  struct i915_address_space *vm,
					  struct sg_table *pages,
					  const struct i915_page_sizes *page_sizes,
					  struct i915_refct_sgt *pages_rsgt,
					  bool readonly,
					  bool lmem,
					  struct intel_memory_region *mr,
					  const struct i915_vma_ops *ops,
					  void *private,
					  u64 start,
					  u64 node_size,
					  u64 size,
					  u32 guard)
{
	__i915_vma_resource_init(vma_res);
	vma_res->vm = vm;
	vma_res->bi.pages = pages;
	vma_res->bi.page_sizes = *page_sizes;
	if (pages_rsgt)
		vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
	vma_res->bi.readonly = readonly;
	vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	vma_res->mr = mr;
#endif
	vma_res->ops = ops;
	vma_res->private = private;
	vma_res->start = start;
	vma_res->node_size = node_size;
	vma_res->vma_size = size;
	vma_res->guard = guard;
}
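
/*
 * Minimal sketch of the allocate/initialize split described above, for
 * illustration only: allocate the resource up front where sleeping is
 * allowed, then initialize it under a lock where allocation is not.
 * "my_lock" and the helper name are hypothetical, and the NULL/false/0
 * bind parameters are placeholders only.
 */
static inline struct i915_vma_resource *
i915_vma_resource_example_prepare(struct i915_address_space *vm,
				  struct sg_table *pages,
				  const struct i915_page_sizes *page_sizes,
				  struct mutex *my_lock,
				  u64 start, u64 node_size, u64 size)
{
	struct i915_vma_resource *vma_res;

	/* Allocation happens outside the lock and may sleep. */
	vma_res = i915_vma_resource_alloc();
	if (IS_ERR_OR_NULL(vma_res))
		return vma_res;	/* Propagate the allocation failure. */

	mutex_lock(my_lock);
	/* Initialization is non-allocating and safe under the lock. */
	i915_vma_resource_init(vma_res, vm, pages, page_sizes,
			       NULL /* pages_rsgt */, false /* readonly */,
			       false /* lmem */, NULL /* mr */,
			       NULL /* ops */, NULL /* private */,
			       start, node_size, size, 0 /* guard */);
	mutex_unlock(my_lock);

	return vma_res;
}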

static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
	GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	i915_sw_fence_fini(&vma_res->chain);
}

int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 first,
				    u64 last,
				    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 first,
				     u64 last,
				     bool intr,
				     gfp_t gfp);

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);

#endif