xref: /openbsd-src/sys/dev/pci/drm/i915/gem/i915_gem_object.c (revision b93b6389736be4c5d3b1371fc019cfb5286eafa9)
1c349dbc7Sjsg /*
2c349dbc7Sjsg  * Copyright © 2017 Intel Corporation
3c349dbc7Sjsg  *
4c349dbc7Sjsg  * Permission is hereby granted, free of charge, to any person obtaining a
5c349dbc7Sjsg  * copy of this software and associated documentation files (the "Software"),
6c349dbc7Sjsg  * to deal in the Software without restriction, including without limitation
7c349dbc7Sjsg  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8c349dbc7Sjsg  * and/or sell copies of the Software, and to permit persons to whom the
9c349dbc7Sjsg  * Software is furnished to do so, subject to the following conditions:
10c349dbc7Sjsg  *
11c349dbc7Sjsg  * The above copyright notice and this permission notice (including the next
12c349dbc7Sjsg  * paragraph) shall be included in all copies or substantial portions of the
13c349dbc7Sjsg  * Software.
14c349dbc7Sjsg  *
15c349dbc7Sjsg  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16c349dbc7Sjsg  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17c349dbc7Sjsg  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18c349dbc7Sjsg  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19c349dbc7Sjsg  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20c349dbc7Sjsg  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21c349dbc7Sjsg  * IN THE SOFTWARE.
22c349dbc7Sjsg  *
23c349dbc7Sjsg  */
24c349dbc7Sjsg 
251bb76ff1Sjsg #include <linux/highmem.h>
26c349dbc7Sjsg #include <linux/sched/mm.h>
27c349dbc7Sjsg 
281bb76ff1Sjsg #include <drm/drm_cache.h>
291bb76ff1Sjsg 
30c349dbc7Sjsg #include "display/intel_frontbuffer.h"
311bb76ff1Sjsg #include "pxp/intel_pxp.h"
321bb76ff1Sjsg 
33c349dbc7Sjsg #include "i915_drv.h"
341bb76ff1Sjsg #include "i915_file_private.h"
35c349dbc7Sjsg #include "i915_gem_clflush.h"
36c349dbc7Sjsg #include "i915_gem_context.h"
371bb76ff1Sjsg #include "i915_gem_dmabuf.h"
38c349dbc7Sjsg #include "i915_gem_mman.h"
39c349dbc7Sjsg #include "i915_gem_object.h"
401bb76ff1Sjsg #include "i915_gem_ttm.h"
415ca02815Sjsg #include "i915_memcpy.h"
42c349dbc7Sjsg #include "i915_trace.h"
43c349dbc7Sjsg 
445ca02815Sjsg static struct pool slab_objects;
455ca02815Sjsg 
465ca02815Sjsg static const struct drm_gem_object_funcs i915_gem_object_funcs;
47c349dbc7Sjsg 
48*f005ef32Sjsg unsigned int i915_gem_get_pat_index(struct drm_i915_private *i915,
49*f005ef32Sjsg 				    enum i915_cache_level level)
50*f005ef32Sjsg {
51*f005ef32Sjsg 	if (drm_WARN_ON(&i915->drm, level >= I915_MAX_CACHE_LEVEL))
52*f005ef32Sjsg 		return 0;
53*f005ef32Sjsg 
54*f005ef32Sjsg 	return INTEL_INFO(i915)->cachelevel_to_pat[level];
55*f005ef32Sjsg }
56*f005ef32Sjsg 
57*f005ef32Sjsg bool i915_gem_object_has_cache_level(const struct drm_i915_gem_object *obj,
58*f005ef32Sjsg 				     enum i915_cache_level lvl)
59*f005ef32Sjsg {
60*f005ef32Sjsg 	/*
61*f005ef32Sjsg 	 * In case the pat_index is set by user space, this kernel mode
62*f005ef32Sjsg 	 * driver should leave the coherency to be managed by user space;
63*f005ef32Sjsg 	 * simply return true here.
64*f005ef32Sjsg 	 */
65*f005ef32Sjsg 	if (obj->pat_set_by_user)
66*f005ef32Sjsg 		return true;
67*f005ef32Sjsg 
68*f005ef32Sjsg 	/*
69*f005ef32Sjsg 	 * Otherwise the pat_index should have been converted from cache_level
70*f005ef32Sjsg 	 * so that the following comparison is valid.
71*f005ef32Sjsg 	 */
72*f005ef32Sjsg 	return obj->pat_index == i915_gem_get_pat_index(obj_to_i915(obj), lvl);
73*f005ef32Sjsg }
74*f005ef32Sjsg 
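/*
 * Usage sketch (illustrative, not an in-tree caller; obj, vaddr and size are
 * assumed): a typical check is whether the object is mapped cacheable before
 * deciding on an explicit clflush:
 *
 *	if (!i915_gem_object_has_cache_level(obj, I915_CACHE_NONE))
 *		drm_clflush_virt_range(vaddr, size);
 */
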
75c349dbc7Sjsg struct drm_i915_gem_object *i915_gem_object_alloc(void)
76c349dbc7Sjsg {
775ca02815Sjsg 	struct drm_i915_gem_object *obj;
785ca02815Sjsg 
79c349dbc7Sjsg #ifdef __linux__
805ca02815Sjsg 	obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
81c349dbc7Sjsg #else
825ca02815Sjsg 	obj = pool_get(&slab_objects, PR_WAITOK | PR_ZERO);
83c349dbc7Sjsg #endif
845ca02815Sjsg 	if (!obj)
855ca02815Sjsg 		return NULL;
865ca02815Sjsg 	obj->base.funcs = &i915_gem_object_funcs;
875ca02815Sjsg 
885ca02815Sjsg 	return obj;
89c349dbc7Sjsg }
90c349dbc7Sjsg 
91c349dbc7Sjsg void i915_gem_object_free(struct drm_i915_gem_object *obj)
92c349dbc7Sjsg {
93c349dbc7Sjsg #ifdef __linux__
945ca02815Sjsg 	return kmem_cache_free(slab_objects, obj);
95c349dbc7Sjsg #else
965ca02815Sjsg 	pool_put(&slab_objects, obj);
97c349dbc7Sjsg #endif
98c349dbc7Sjsg }
99c349dbc7Sjsg 
100c349dbc7Sjsg void i915_gem_object_init(struct drm_i915_gem_object *obj,
101c349dbc7Sjsg 			  const struct drm_i915_gem_object_ops *ops,
1025ca02815Sjsg 			  struct lock_class_key *key, unsigned flags)
103c349dbc7Sjsg {
1045ca02815Sjsg 	/*
1055ca02815Sjsg 	 * A gem object is embedded both in a struct ttm_buffer_object :/ and
1065ca02815Sjsg 	 * in a drm_i915_gem_object. Make sure they are aliased.
1075ca02815Sjsg 	 */
1085ca02815Sjsg 	BUILD_BUG_ON(offsetof(typeof(*obj), base) !=
1095ca02815Sjsg 		     offsetof(typeof(*obj), __do_not_access.base));
110c349dbc7Sjsg 
11163b35fb2Sjsg 	mtx_init(&obj->vma.lock, IPL_NONE);
112c349dbc7Sjsg 	INIT_LIST_HEAD(&obj->vma.list);
113c349dbc7Sjsg 
114c349dbc7Sjsg 	INIT_LIST_HEAD(&obj->mm.link);
115c349dbc7Sjsg 
116c349dbc7Sjsg 	INIT_LIST_HEAD(&obj->lut_list);
117ad8b1aafSjsg 	mtx_init(&obj->lut_lock, IPL_NONE);
118c349dbc7Sjsg 
11963b35fb2Sjsg 	mtx_init(&obj->mmo.lock, IPL_NONE);
120c349dbc7Sjsg 	obj->mmo.offsets = RB_ROOT;
121c349dbc7Sjsg 
122c349dbc7Sjsg 	init_rcu_head(&obj->rcu);
123c349dbc7Sjsg 
124c349dbc7Sjsg 	obj->ops = ops;
1255ca02815Sjsg 	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
1265ca02815Sjsg 	obj->flags = flags;
127c349dbc7Sjsg 
128c349dbc7Sjsg 	obj->mm.madv = I915_MADV_WILLNEED;
129c349dbc7Sjsg 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
130c349dbc7Sjsg 	rw_init(&obj->mm.get_page.lock, "mmget");
1315ca02815Sjsg 	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
1325ca02815Sjsg 	rw_init(&obj->mm.get_dma_page.lock, "mmgetd");
133c349dbc7Sjsg }
134c349dbc7Sjsg 
135c349dbc7Sjsg /**
1361bb76ff1Sjsg  * __i915_gem_object_fini - Clean up a GEM object initialization
1371bb76ff1Sjsg  * @obj: The gem object to cleanup
1381bb76ff1Sjsg  *
1391bb76ff1Sjsg  * This function cleans up gem object fields that are set up by
1401bb76ff1Sjsg  * drm_gem_private_object_init() and i915_gem_object_init().
1411bb76ff1Sjsg  * It's primarily intended as a helper for backends that need to
1421bb76ff1Sjsg  * clean up the gem object in separate steps.
1431bb76ff1Sjsg  */
1441bb76ff1Sjsg void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
1451bb76ff1Sjsg {
1461bb76ff1Sjsg 	mutex_destroy(&obj->mm.get_page.lock);
1471bb76ff1Sjsg 	mutex_destroy(&obj->mm.get_dma_page.lock);
1481bb76ff1Sjsg 	dma_resv_fini(&obj->base._resv);
1491bb76ff1Sjsg }
1501bb76ff1Sjsg 
1511bb76ff1Sjsg /**
1521bb76ff1Sjsg  * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
1531bb76ff1Sjsg  * for a given cache_level
154c349dbc7Sjsg  * @obj: #drm_i915_gem_object
155c349dbc7Sjsg  * @cache_level: cache level
156c349dbc7Sjsg  */
157c349dbc7Sjsg void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
158c349dbc7Sjsg 					 unsigned int cache_level)
159c349dbc7Sjsg {
1601bb76ff1Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1611bb76ff1Sjsg 
162*f005ef32Sjsg 	obj->pat_index = i915_gem_get_pat_index(i915, cache_level);
163c349dbc7Sjsg 
164c349dbc7Sjsg 	if (cache_level != I915_CACHE_NONE)
165c349dbc7Sjsg 		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
166c349dbc7Sjsg 				       I915_BO_CACHE_COHERENT_FOR_WRITE);
1671bb76ff1Sjsg 	else if (HAS_LLC(i915))
168c349dbc7Sjsg 		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
169c349dbc7Sjsg 	else
170c349dbc7Sjsg 		obj->cache_coherent = 0;
171c349dbc7Sjsg 
172c349dbc7Sjsg 	obj->cache_dirty =
1731bb76ff1Sjsg 		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
1741bb76ff1Sjsg 		!IS_DGFX(i915);
1751bb76ff1Sjsg }
1761bb76ff1Sjsg 
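/*
 * Usage sketch, loosely following how a system-memory backend might pick a
 * cache level at object creation time (illustrative, not verbatim from any
 * backend; i915 and obj are assumed):
 *
 *	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
 *	i915_gem_object_set_cache_coherency(obj, cache_level);
 */
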
177*f005ef32Sjsg /**
178*f005ef32Sjsg  * i915_gem_object_set_pat_index - set PAT index to be used in PTE encode
179*f005ef32Sjsg  * @obj: #drm_i915_gem_object
180*f005ef32Sjsg  * @pat_index: PAT index
181*f005ef32Sjsg  *
182*f005ef32Sjsg  * This is a clone of i915_gem_object_set_cache_coherency taking pat index
183*f005ef32Sjsg  * instead of cache_level as its second argument.
184*f005ef32Sjsg  */
185*f005ef32Sjsg void i915_gem_object_set_pat_index(struct drm_i915_gem_object *obj,
186*f005ef32Sjsg 				   unsigned int pat_index)
187*f005ef32Sjsg {
188*f005ef32Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
189*f005ef32Sjsg 
190*f005ef32Sjsg 	if (obj->pat_index == pat_index)
191*f005ef32Sjsg 		return;
192*f005ef32Sjsg 
193*f005ef32Sjsg 	obj->pat_index = pat_index;
194*f005ef32Sjsg 
195*f005ef32Sjsg 	if (pat_index != i915_gem_get_pat_index(i915, I915_CACHE_NONE))
196*f005ef32Sjsg 		obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
197*f005ef32Sjsg 				       I915_BO_CACHE_COHERENT_FOR_WRITE);
198*f005ef32Sjsg 	else if (HAS_LLC(i915))
199*f005ef32Sjsg 		obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
200*f005ef32Sjsg 	else
201*f005ef32Sjsg 		obj->cache_coherent = 0;
202*f005ef32Sjsg 
203*f005ef32Sjsg 	obj->cache_dirty =
204*f005ef32Sjsg 		!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) &&
205*f005ef32Sjsg 		!IS_DGFX(i915);
206*f005ef32Sjsg }
207*f005ef32Sjsg 
2081bb76ff1Sjsg bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
2091bb76ff1Sjsg {
2101bb76ff1Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
2111bb76ff1Sjsg 
2121bb76ff1Sjsg 	/*
2131bb76ff1Sjsg 	 * This is purely from a security perspective, so we simply don't care
2141bb76ff1Sjsg 	 * about non-userspace objects being able to bypass the LLC.
2151bb76ff1Sjsg 	 */
2161bb76ff1Sjsg 	if (!(obj->flags & I915_BO_ALLOC_USER))
2171bb76ff1Sjsg 		return false;
2181bb76ff1Sjsg 
2191bb76ff1Sjsg 	/*
220*f005ef32Sjsg 	 * Always flush cache for UMD objects at creation time.
221*f005ef32Sjsg 	 */
222*f005ef32Sjsg 	if (obj->pat_set_by_user)
223*f005ef32Sjsg 		return true;
224*f005ef32Sjsg 
225*f005ef32Sjsg 	/*
2261bb76ff1Sjsg 	 * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
2271bb76ff1Sjsg 	 * possible for userspace to bypass the GTT caching bits set by the
2281bb76ff1Sjsg 	 * kernel, as per the given object cache_level. This is troublesome
2291bb76ff1Sjsg 	 * since the heavy flush we apply when first gathering the pages is
2301bb76ff1Sjsg 	 * skipped if the kernel thinks the object is coherent with the GPU. As
2311bb76ff1Sjsg 	 * a result it might be possible to bypass the cache and read the
2321bb76ff1Sjsg 	 * contents of the page directly, which could be stale data. If it's
2331bb76ff1Sjsg 	 * just a case of userspace shooting themselves in the foot then so be
2341bb76ff1Sjsg 	 * it, but since i915 takes the stance of always zeroing memory before
2351bb76ff1Sjsg 	 * handing it to userspace, we need to prevent this.
2361bb76ff1Sjsg 	 */
237*f005ef32Sjsg 	return (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915));
238c349dbc7Sjsg }
239c349dbc7Sjsg 
2405ca02815Sjsg static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
241c349dbc7Sjsg {
242c349dbc7Sjsg 	struct drm_i915_gem_object *obj = to_intel_bo(gem);
243c349dbc7Sjsg 	struct drm_i915_file_private *fpriv = file->driver_priv;
244ad8b1aafSjsg 	struct i915_lut_handle bookmark = {};
245c349dbc7Sjsg 	struct i915_mmap_offset *mmo, *mn;
246c349dbc7Sjsg 	struct i915_lut_handle *lut, *ln;
247c349dbc7Sjsg 	DRM_LIST_HEAD(close);
248c349dbc7Sjsg 
249ad8b1aafSjsg 	spin_lock(&obj->lut_lock);
250c349dbc7Sjsg 	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
251c349dbc7Sjsg 		struct i915_gem_context *ctx = lut->ctx;
252c349dbc7Sjsg 
253ad8b1aafSjsg 		if (ctx && ctx->file_priv == fpriv) {
254c349dbc7Sjsg 			i915_gem_context_get(ctx);
255c349dbc7Sjsg 			list_move(&lut->obj_link, &close);
256c349dbc7Sjsg 		}
257ad8b1aafSjsg 
258ad8b1aafSjsg 		/* Break long locks, and carefully continue on from this spot */
259ad8b1aafSjsg 		if (&ln->obj_link != &obj->lut_list) {
260ad8b1aafSjsg 			list_add_tail(&bookmark.obj_link, &ln->obj_link);
261ad8b1aafSjsg 			if (cond_resched_lock(&obj->lut_lock))
262ad8b1aafSjsg 				list_safe_reset_next(&bookmark, ln, obj_link);
263ad8b1aafSjsg 			__list_del_entry(&bookmark.obj_link);
264ad8b1aafSjsg 		}
265ad8b1aafSjsg 	}
266ad8b1aafSjsg 	spin_unlock(&obj->lut_lock);
267c349dbc7Sjsg 
268c349dbc7Sjsg 	spin_lock(&obj->mmo.lock);
269c349dbc7Sjsg 	rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
2700608e4cbSjsg 		drm_vma_node_revoke(&mmo->vma_node, file);
271c349dbc7Sjsg 	spin_unlock(&obj->mmo.lock);
272c349dbc7Sjsg 
273c349dbc7Sjsg 	list_for_each_entry_safe(lut, ln, &close, obj_link) {
274c349dbc7Sjsg 		struct i915_gem_context *ctx = lut->ctx;
275c349dbc7Sjsg 		struct i915_vma *vma;
276c349dbc7Sjsg 
277c349dbc7Sjsg 		/*
278c349dbc7Sjsg 		 * We allow the process to have multiple handles to the same
279c349dbc7Sjsg 		 * vma, in the same fd namespace, by virtue of flink/open.
280c349dbc7Sjsg 		 */
281c349dbc7Sjsg 
282ad8b1aafSjsg 		mutex_lock(&ctx->lut_mutex);
283c349dbc7Sjsg 		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
284c349dbc7Sjsg 		if (vma) {
285c349dbc7Sjsg 			GEM_BUG_ON(vma->obj != obj);
286c349dbc7Sjsg 			GEM_BUG_ON(!atomic_read(&vma->open_count));
287c349dbc7Sjsg 			i915_vma_close(vma);
288c349dbc7Sjsg 		}
289ad8b1aafSjsg 		mutex_unlock(&ctx->lut_mutex);
290c349dbc7Sjsg 
291c349dbc7Sjsg 		i915_gem_context_put(lut->ctx);
292c349dbc7Sjsg 		i915_lut_handle_free(lut);
293c349dbc7Sjsg 		i915_gem_object_put(obj);
294c349dbc7Sjsg 	}
295c349dbc7Sjsg }
296c349dbc7Sjsg 
2975ca02815Sjsg void __i915_gem_free_object_rcu(struct rcu_head *head)
298c349dbc7Sjsg {
299c349dbc7Sjsg 	struct drm_i915_gem_object *obj =
300c349dbc7Sjsg 		container_of(head, typeof(*obj), rcu);
301c349dbc7Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
302c349dbc7Sjsg 
303c349dbc7Sjsg #ifdef __OpenBSD__
304c349dbc7Sjsg 	if (obj->base.uao)
305c349dbc7Sjsg 		uao_detach(obj->base.uao);
306c349dbc7Sjsg #endif
307c349dbc7Sjsg 
308c349dbc7Sjsg 	i915_gem_object_free(obj);
309c349dbc7Sjsg 
310c349dbc7Sjsg 	GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
311c349dbc7Sjsg 	atomic_dec(&i915->mm.free_count);
312c349dbc7Sjsg }
313c349dbc7Sjsg 
314ad8b1aafSjsg static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
315ad8b1aafSjsg {
316ad8b1aafSjsg 	/* Skip serialisation and waking the device if known to be not used. */
317ad8b1aafSjsg 
3181bb76ff1Sjsg 	if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
319ad8b1aafSjsg 		i915_gem_object_release_mmap_gtt(obj);
320ad8b1aafSjsg 
321ad8b1aafSjsg 	if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
322ad8b1aafSjsg 		struct i915_mmap_offset *mmo, *mn;
323ad8b1aafSjsg 
324ad8b1aafSjsg 		i915_gem_object_release_mmap_offset(obj);
325ad8b1aafSjsg 
326ad8b1aafSjsg 		rbtree_postorder_for_each_entry_safe(mmo, mn,
327ad8b1aafSjsg 						     &obj->mmo.offsets,
328ad8b1aafSjsg 						     offset) {
329ad8b1aafSjsg 			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
330ad8b1aafSjsg 					      &mmo->vma_node);
331ad8b1aafSjsg 			kfree(mmo);
332ad8b1aafSjsg 		}
333ad8b1aafSjsg 		obj->mmo.offsets = RB_ROOT;
334ad8b1aafSjsg 	}
335ad8b1aafSjsg }
336ad8b1aafSjsg 
3371bb76ff1Sjsg /**
3381bb76ff1Sjsg  * __i915_gem_object_pages_fini - Clean up pages use of a gem object
3391bb76ff1Sjsg  * @obj: The gem object to clean up
3401bb76ff1Sjsg  *
3411bb76ff1Sjsg  * This function cleans up usage of the object mm.pages member. It
3421bb76ff1Sjsg  * is intended for backends that need to clean up a gem object in
3431bb76ff1Sjsg  * separate steps and needs to be called when the object is idle before
3441bb76ff1Sjsg  * the object's backing memory is freed.
3451bb76ff1Sjsg  */
3461bb76ff1Sjsg void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
347c349dbc7Sjsg {
3481bb76ff1Sjsg 	assert_object_held_shared(obj);
349c349dbc7Sjsg 
350c349dbc7Sjsg 	if (!list_empty(&obj->vma.list)) {
351c349dbc7Sjsg 		struct i915_vma *vma;
352c349dbc7Sjsg 
353c349dbc7Sjsg 		spin_lock(&obj->vma.lock);
354c349dbc7Sjsg 		while ((vma = list_first_entry_or_null(&obj->vma.list,
355c349dbc7Sjsg 						       struct i915_vma,
356c349dbc7Sjsg 						       obj_link))) {
357c349dbc7Sjsg 			GEM_BUG_ON(vma->obj != obj);
358c349dbc7Sjsg 			spin_unlock(&obj->vma.lock);
359c349dbc7Sjsg 
3601bb76ff1Sjsg 			i915_vma_destroy(vma);
361c349dbc7Sjsg 
362c349dbc7Sjsg 			spin_lock(&obj->vma.lock);
363c349dbc7Sjsg 		}
364c349dbc7Sjsg 		spin_unlock(&obj->vma.lock);
365c349dbc7Sjsg 	}
366c349dbc7Sjsg 
367ad8b1aafSjsg 	__i915_gem_object_free_mmaps(obj);
368c349dbc7Sjsg 
369c349dbc7Sjsg 	atomic_set(&obj->mm.pages_pin_count, 0);
370*f005ef32Sjsg 
371*f005ef32Sjsg 	/*
372*f005ef32Sjsg 	 * dma_buf_unmap_attachment() requires reservation to be
373*f005ef32Sjsg 	 * locked. The imported GEM shouldn't share reservation lock
374*f005ef32Sjsg 	 * and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
375*f005ef32Sjsg 	 * dma-buf, so it's safe to take the lock.
376*f005ef32Sjsg 	 */
377*f005ef32Sjsg 	if (obj->base.import_attach)
378*f005ef32Sjsg 		i915_gem_object_lock(obj, NULL);
379*f005ef32Sjsg 
380c349dbc7Sjsg 	__i915_gem_object_put_pages(obj);
381*f005ef32Sjsg 
382*f005ef32Sjsg 	if (obj->base.import_attach)
383*f005ef32Sjsg 		i915_gem_object_unlock(obj);
384*f005ef32Sjsg 
385c349dbc7Sjsg 	GEM_BUG_ON(i915_gem_object_has_pages(obj));
3861bb76ff1Sjsg }
3871bb76ff1Sjsg 
3881bb76ff1Sjsg void __i915_gem_free_object(struct drm_i915_gem_object *obj)
3891bb76ff1Sjsg {
3901bb76ff1Sjsg 	trace_i915_gem_object_destroy(obj);
3911bb76ff1Sjsg 
3921bb76ff1Sjsg 	GEM_BUG_ON(!list_empty(&obj->lut_list));
3931bb76ff1Sjsg 
394c349dbc7Sjsg 	bitmap_free(obj->bit_17);
395c349dbc7Sjsg 
396c349dbc7Sjsg 	if (obj->base.import_attach)
397c349dbc7Sjsg 		drm_prime_gem_destroy(&obj->base, NULL);
398c349dbc7Sjsg 
399c349dbc7Sjsg 	drm_gem_free_mmap_offset(&obj->base);
400c349dbc7Sjsg 
401c349dbc7Sjsg 	if (obj->ops->release)
402c349dbc7Sjsg 		obj->ops->release(obj);
403c349dbc7Sjsg 
4045ca02815Sjsg 	if (obj->mm.n_placements > 1)
4055ca02815Sjsg 		kfree(obj->mm.placements);
4065ca02815Sjsg 
4075ca02815Sjsg 	if (obj->shares_resv_from)
4085ca02815Sjsg 		i915_vm_resv_put(obj->shares_resv_from);
4091bb76ff1Sjsg 
4101bb76ff1Sjsg 	__i915_gem_object_fini(obj);
4115ca02815Sjsg }
4125ca02815Sjsg 
4135ca02815Sjsg static void __i915_gem_free_objects(struct drm_i915_private *i915,
4145ca02815Sjsg 				    struct llist_node *freed)
4155ca02815Sjsg {
4165ca02815Sjsg 	struct drm_i915_gem_object *obj, *on;
4175ca02815Sjsg 
4185ca02815Sjsg 	llist_for_each_entry_safe(obj, on, freed, freed) {
4195ca02815Sjsg 		might_sleep();
4205ca02815Sjsg 		if (obj->ops->delayed_free) {
4215ca02815Sjsg 			obj->ops->delayed_free(obj);
4225ca02815Sjsg 			continue;
4235ca02815Sjsg 		}
4241bb76ff1Sjsg 
4251bb76ff1Sjsg 		__i915_gem_object_pages_fini(obj);
4265ca02815Sjsg 		__i915_gem_free_object(obj);
4275ca02815Sjsg 
428c349dbc7Sjsg 		/* But keep the pointer alive for RCU-protected lookups */
429c349dbc7Sjsg 		call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
430c349dbc7Sjsg 		cond_resched();
431c349dbc7Sjsg 	}
432c349dbc7Sjsg }
433c349dbc7Sjsg 
434c349dbc7Sjsg void i915_gem_flush_free_objects(struct drm_i915_private *i915)
435c349dbc7Sjsg {
436c349dbc7Sjsg 	struct llist_node *freed = llist_del_all(&i915->mm.free_list);
437c349dbc7Sjsg 
438c349dbc7Sjsg 	if (unlikely(freed))
439c349dbc7Sjsg 		__i915_gem_free_objects(i915, freed);
440c349dbc7Sjsg }
441c349dbc7Sjsg 
442c349dbc7Sjsg static void __i915_gem_free_work(struct work_struct *work)
443c349dbc7Sjsg {
444c349dbc7Sjsg 	struct drm_i915_private *i915 =
445c349dbc7Sjsg 		container_of(work, struct drm_i915_private, mm.free_work);
446c349dbc7Sjsg 
447c349dbc7Sjsg 	i915_gem_flush_free_objects(i915);
448c349dbc7Sjsg }
449c349dbc7Sjsg 
4505ca02815Sjsg static void i915_gem_free_object(struct drm_gem_object *gem_obj)
451c349dbc7Sjsg {
452c349dbc7Sjsg 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
453c349dbc7Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
454c349dbc7Sjsg 
455c349dbc7Sjsg 	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
456c349dbc7Sjsg 
457c349dbc7Sjsg 	/*
458c349dbc7Sjsg 	 * Before we free the object, make sure any pure RCU-only
459c349dbc7Sjsg 	 * read-side critical sections are complete, e.g.
460c349dbc7Sjsg 	 * i915_gem_busy_ioctl(). For the corresponding synchronized
461c349dbc7Sjsg 	 * lookup see i915_gem_object_lookup_rcu().
462c349dbc7Sjsg 	 */
463c349dbc7Sjsg 	atomic_inc(&i915->mm.free_count);
464c349dbc7Sjsg 
465c349dbc7Sjsg 	/*
466c349dbc7Sjsg 	 * Since we require blocking on struct_mutex to unbind the freed
467c349dbc7Sjsg 	 * object from the GPU before releasing resources back to the
468c349dbc7Sjsg 	 * system, we can not do that directly from the RCU callback (which may
469c349dbc7Sjsg 	 * be a softirq context), but must instead then defer that work onto a
470c349dbc7Sjsg 	 * kthread. We use the RCU callback rather than move the freed object
471c349dbc7Sjsg 	 * directly onto the work queue so that we can mix between using the
472c349dbc7Sjsg 	 * worker and performing frees directly from subsequent allocations for
473c349dbc7Sjsg 	 * crude but effective memory throttling.
474c349dbc7Sjsg 	 */
4755ca02815Sjsg 
476c349dbc7Sjsg 	if (llist_add(&obj->freed, &i915->mm.free_list))
477c349dbc7Sjsg 		queue_work(i915->wq, &i915->mm.free_work);
478c349dbc7Sjsg }
479c349dbc7Sjsg 
480c349dbc7Sjsg void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
481c349dbc7Sjsg 					 enum fb_op_origin origin)
482c349dbc7Sjsg {
483c349dbc7Sjsg 	struct intel_frontbuffer *front;
484c349dbc7Sjsg 
485*f005ef32Sjsg 	front = i915_gem_object_get_frontbuffer(obj);
486c349dbc7Sjsg 	if (front) {
487c349dbc7Sjsg 		intel_frontbuffer_flush(front, origin);
488c349dbc7Sjsg 		intel_frontbuffer_put(front);
489c349dbc7Sjsg 	}
490c349dbc7Sjsg }
491c349dbc7Sjsg 
492c349dbc7Sjsg void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
493c349dbc7Sjsg 					      enum fb_op_origin origin)
494c349dbc7Sjsg {
495c349dbc7Sjsg 	struct intel_frontbuffer *front;
496c349dbc7Sjsg 
497*f005ef32Sjsg 	front = i915_gem_object_get_frontbuffer(obj);
498c349dbc7Sjsg 	if (front) {
499c349dbc7Sjsg 		intel_frontbuffer_invalidate(front, origin);
500c349dbc7Sjsg 		intel_frontbuffer_put(front);
501c349dbc7Sjsg 	}
502c349dbc7Sjsg }
503c349dbc7Sjsg 
5045ca02815Sjsg static void
5055ca02815Sjsg i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
5065ca02815Sjsg {
507*f005ef32Sjsg 	pgoff_t idx = offset >> PAGE_SHIFT;
5085ca02815Sjsg 	void *src_map;
5095ca02815Sjsg 	void *src_ptr;
5105ca02815Sjsg 
511*f005ef32Sjsg 	src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
5125ca02815Sjsg 
5135ca02815Sjsg 	src_ptr = src_map + offset_in_page(offset);
5145ca02815Sjsg 	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
5155ca02815Sjsg 		drm_clflush_virt_range(src_ptr, size);
5165ca02815Sjsg 	memcpy(dst, src_ptr, size);
5175ca02815Sjsg 
5185ca02815Sjsg 	kunmap_atomic(src_map);
5195ca02815Sjsg }
5205ca02815Sjsg 
5215ca02815Sjsg static void
5225ca02815Sjsg i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
5235ca02815Sjsg {
524*f005ef32Sjsg 	pgoff_t idx = offset >> PAGE_SHIFT;
525*f005ef32Sjsg 	dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
5265ca02815Sjsg 	void __iomem *src_map;
5275ca02815Sjsg 	void __iomem *src_ptr;
5285ca02815Sjsg 
5295ca02815Sjsg 	src_map = io_mapping_map_wc(&obj->mm.region->iomap,
5305ca02815Sjsg 				    dma - obj->mm.region->region.start,
5315ca02815Sjsg 				    PAGE_SIZE);
5325ca02815Sjsg 
5335ca02815Sjsg 	src_ptr = src_map + offset_in_page(offset);
5345ca02815Sjsg 	if (!i915_memcpy_from_wc(dst, (void __force *)src_ptr, size))
5355ca02815Sjsg 		memcpy_fromio(dst, src_ptr, size);
5365ca02815Sjsg 
5375ca02815Sjsg 	io_mapping_unmap(src_map);
5385ca02815Sjsg }
5395ca02815Sjsg 
540*f005ef32Sjsg static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)
541*f005ef32Sjsg {
542*f005ef32Sjsg 	GEM_BUG_ON(!i915_gem_object_has_iomem(obj));
543*f005ef32Sjsg 
544*f005ef32Sjsg 	if (IS_DGFX(to_i915(obj->base.dev)))
545*f005ef32Sjsg 		return i915_ttm_resource_mappable(i915_gem_to_ttm(obj)->resource);
546*f005ef32Sjsg 
547*f005ef32Sjsg 	return true;
548*f005ef32Sjsg }
549*f005ef32Sjsg 
5505ca02815Sjsg /**
5515ca02815Sjsg  * i915_gem_object_read_from_page - read data from the page of a GEM object
5525ca02815Sjsg  * @obj: GEM object to read from
5535ca02815Sjsg  * @offset: offset within the object
5545ca02815Sjsg  * @dst: buffer to store the read data
5555ca02815Sjsg  * @size: size to read
5565ca02815Sjsg  *
5575ca02815Sjsg  * Reads data from @obj at the specified offset. The requested region to read
5585ca02815Sjsg  * from can't cross a page boundary. The caller must ensure that @obj pages
5595ca02815Sjsg  * are pinned and that @obj is synced wrt. any related writes.
5605ca02815Sjsg  *
5611bb76ff1Sjsg  * Return: %0 on success or -ENODEV if the type of @obj's backing store is
5625ca02815Sjsg  * unsupported.
5635ca02815Sjsg  */
5645ca02815Sjsg int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
5655ca02815Sjsg {
566*f005ef32Sjsg 	GEM_BUG_ON(overflows_type(offset >> PAGE_SHIFT, pgoff_t));
5675ca02815Sjsg 	GEM_BUG_ON(offset >= obj->base.size);
5685ca02815Sjsg 	GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
5695ca02815Sjsg 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
5705ca02815Sjsg 
5715ca02815Sjsg 	if (i915_gem_object_has_struct_page(obj))
5725ca02815Sjsg 		i915_gem_object_read_from_page_kmap(obj, offset, dst, size);
573*f005ef32Sjsg 	else if (i915_gem_object_has_iomem(obj) && object_has_mappable_iomem(obj))
5745ca02815Sjsg 		i915_gem_object_read_from_page_iomap(obj, offset, dst, size);
5755ca02815Sjsg 	else
5765ca02815Sjsg 		return -ENODEV;
5775ca02815Sjsg 
5785ca02815Sjsg 	return 0;
5795ca02815Sjsg }
5805ca02815Sjsg 
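/*
 * Usage sketch (illustrative caller, variables assumed): the pages must be
 * pinned, the object synced, and the read must stay within one page, per the
 * kernel-doc above:
 *
 *	u32 val;
 *	int err;
 *
 *	err = i915_gem_object_read_from_page(obj, offset, &val, sizeof(val));
 *	if (err)
 *		return err;
 */
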
5815ca02815Sjsg /**
5825ca02815Sjsg  * i915_gem_object_evictable - Whether object is likely evictable after unbind.
5835ca02815Sjsg  * @obj: The object to check
5845ca02815Sjsg  *
5855ca02815Sjsg  * This function checks whether the object is likely evictable after unbind.
5865ca02815Sjsg  * If the object is not locked when checking, the result is only advisory.
5875ca02815Sjsg  * If the object is locked when checking, and the function returns true,
5885ca02815Sjsg  * then an eviction should indeed be possible. But since unlocked vma
5895ca02815Sjsg  * unpinning and unbinding is currently possible, the object can actually
5905ca02815Sjsg  * become evictable even if this function returns false.
5915ca02815Sjsg  *
5925ca02815Sjsg  * Return: true if the object may be evictable. False otherwise.
5935ca02815Sjsg  */
5945ca02815Sjsg bool i915_gem_object_evictable(struct drm_i915_gem_object *obj)
5955ca02815Sjsg {
5965ca02815Sjsg 	struct i915_vma *vma;
5975ca02815Sjsg 	int pin_count = atomic_read(&obj->mm.pages_pin_count);
5985ca02815Sjsg 
5995ca02815Sjsg 	if (!pin_count)
6005ca02815Sjsg 		return true;
6015ca02815Sjsg 
6025ca02815Sjsg 	spin_lock(&obj->vma.lock);
6035ca02815Sjsg 	list_for_each_entry(vma, &obj->vma.list, obj_link) {
6045ca02815Sjsg 		if (i915_vma_is_pinned(vma)) {
6055ca02815Sjsg 			spin_unlock(&obj->vma.lock);
6065ca02815Sjsg 			return false;
6075ca02815Sjsg 		}
6085ca02815Sjsg 		if (atomic_read(&vma->pages_count))
6095ca02815Sjsg 			pin_count--;
6105ca02815Sjsg 	}
6115ca02815Sjsg 	spin_unlock(&obj->vma.lock);
6125ca02815Sjsg 	GEM_WARN_ON(pin_count < 0);
6135ca02815Sjsg 
6145ca02815Sjsg 	return pin_count == 0;
6155ca02815Sjsg }
6165ca02815Sjsg 
6175ca02815Sjsg /**
6185ca02815Sjsg  * i915_gem_object_migratable - Whether the object is migratable out of the
6195ca02815Sjsg  * current region.
6205ca02815Sjsg  * @obj: Pointer to the object.
6215ca02815Sjsg  *
6225ca02815Sjsg  * Return: Whether the object is allowed to be resident in other
6235ca02815Sjsg  * regions than the current while pages are present.
6245ca02815Sjsg  */
6255ca02815Sjsg bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
6265ca02815Sjsg {
6275ca02815Sjsg 	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
6285ca02815Sjsg 
6295ca02815Sjsg 	if (!mr)
6305ca02815Sjsg 		return false;
6315ca02815Sjsg 
6325ca02815Sjsg 	return obj->mm.n_placements > 1;
6335ca02815Sjsg }
6345ca02815Sjsg 
6355ca02815Sjsg /**
6365ca02815Sjsg  * i915_gem_object_has_struct_page - Whether the object is page-backed
6375ca02815Sjsg  * @obj: The object to query.
6385ca02815Sjsg  *
6395ca02815Sjsg  * This function should only be called while the object is locked or pinned,
6405ca02815Sjsg  * otherwise the page backing may change under the caller.
6415ca02815Sjsg  *
6425ca02815Sjsg  * Return: True if page-backed, false otherwise.
6435ca02815Sjsg  */
6445ca02815Sjsg bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
6455ca02815Sjsg {
6465ca02815Sjsg #ifdef CONFIG_LOCKDEP
6475ca02815Sjsg 	if (IS_DGFX(to_i915(obj->base.dev)) &&
6485ca02815Sjsg 	    i915_gem_object_evictable((void __force *)obj))
6495ca02815Sjsg 		assert_object_held_shared(obj);
6505ca02815Sjsg #endif
6515ca02815Sjsg 	return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
6525ca02815Sjsg }
6535ca02815Sjsg 
6545ca02815Sjsg /**
6555ca02815Sjsg  * i915_gem_object_has_iomem - Whether the object is iomem-backed
6565ca02815Sjsg  * @obj: The object to query.
6575ca02815Sjsg  *
6585ca02815Sjsg  * This function should only be called while the object is locked or pinned,
6595ca02815Sjsg  * otherwise the iomem backing may change under the caller.
6605ca02815Sjsg  *
6615ca02815Sjsg  * Return: True if iomem-backed, false otherwise.
6625ca02815Sjsg  */
6635ca02815Sjsg bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
6645ca02815Sjsg {
6655ca02815Sjsg #ifdef CONFIG_LOCKDEP
6665ca02815Sjsg 	if (IS_DGFX(to_i915(obj->base.dev)) &&
6675ca02815Sjsg 	    i915_gem_object_evictable((void __force *)obj))
6685ca02815Sjsg 		assert_object_held_shared(obj);
6695ca02815Sjsg #endif
6705ca02815Sjsg 	return obj->mem_flags & I915_BO_FLAG_IOMEM;
6715ca02815Sjsg }
6725ca02815Sjsg 
6735ca02815Sjsg /**
6745ca02815Sjsg  * i915_gem_object_can_migrate - Whether an object likely can be migrated
6755ca02815Sjsg  *
6765ca02815Sjsg  * @obj: The object to migrate
6775ca02815Sjsg  * @id: The region intended to migrate to
6785ca02815Sjsg  *
6795ca02815Sjsg  * Check whether the object backend supports migration to the
6805ca02815Sjsg  * given region. Note that pinning may affect the ability to migrate as
6815ca02815Sjsg  * returned by this function.
6825ca02815Sjsg  *
6835ca02815Sjsg  * This function is primarily intended as a helper for checking the
6845ca02815Sjsg  * possibility to migrate objects and might be slightly less permissive
6855ca02815Sjsg  * than i915_gem_object_migrate() when it comes to objects with the
6865ca02815Sjsg  * I915_BO_ALLOC_USER flag set.
6875ca02815Sjsg  *
6885ca02815Sjsg  * Return: true if migration is possible, false otherwise.
6895ca02815Sjsg  */
6905ca02815Sjsg bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
6915ca02815Sjsg 				 enum intel_region_id id)
6925ca02815Sjsg {
6935ca02815Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
6945ca02815Sjsg 	unsigned int num_allowed = obj->mm.n_placements;
6955ca02815Sjsg 	struct intel_memory_region *mr;
6965ca02815Sjsg 	unsigned int i;
6975ca02815Sjsg 
6985ca02815Sjsg 	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
6995ca02815Sjsg 	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
7005ca02815Sjsg 
7015ca02815Sjsg 	mr = i915->mm.regions[id];
7025ca02815Sjsg 	if (!mr)
7035ca02815Sjsg 		return false;
7045ca02815Sjsg 
7051bb76ff1Sjsg 	if (!IS_ALIGNED(obj->base.size, mr->min_page_size))
7061bb76ff1Sjsg 		return false;
7071bb76ff1Sjsg 
7085ca02815Sjsg 	if (obj->mm.region == mr)
7095ca02815Sjsg 		return true;
7105ca02815Sjsg 
7115ca02815Sjsg 	if (!i915_gem_object_evictable(obj))
7125ca02815Sjsg 		return false;
7135ca02815Sjsg 
7145ca02815Sjsg 	if (!obj->ops->migrate)
7155ca02815Sjsg 		return false;
7165ca02815Sjsg 
7175ca02815Sjsg 	if (!(obj->flags & I915_BO_ALLOC_USER))
7185ca02815Sjsg 		return true;
7195ca02815Sjsg 
7205ca02815Sjsg 	if (num_allowed == 0)
7215ca02815Sjsg 		return false;
7225ca02815Sjsg 
7235ca02815Sjsg 	for (i = 0; i < num_allowed; ++i) {
7245ca02815Sjsg 		if (mr == obj->mm.placements[i])
7255ca02815Sjsg 			return true;
7265ca02815Sjsg 	}
7275ca02815Sjsg 
7285ca02815Sjsg 	return false;
7295ca02815Sjsg }
7305ca02815Sjsg 
7315ca02815Sjsg /**
7325ca02815Sjsg  * i915_gem_object_migrate - Migrate an object to the desired region id
7335ca02815Sjsg  * @obj: The object to migrate.
7345ca02815Sjsg  * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
7355ca02815Sjsg  * not be successful in evicting other objects to make room for this object.
7365ca02815Sjsg  * @id: The region id to migrate to.
7375ca02815Sjsg  *
7385ca02815Sjsg  * Attempt to migrate the object to the desired memory region. The
7395ca02815Sjsg  * object backend must support migration and the object may not be
7405ca02815Sjsg  * pinned, (explicitly pinned pages or pinned vmas). The object must
7415ca02815Sjsg  * be locked.
7425ca02815Sjsg  * On successful completion, the object will have pages pointing to
7435ca02815Sjsg  * memory in the new region, but an async migration task may not have
7445ca02815Sjsg  * completed yet, and to accomplish that, i915_gem_object_wait_migration()
7455ca02815Sjsg  * must be called.
7465ca02815Sjsg  *
7475ca02815Sjsg  * Note: the @ww parameter is not used yet, but included to make sure
7485ca02815Sjsg  * callers put some effort into obtaining a valid ww ctx if one is
7495ca02815Sjsg  * available.
7505ca02815Sjsg  *
7515ca02815Sjsg  * Return: 0 on success. Negative error code on failure. In particular may
7525ca02815Sjsg  * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
7535ca02815Sjsg  * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
7545ca02815Sjsg  * -EBUSY if the object is pinned.
7555ca02815Sjsg  */
7565ca02815Sjsg int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
7575ca02815Sjsg 			    struct i915_gem_ww_ctx *ww,
7585ca02815Sjsg 			    enum intel_region_id id)
7595ca02815Sjsg {
7601bb76ff1Sjsg 	return __i915_gem_object_migrate(obj, ww, id, obj->flags);
7611bb76ff1Sjsg }
7621bb76ff1Sjsg 
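/*
 * Usage sketch of the migrate-then-wait protocol described above, under a ww
 * transaction (illustrative; the target region is an arbitrary choice, and the
 * ww/wait helpers come from elsewhere in the driver):
 *
 *	for_i915_gem_ww(&ww, err, true) {
 *		err = i915_gem_object_lock(obj, &ww);
 *		if (err)
 *			continue;
 *		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
 *		if (err)
 *			continue;
 *		err = i915_gem_object_wait_migration(obj, 0);
 *	}
 */
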
7631bb76ff1Sjsg /**
7641bb76ff1Sjsg  * __i915_gem_object_migrate - Migrate an object to the desired region id, with
7651bb76ff1Sjsg  * control of the extra flags
7661bb76ff1Sjsg  * @obj: The object to migrate.
7671bb76ff1Sjsg  * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
7681bb76ff1Sjsg  * not be successful in evicting other objects to make room for this object.
7691bb76ff1Sjsg  * @id: The region id to migrate to.
7701bb76ff1Sjsg  * @flags: The object flags. Normally just obj->flags.
7711bb76ff1Sjsg  *
7721bb76ff1Sjsg  * Attempt to migrate the object to the desired memory region. The
7731bb76ff1Sjsg  * object backend must support migration and the object may not be
7741bb76ff1Sjsg  * pinned, (explicitly pinned pages or pinned vmas). The object must
7751bb76ff1Sjsg  * be locked.
7761bb76ff1Sjsg  * On successful completion, the object will have pages pointing to
7771bb76ff1Sjsg  * memory in the new region, but an async migration task may not have
7781bb76ff1Sjsg  * completed yet, and to accomplish that, i915_gem_object_wait_migration()
7791bb76ff1Sjsg  * must be called.
7801bb76ff1Sjsg  *
7811bb76ff1Sjsg  * Note: the @ww parameter is not used yet, but included to make sure
7821bb76ff1Sjsg  * callers put some effort into obtaining a valid ww ctx if one is
7831bb76ff1Sjsg  * available.
7841bb76ff1Sjsg  *
7851bb76ff1Sjsg  * Return: 0 on success. Negative error code on failure. In particular may
7861bb76ff1Sjsg  * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
7871bb76ff1Sjsg  * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
7881bb76ff1Sjsg  * -EBUSY if the object is pinned.
7891bb76ff1Sjsg  */
7901bb76ff1Sjsg int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
7911bb76ff1Sjsg 			      struct i915_gem_ww_ctx *ww,
7921bb76ff1Sjsg 			      enum intel_region_id id,
7931bb76ff1Sjsg 			      unsigned int flags)
7941bb76ff1Sjsg {
7955ca02815Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
7965ca02815Sjsg 	struct intel_memory_region *mr;
7975ca02815Sjsg 
7985ca02815Sjsg 	GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
7995ca02815Sjsg 	GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
8005ca02815Sjsg 	assert_object_held(obj);
8015ca02815Sjsg 
8025ca02815Sjsg 	mr = i915->mm.regions[id];
8035ca02815Sjsg 	GEM_BUG_ON(!mr);
8045ca02815Sjsg 
8055ca02815Sjsg 	if (!i915_gem_object_can_migrate(obj, id))
8065ca02815Sjsg 		return -EINVAL;
8075ca02815Sjsg 
8085ca02815Sjsg 	if (!obj->ops->migrate) {
8095ca02815Sjsg 		if (GEM_WARN_ON(obj->mm.region != mr))
8105ca02815Sjsg 			return -EINVAL;
8115ca02815Sjsg 		return 0;
8125ca02815Sjsg 	}
8135ca02815Sjsg 
8141bb76ff1Sjsg 	return obj->ops->migrate(obj, mr, flags);
8155ca02815Sjsg }
8165ca02815Sjsg 
8175ca02815Sjsg /**
8185ca02815Sjsg  * i915_gem_object_placement_possible - Check whether the object can be
8195ca02815Sjsg  * placed at certain memory type
8205ca02815Sjsg  * @obj: Pointer to the object
8215ca02815Sjsg  * @type: The memory type to check
8225ca02815Sjsg  *
8235ca02815Sjsg  * Return: True if the object can be placed in @type. False otherwise.
8245ca02815Sjsg  */
8255ca02815Sjsg bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
8265ca02815Sjsg 					enum intel_memory_type type)
8275ca02815Sjsg {
8285ca02815Sjsg 	unsigned int i;
8295ca02815Sjsg 
8305ca02815Sjsg 	if (!obj->mm.n_placements) {
8315ca02815Sjsg 		switch (type) {
8325ca02815Sjsg 		case INTEL_MEMORY_LOCAL:
8335ca02815Sjsg 			return i915_gem_object_has_iomem(obj);
8345ca02815Sjsg 		case INTEL_MEMORY_SYSTEM:
8355ca02815Sjsg 			return i915_gem_object_has_pages(obj);
8365ca02815Sjsg 		default:
8375ca02815Sjsg 			/* Ignore stolen for now */
8385ca02815Sjsg 			GEM_BUG_ON(1);
8395ca02815Sjsg 			return false;
8405ca02815Sjsg 		}
8415ca02815Sjsg 	}
8425ca02815Sjsg 
8435ca02815Sjsg 	for (i = 0; i < obj->mm.n_placements; i++) {
8445ca02815Sjsg 		if (obj->mm.placements[i]->type == type)
8455ca02815Sjsg 			return true;
8465ca02815Sjsg 	}
8475ca02815Sjsg 
8485ca02815Sjsg 	return false;
8495ca02815Sjsg }
8505ca02815Sjsg 
8511bb76ff1Sjsg /**
8521bb76ff1Sjsg  * i915_gem_object_needs_ccs_pages - Check whether the object requires extra
8531bb76ff1Sjsg  * pages when placed in system-memory, in order to save and later restore the
8541bb76ff1Sjsg  * flat-CCS aux state when the object is moved between local-memory and
8551bb76ff1Sjsg  * system-memory
8561bb76ff1Sjsg  * @obj: Pointer to the object
8571bb76ff1Sjsg  *
8581bb76ff1Sjsg  * Return: True if the object needs extra ccs pages. False otherwise.
8591bb76ff1Sjsg  */
8601bb76ff1Sjsg bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
8611bb76ff1Sjsg {
8621bb76ff1Sjsg 	bool lmem_placement = false;
8631bb76ff1Sjsg 	int i;
8641bb76ff1Sjsg 
8651bb76ff1Sjsg 	if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
8661bb76ff1Sjsg 		return false;
8671bb76ff1Sjsg 
868889aef23Sjsg 	if (obj->flags & I915_BO_ALLOC_CCS_AUX)
869889aef23Sjsg 		return true;
870889aef23Sjsg 
8711bb76ff1Sjsg 	for (i = 0; i < obj->mm.n_placements; i++) {
8721bb76ff1Sjsg 		/* Compression is not allowed for the objects with smem placement */
8731bb76ff1Sjsg 		if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
8741bb76ff1Sjsg 			return false;
8751bb76ff1Sjsg 		if (!lmem_placement &&
8761bb76ff1Sjsg 		    obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
8771bb76ff1Sjsg 			lmem_placement = true;
8781bb76ff1Sjsg 	}
8791bb76ff1Sjsg 
8801bb76ff1Sjsg 	return lmem_placement;
8811bb76ff1Sjsg }
8821bb76ff1Sjsg 
883c349dbc7Sjsg void i915_gem_init__objects(struct drm_i915_private *i915)
884c349dbc7Sjsg {
885c349dbc7Sjsg 	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
886c349dbc7Sjsg }
887c349dbc7Sjsg 
8885ca02815Sjsg void i915_objects_module_exit(void)
889c349dbc7Sjsg {
890c349dbc7Sjsg #ifdef __linux__
8915ca02815Sjsg 	kmem_cache_destroy(slab_objects);
892c349dbc7Sjsg #else
8935ca02815Sjsg 	pool_destroy(&slab_objects);
894c349dbc7Sjsg #endif
895c349dbc7Sjsg }
896c349dbc7Sjsg 
8975ca02815Sjsg int __init i915_objects_module_init(void)
898c349dbc7Sjsg {
899c349dbc7Sjsg #ifdef __linux__
9005ca02815Sjsg 	slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
9015ca02815Sjsg 	if (!slab_objects)
902c349dbc7Sjsg 		return -ENOMEM;
903c349dbc7Sjsg #else
9045ca02815Sjsg 	pool_init(&slab_objects, sizeof(struct drm_i915_gem_object),
9050f557061Sjsg 	    CACHELINESIZE, IPL_NONE, 0, "drmobj", NULL);
906c349dbc7Sjsg #endif
907c349dbc7Sjsg 
908c349dbc7Sjsg 	return 0;
909c349dbc7Sjsg }
910c349dbc7Sjsg 
9115ca02815Sjsg static const struct drm_gem_object_funcs i915_gem_object_funcs = {
9125ca02815Sjsg 	.free = i915_gem_free_object,
9135ca02815Sjsg 	.close = i915_gem_close_object,
9145ca02815Sjsg 	.export = i915_gem_prime_export,
9155ca02815Sjsg };
9165ca02815Sjsg 
9171bb76ff1Sjsg /**
9181bb76ff1Sjsg  * i915_gem_object_get_moving_fence - Get the object's moving fence if any
9191bb76ff1Sjsg  * @obj: The object whose moving fence to get.
9201bb76ff1Sjsg  * @fence: The resulting fence
9211bb76ff1Sjsg  *
9221bb76ff1Sjsg  * A non-signaled moving fence means that there is an async operation
9231bb76ff1Sjsg  * pending on the object that needs to be waited on before setting up
9241bb76ff1Sjsg  * any GPU- or CPU PTEs to the object's pages.
9251bb76ff1Sjsg  *
9261bb76ff1Sjsg  * Return: Negative error code or 0 for success.
9271bb76ff1Sjsg  */
9281bb76ff1Sjsg int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
9291bb76ff1Sjsg 				     struct dma_fence **fence)
9301bb76ff1Sjsg {
9311bb76ff1Sjsg 	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
9321bb76ff1Sjsg 				      fence);
9331bb76ff1Sjsg }
9341bb76ff1Sjsg 
9351bb76ff1Sjsg /**
9361bb76ff1Sjsg  * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any
9371bb76ff1Sjsg  * @obj: The object whose moving fence to wait for.
9381bb76ff1Sjsg  * @intr: Whether to wait interruptible.
9391bb76ff1Sjsg  *
9401bb76ff1Sjsg  * If the moving fence signaled without an error, it is detached from the
9411bb76ff1Sjsg  * object and put.
9421bb76ff1Sjsg  *
9431bb76ff1Sjsg  * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted,
9441bb76ff1Sjsg  * negative error code if the async operation represented by the
9451bb76ff1Sjsg  * moving fence failed.
9461bb76ff1Sjsg  */
9471bb76ff1Sjsg int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
9481bb76ff1Sjsg 				      bool intr)
9491bb76ff1Sjsg {
9501bb76ff1Sjsg 	long ret;
9511bb76ff1Sjsg 
9521bb76ff1Sjsg 	assert_object_held(obj);
9531bb76ff1Sjsg 	ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
9541bb76ff1Sjsg 	ret = dma_resv_wait_timeout(obj->base. resv, DMA_RESV_USAGE_KERNEL,
9551bb76ff1Sjsg 				    intr, MAX_SCHEDULE_TIMEOUT);
9561bb76ff1Sjsg 	if (!ret)
9571bb76ff1Sjsg 		ret = -ETIME;
9581bb76ff1Sjsg 	else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
9591bb76ff1Sjsg 		ret = -EIO;
9601bb76ff1Sjsg 
9611bb76ff1Sjsg 	return ret < 0 ? ret : 0;
9621bb76ff1Sjsg }
9631bb76ff1Sjsg 
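/*
 * Usage sketch (illustrative caller): wait out any pending move before the
 * object's pages are exposed through GPU or CPU PTEs, per the kernel-doc
 * above:
 *
 *	assert_object_held(obj);
 *	err = i915_gem_object_wait_moving_fence(obj, true);
 *	if (err)
 *		return err;
 *	(the pages may now be bound or mapped)
 */
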
964*f005ef32Sjsg /*
9651bb76ff1Sjsg  * i915_gem_object_has_unknown_state - Return true if the object backing pages are
9661bb76ff1Sjsg  * in an unknown_state. This means that userspace must NEVER be allowed to touch
9671bb76ff1Sjsg  * the pages, with either the GPU or CPU.
9681bb76ff1Sjsg  *
9691bb76ff1Sjsg  * ONLY valid to be called after ensuring that all kernel fences have signalled
9701bb76ff1Sjsg  * (in particular the fence for moving/clearing the object).
9711bb76ff1Sjsg  */
9721bb76ff1Sjsg bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
9731bb76ff1Sjsg {
9741bb76ff1Sjsg 	/*
9751bb76ff1Sjsg 	 * The below barrier pairs with the dma_fence_signal() in
9761bb76ff1Sjsg 	 * __memcpy_work(). We should only sample the unknown_state after all
9771bb76ff1Sjsg 	 * the kernel fences have signalled.
9781bb76ff1Sjsg 	 */
9791bb76ff1Sjsg 	smp_rmb();
9801bb76ff1Sjsg 	return obj->mm.unknown_state;
9811bb76ff1Sjsg }
9821bb76ff1Sjsg 
983c349dbc7Sjsg #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
984c349dbc7Sjsg #include "selftests/huge_gem_object.c"
985c349dbc7Sjsg #include "selftests/huge_pages.c"
9865ca02815Sjsg #include "selftests/i915_gem_migrate.c"
987c349dbc7Sjsg #include "selftests/i915_gem_object.c"
988c349dbc7Sjsg #include "selftests/i915_gem_coherency.c"
989c349dbc7Sjsg #endif