/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "gt/intel_tlb.h"

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

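/*
 * Install a freshly acquired sg_table on the object: reset the cached
 * page/dma lookup iterators, record the physical and GTT-usable page sizes,
 * and (unless the tiling quirk pins the pages) put the object on the shrink
 * or purge list so the shrinker can find it.
 */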
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		WARN_ON_ONCE(IS_DGFX(i915));
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
	GEM_BUG_ON(!obj->mm.page_sizes.phys);

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

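/*
 * Ask the backend (obj->ops->get_pages) to populate obj->mm.pages.
 * Objects already marked purgeable are refused with -EFAULT. The caller
 * must hold the object lock (shared is sufficient).
 */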
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times, and each call must be balanced by a call to
 * i915_gem_object_unpin_pages() - the pages are only released once they
 * are no longer pinned, either as a result of memory pressure (reaping
 * pages under the shrinker) or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

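/*
 * i915_gem_object_pin_pages_unlocked() is the convenience form for callers
 * that do not already hold the object lock: it takes the ww lock itself,
 * pins the pages and backs off/retries on -EDEADLK. A rough usage sketch
 * (error handling trimmed):
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *	... access the pages, e.g. via i915_gem_object_get_page() ...
 *	i915_gem_object_unpin_pages(obj);
 */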
int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	if (obj->ops->truncate)
		return obj->ops->truncate(obj);

	return 0;
}

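/*
 * Drop every entry cached in the get_page/get_dma_page radix trees; the
 * iterators are re-seeded the next time pages are set on the object.
 */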
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

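/*
 * Tear down a kernel mapping created by the map helpers below; only
 * vmap'ed addresses need an explicit vunmap.
 */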
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr, obj->base.size);
}

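/*
 * Flush any TLB invalidations that were deferred for this object: each GT
 * with a non-zero seqno in obj->mm.tlb gets a full TLB invalidation, and
 * the stored seqno is cleared.
 */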
static void flush_tlb_invalidate(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_gt *gt;
	int id;

	for_each_gt(gt, i915, id) {
		if (!obj->mm.tlb[id])
			continue;

		intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
		obj->mm.tlb[id] = 0;
	}
}

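/*
 * Detach the sg_table from the object without returning it to the backend:
 * undo the volatile madvise, pull the object off the shrinker lists, drop
 * any kernel mapping, reset the cached sg iterators and flush deferred TLB
 * invalidations. The caller is responsible for the returned pages.
 */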
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	if (!i915_gem_object_has_self_managed_shrink_list(obj))
		i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	flush_tlb_invalidate(obj);

	return pages;
}

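/*
 * Release the object's pages back to the backend. Fails with -EBUSY while
 * any page pin is still outstanding.
 */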
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct vm_page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTEs (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
#ifdef notyet
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
#endif
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

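/*
 * Like i915_gem_object_map_page(), but for objects without struct pages
 * (e.g. local memory): build a write-combined vmap from the PFNs of the
 * region's iomap.
 */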
static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	GEM_BUG_ON(type != I915_MAP_WC);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_has_iomem(obj))
		return ERR_PTR(-ENXIO);

	if (WARN_ON_ONCE(obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return ERR_PTR(-EINVAL);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	/*
	 * For discrete, our CPU mappings need to be consistent in order to
	 * function correctly on !x86. When mapping things through TTM, we use
	 * the same rules to determine the caching type.
	 *
	 * The caching rules, starting from DG1:
	 *
	 *	- If the object can be placed in device local-memory, then the
	 *	  pages should be allocated and mapped as write-combined only.
	 *
	 *	- Everything else is always allocated and mapped as write-back,
	 *	  with the guarantee that everything is also coherent with the
	 *	  GPU.
	 *
	 * Internal users of lmem are already expected to get this right, so no
	 * fudging needed there.
	 */
	if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
		if (type != I915_MAP_WC && !obj->mm.n_placements) {
			ptr = ERR_PTR(-ENODEV);
			goto err_unpin;
		}

		type = I915_MAP_WC;
	} else if (IS_DGFX(to_i915(obj->base.dev))) {
		type = I915_MAP_WB;
	}

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		err = i915_gem_object_wait_moving_fence(obj, true);
		if (err) {
			ptr = ERR_PTR(err);
			goto err_unpin;
		}

		if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

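/* As i915_gem_object_pin_map(), but taking (and dropping) the object lock. */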
void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

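/*
 * Flush CPU writes made through a pinned kernel mapping so they are visible
 * to the GPU: mark the object dirty and clflush the given range unless the
 * mapping is write-combined or the object is already coherent for writes.
 */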
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

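/*
 * Find the scatterlist entry covering page (or dma page) index n, and return
 * the offset of that page within the entry via *offset. Results are cached
 * in the iterator's radix tree so repeated and backwards lookups stay cheap.
 */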
struct scatterlist *
__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
				   struct i915_gem_object_page_iter *iter,
				   pgoff_t n,
				   unsigned int *offset)
{
	const bool dma = iter == &obj->mm.get_dma_page ||
			 iter == &obj->ttm.get_io_page;
	unsigned int idx, count;
	struct scatterlist *sg;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	if (!i915_gem_object_has_pinned_pages(obj))
		assert_object_held(obj);

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

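/*
 * Return the vm_page backing page index n of the object; only valid for
 * objects that are backed by struct pages.
 */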
struct vm_page *
__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct vm_page *
__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
	struct vm_page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

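/*
 * Return the DMA address of page index n, and optionally the number of
 * bytes remaining in its sg entry from that page onwards via *len.
 */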
dma_addr_t
__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				      pgoff_t n, unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}