xref: /openbsd-src/sys/dev/pci/drm/i915/gem/i915_gem_mman.c (revision f46a341eeea409411cd35db6a031d18c6a612c5d)
1c349dbc7Sjsg /*
2c349dbc7Sjsg  * SPDX-License-Identifier: MIT
3c349dbc7Sjsg  *
4c349dbc7Sjsg  * Copyright © 2014-2016 Intel Corporation
5c349dbc7Sjsg  */
6c349dbc7Sjsg 
7c349dbc7Sjsg #include <linux/anon_inodes.h>
8c349dbc7Sjsg #include <linux/mman.h>
9c349dbc7Sjsg #include <linux/pfn_t.h>
10c349dbc7Sjsg #include <linux/sizes.h>
11c349dbc7Sjsg 
121bb76ff1Sjsg #include <drm/drm_cache.h>
131bb76ff1Sjsg 
14c349dbc7Sjsg #include "gt/intel_gt.h"
15c349dbc7Sjsg #include "gt/intel_gt_requests.h"
16c349dbc7Sjsg 
17c349dbc7Sjsg #include "i915_drv.h"
181bb76ff1Sjsg #include "i915_gem_evict.h"
19c349dbc7Sjsg #include "i915_gem_gtt.h"
20c349dbc7Sjsg #include "i915_gem_ioctls.h"
21c349dbc7Sjsg #include "i915_gem_object.h"
22c349dbc7Sjsg #include "i915_gem_mman.h"
231bb76ff1Sjsg #include "i915_mm.h"
24c349dbc7Sjsg #include "i915_trace.h"
25c349dbc7Sjsg #include "i915_user_extensions.h"
265ca02815Sjsg #include "i915_gem_ttm.h"
27c349dbc7Sjsg #include "i915_vma.h"
28c349dbc7Sjsg 
29c349dbc7Sjsg #ifdef __linux__
30c349dbc7Sjsg static inline bool
31c349dbc7Sjsg __vma_matches(struct vm_area_struct *vma, struct file *filp,
32c349dbc7Sjsg 	      unsigned long addr, unsigned long size)
33c349dbc7Sjsg {
34c349dbc7Sjsg 	if (vma->vm_file != filp)
35c349dbc7Sjsg 		return false;
36c349dbc7Sjsg 
37c349dbc7Sjsg 	return vma->vm_start == addr &&
38c349dbc7Sjsg 	       (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
39c349dbc7Sjsg }
40c349dbc7Sjsg #endif
41c349dbc7Sjsg 
42c349dbc7Sjsg /**
43c349dbc7Sjsg  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
44c349dbc7Sjsg  *			 it is mapped to.
45c349dbc7Sjsg  * @dev: drm device
46c349dbc7Sjsg  * @data: ioctl data blob
47c349dbc7Sjsg  * @file: drm file
48c349dbc7Sjsg  *
49c349dbc7Sjsg  * While the mapping holds a reference on the contents of the object, it doesn't
50c349dbc7Sjsg  * imply a ref on the object itself.
51c349dbc7Sjsg  *
52c349dbc7Sjsg  * IMPORTANT:
53c349dbc7Sjsg  *
54c349dbc7Sjsg  * DRM driver writers who look at this function as an example for how to do GEM
55c349dbc7Sjsg  * mmap support, please don't implement mmap support like this. The modern way
56c349dbc7Sjsg  * to implement DRM mmap support is with an mmap offset ioctl (like
57c349dbc7Sjsg  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
58c349dbc7Sjsg  * That way debug tooling like valgrind will understand what's going on; hiding
59c349dbc7Sjsg  * the mmap call in a driver private ioctl will break that. The i915 driver only
60c349dbc7Sjsg  * does cpu mmaps this way because we didn't know better.
61c349dbc7Sjsg  */
62c349dbc7Sjsg int
63c349dbc7Sjsg i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
64c349dbc7Sjsg 		    struct drm_file *file)
65c349dbc7Sjsg {
665ca02815Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
67c349dbc7Sjsg 	struct drm_i915_gem_mmap *args = data;
68c349dbc7Sjsg 	struct drm_i915_gem_object *obj;
69c349dbc7Sjsg 	vaddr_t addr;
70c349dbc7Sjsg 	vsize_t size;
71c349dbc7Sjsg 	int ret;
72c349dbc7Sjsg 
73c349dbc7Sjsg #ifdef __OpenBSD__
74c349dbc7Sjsg 	if (args->size == 0 || args->offset & PAGE_MASK)
75c349dbc7Sjsg 		return -EINVAL;
76c349dbc7Sjsg 	size = round_page(args->size);
77c349dbc7Sjsg 	if (args->offset + size < args->offset)
78c349dbc7Sjsg 		return -EINVAL;
79c349dbc7Sjsg #endif
80c349dbc7Sjsg 
815ca02815Sjsg 	/*
825ca02815Sjsg 	 * mmap ioctl is disallowed for all discrete platforms,
835ca02815Sjsg 	 * and for all platforms with GRAPHICS_VER > 12.
845ca02815Sjsg 	 */
857b7e933fSjsg 	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
865ca02815Sjsg 		return -EOPNOTSUPP;
875ca02815Sjsg 
88c349dbc7Sjsg 	if (args->flags & ~(I915_MMAP_WC))
89c349dbc7Sjsg 		return -EINVAL;
90c349dbc7Sjsg 
911bb76ff1Sjsg 	if (args->flags & I915_MMAP_WC && !pat_enabled())
92c349dbc7Sjsg 		return -ENODEV;
93c349dbc7Sjsg 
94c349dbc7Sjsg 	obj = i915_gem_object_lookup(file, args->handle);
95c349dbc7Sjsg 	if (!obj)
96c349dbc7Sjsg 		return -ENOENT;
97c349dbc7Sjsg 
98c349dbc7Sjsg 	/* prime objects have no backing filp to GEM mmap
99c349dbc7Sjsg 	 * pages from.
100c349dbc7Sjsg 	 */
101ad8b1aafSjsg #ifdef __linux__
102c349dbc7Sjsg 	if (!obj->base.filp) {
103c349dbc7Sjsg 		addr = -ENXIO;
104c349dbc7Sjsg 		goto err;
105c349dbc7Sjsg 	}
106ad8b1aafSjsg #else
107ad8b1aafSjsg 	if (!obj->base.uao) {
108ad8b1aafSjsg 		addr = -ENXIO;
109ad8b1aafSjsg 		goto err;
110ad8b1aafSjsg 	}
111ad8b1aafSjsg #endif
112c349dbc7Sjsg 
113c349dbc7Sjsg 	if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
114c349dbc7Sjsg 		addr = -EINVAL;
115c349dbc7Sjsg 		goto err;
116c349dbc7Sjsg 	}
117c349dbc7Sjsg 
118c349dbc7Sjsg #ifdef __linux__
119c349dbc7Sjsg 	addr = vm_mmap(obj->base.filp, 0, args->size,
120c349dbc7Sjsg 		       PROT_READ | PROT_WRITE, MAP_SHARED,
121c349dbc7Sjsg 		       args->offset);
122c349dbc7Sjsg 	if (IS_ERR_VALUE(addr))
123c349dbc7Sjsg 		goto err;
124c349dbc7Sjsg 
125c349dbc7Sjsg 	if (args->flags & I915_MMAP_WC) {
126c349dbc7Sjsg 		struct mm_struct *mm = current->mm;
127c349dbc7Sjsg 		struct vm_area_struct *vma;
128c349dbc7Sjsg 
129ad8b1aafSjsg 		if (mmap_write_lock_killable(mm)) {
130c349dbc7Sjsg 			addr = -EINTR;
131c349dbc7Sjsg 			goto err;
132c349dbc7Sjsg 		}
133c349dbc7Sjsg 		vma = find_vma(mm, addr);
134c349dbc7Sjsg 		if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
135c349dbc7Sjsg 			vma->vm_page_prot =
136c349dbc7Sjsg 				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
137c349dbc7Sjsg 		else
138c349dbc7Sjsg 			addr = -ENOMEM;
139ad8b1aafSjsg 		mmap_write_unlock(mm);
140c349dbc7Sjsg 		if (IS_ERR_VALUE(addr))
141c349dbc7Sjsg 			goto err;
142c349dbc7Sjsg 	}
143c349dbc7Sjsg 	i915_gem_object_put(obj);
144c349dbc7Sjsg #else
145c349dbc7Sjsg 	addr = 0;
146b304be39Sjsg 	uao_reference(obj->base.uao);
147c349dbc7Sjsg 	ret = -uvm_map(&curproc->p_vmspace->vm_map, &addr, size,
148c349dbc7Sjsg 	    obj->base.uao, args->offset, 0, UVM_MAPFLAG(PROT_READ | PROT_WRITE,
149c349dbc7Sjsg 	    PROT_READ | PROT_WRITE, MAP_INHERIT_SHARE, MADV_RANDOM,
150c349dbc7Sjsg 	    (args->flags & I915_MMAP_WC) ? UVM_FLAG_WC : 0));
151b304be39Sjsg 	if (ret != 0)
152b304be39Sjsg 		uao_detach(obj->base.uao);
153c349dbc7Sjsg 	i915_gem_object_put(obj);
154c349dbc7Sjsg 	if (ret)
155c349dbc7Sjsg 		return ret;
156c349dbc7Sjsg #endif
157c349dbc7Sjsg 
158c349dbc7Sjsg 	args->addr_ptr = (u64)addr;
159c349dbc7Sjsg 	return 0;
160c349dbc7Sjsg 
161c349dbc7Sjsg err:
162c349dbc7Sjsg 	i915_gem_object_put(obj);
163c349dbc7Sjsg 	return addr;
164c349dbc7Sjsg }
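
/*
 * Illustrative userspace sketch (not part of the driver) of how the legacy
 * mmap ioctl implemented above is typically driven.  The drm fd, GEM handle
 * and length are assumptions supplied by the caller; drmIoctl() is the libdrm
 * wrapper around ioctl(2).
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,		(GEM handle to map)
 *		.offset = 0,			(byte offset into the object)
 *		.size = length,			(length of the mapping)
 *		.flags = I915_MMAP_WC,		(optional write-combining)
 *	};
 *	void *ptr = NULL;
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;
 */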
165c349dbc7Sjsg 
166c349dbc7Sjsg static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
167c349dbc7Sjsg {
168c349dbc7Sjsg 	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
169c349dbc7Sjsg }
170c349dbc7Sjsg 
171c349dbc7Sjsg /**
172c349dbc7Sjsg  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
173c349dbc7Sjsg  *
174c349dbc7Sjsg  * A history of the GTT mmap interface:
175c349dbc7Sjsg  *
176c349dbc7Sjsg  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
177c349dbc7Sjsg  *     aligned and suitable for fencing, and still fit into the available
178c349dbc7Sjsg  *     mappable space left by the pinned display objects. A classic problem
179c349dbc7Sjsg  *     we called the page-fault-of-doom where we would ping-pong between
180c349dbc7Sjsg  *     two objects that could not fit inside the GTT and so the memcpy
181c349dbc7Sjsg  *     would page one object in at the expense of the other between every
182c349dbc7Sjsg  *     single byte.
183c349dbc7Sjsg  *
184c349dbc7Sjsg  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
185c349dbc7Sjsg  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
186c349dbc7Sjsg  *     object is too large for the available space (or simply too large
187c349dbc7Sjsg  *     for the mappable aperture!), a view is created instead and faulted
188c349dbc7Sjsg  *     into userspace. (This view is aligned and sized appropriately for
189c349dbc7Sjsg  *     fenced access.)
190c349dbc7Sjsg  *
191c349dbc7Sjsg  * 2 - Recognise WC as a separate cache domain so that we can flush the
192c349dbc7Sjsg  *     delayed writes via GTT before performing direct access via WC.
193c349dbc7Sjsg  *
194c349dbc7Sjsg  * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
195c349dbc7Sjsg  *     pagefault; swapin remains transparent.
196c349dbc7Sjsg  *
197c349dbc7Sjsg  * 4 - Support multiple fault handlers per object depending on object's
198c349dbc7Sjsg  *     backing storage (a.k.a. MMAP_OFFSET).
199c349dbc7Sjsg  *
200c349dbc7Sjsg  * Restrictions:
201c349dbc7Sjsg  *
202c349dbc7Sjsg  *  * snoopable objects cannot be accessed via the GTT. Doing so can cause machine
203c349dbc7Sjsg  *    hangs on some architectures, corruption on others. An attempt to service
204c349dbc7Sjsg  *    a GTT page fault from a snoopable object will generate a SIGBUS.
205c349dbc7Sjsg  *
206c349dbc7Sjsg  *  * the object must be able to fit into RAM (physical memory, though not
207c349dbc7Sjsg  *    limited to the mappable aperture).
208c349dbc7Sjsg  *
209c349dbc7Sjsg  *
210c349dbc7Sjsg  * Caveats:
211c349dbc7Sjsg  *
212c349dbc7Sjsg  *  * a new GTT page fault will synchronize rendering from the GPU and flush
213c349dbc7Sjsg  *    all data to system memory. Subsequent access will not be synchronized.
214c349dbc7Sjsg  *
215c349dbc7Sjsg  *  * all mappings are revoked on runtime device suspend.
216c349dbc7Sjsg  *
217c349dbc7Sjsg  *  * there are only 8, 16 or 32 fence registers to share between all users
218c349dbc7Sjsg  *    (older machines require a fence register for display and blitter access
219c349dbc7Sjsg  *    as well). Contention for the fence registers will cause the previous users
220c349dbc7Sjsg  *    to be unmapped and any new access will generate new page faults.
221c349dbc7Sjsg  *
222c349dbc7Sjsg  *  * running out of memory while servicing a fault may generate a SIGBUS,
223c349dbc7Sjsg  *    rather than the expected SIGSEGV.
224c349dbc7Sjsg  */
225c349dbc7Sjsg int i915_gem_mmap_gtt_version(void)
226c349dbc7Sjsg {
227c349dbc7Sjsg 	return 4;
228c349dbc7Sjsg }
229c349dbc7Sjsg 
2301bb76ff1Sjsg static inline struct i915_gtt_view
231c349dbc7Sjsg compute_partial_view(const struct drm_i915_gem_object *obj,
232c349dbc7Sjsg 		     pgoff_t page_offset,
233c349dbc7Sjsg 		     unsigned int chunk)
234c349dbc7Sjsg {
2351bb76ff1Sjsg 	struct i915_gtt_view view;
236c349dbc7Sjsg 
237c349dbc7Sjsg 	if (i915_gem_object_is_tiled(obj))
238ad8b1aafSjsg 		chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
239c349dbc7Sjsg 
2401bb76ff1Sjsg 	view.type = I915_GTT_VIEW_PARTIAL;
241c349dbc7Sjsg 	view.partial.offset = rounddown(page_offset, chunk);
242c349dbc7Sjsg 	view.partial.size =
243c349dbc7Sjsg 		min_t(unsigned int, chunk,
244c349dbc7Sjsg 		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
245c349dbc7Sjsg 
246c349dbc7Sjsg 	/* If the partial covers the entire object, just create a normal VMA. */
247c349dbc7Sjsg 	if (chunk >= obj->base.size >> PAGE_SHIFT)
2481bb76ff1Sjsg 		view.type = I915_GTT_VIEW_NORMAL;
249c349dbc7Sjsg 
250c349dbc7Sjsg 	return view;
251c349dbc7Sjsg }
252c349dbc7Sjsg 
253c349dbc7Sjsg #ifdef __linux__
254c349dbc7Sjsg 
255c349dbc7Sjsg static vm_fault_t i915_error_to_vmf_fault(int err)
256c349dbc7Sjsg {
257c349dbc7Sjsg 	switch (err) {
258c349dbc7Sjsg 	default:
259c349dbc7Sjsg 		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
260ad8b1aafSjsg 		fallthrough;
261c349dbc7Sjsg 	case -EIO: /* shmemfs failure from swap device */
262c349dbc7Sjsg 	case -EFAULT: /* purged object */
263c349dbc7Sjsg 	case -ENODEV: /* bad object, how did you get here! */
264c349dbc7Sjsg 	case -ENXIO: /* unable to access backing store (on device) */
265c349dbc7Sjsg 		return VM_FAULT_SIGBUS;
266c349dbc7Sjsg 
267c349dbc7Sjsg 	case -ENOMEM: /* our allocation failure */
268c349dbc7Sjsg 		return VM_FAULT_OOM;
269c349dbc7Sjsg 
270c349dbc7Sjsg 	case 0:
271c349dbc7Sjsg 	case -EAGAIN:
272ad8b1aafSjsg 	case -ENOSPC: /* transient failure to evict? */
273d732525bSjsg 	case -ENOBUFS: /* temporarily out of fences? */
274c349dbc7Sjsg 	case -ERESTARTSYS:
275c349dbc7Sjsg 	case -EINTR:
276c349dbc7Sjsg 	case -EBUSY:
277c349dbc7Sjsg 		/*
278c349dbc7Sjsg 		 * EBUSY is ok: this just means that another thread
279c349dbc7Sjsg 		 * already did the job.
280c349dbc7Sjsg 		 */
281c349dbc7Sjsg 		return VM_FAULT_NOPAGE;
282c349dbc7Sjsg 	}
283c349dbc7Sjsg }
284c349dbc7Sjsg 
285c349dbc7Sjsg static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
286c349dbc7Sjsg {
287c349dbc7Sjsg 	struct vm_area_struct *area = vmf->vma;
288c349dbc7Sjsg 	struct i915_mmap_offset *mmo = area->vm_private_data;
289c349dbc7Sjsg 	struct drm_i915_gem_object *obj = mmo->obj;
290c349dbc7Sjsg 	resource_size_t iomap;
291c349dbc7Sjsg 	int err;
292c349dbc7Sjsg 
293c349dbc7Sjsg 	/* Sanity check that we allow writing into this object */
294c349dbc7Sjsg 	if (unlikely(i915_gem_object_is_readonly(obj) &&
295c349dbc7Sjsg 		     area->vm_flags & VM_WRITE))
296c349dbc7Sjsg 		return VM_FAULT_SIGBUS;
297c349dbc7Sjsg 
2985ca02815Sjsg 	if (i915_gem_object_lock_interruptible(obj, NULL))
2995ca02815Sjsg 		return VM_FAULT_NOPAGE;
3005ca02815Sjsg 
301c349dbc7Sjsg 	err = i915_gem_object_pin_pages(obj);
302c349dbc7Sjsg 	if (err)
303c349dbc7Sjsg 		goto out;
304c349dbc7Sjsg 
305c349dbc7Sjsg 	iomap = -1;
3065ca02815Sjsg 	if (!i915_gem_object_has_struct_page(obj)) {
307c349dbc7Sjsg 		iomap = obj->mm.region->iomap.base;
308c349dbc7Sjsg 		iomap -= obj->mm.region->region.start;
309c349dbc7Sjsg 	}
310c349dbc7Sjsg 
311c349dbc7Sjsg 	/* PTEs are revoked in obj->ops->put_pages() */
312c349dbc7Sjsg 	err = remap_io_sg(area,
313c349dbc7Sjsg 			  area->vm_start, area->vm_end - area->vm_start,
314c349dbc7Sjsg 			  obj->mm.pages->sgl, iomap);
315c349dbc7Sjsg 
316c349dbc7Sjsg 	if (area->vm_flags & VM_WRITE) {
317c349dbc7Sjsg 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
318c349dbc7Sjsg 		obj->mm.dirty = true;
319c349dbc7Sjsg 	}
320c349dbc7Sjsg 
321c349dbc7Sjsg 	i915_gem_object_unpin_pages(obj);
322c349dbc7Sjsg 
323c349dbc7Sjsg out:
3245ca02815Sjsg 	i915_gem_object_unlock(obj);
325c349dbc7Sjsg 	return i915_error_to_vmf_fault(err);
326c349dbc7Sjsg }
327c349dbc7Sjsg 
32808be9e2bSjsg static void set_address_limits(struct vm_area_struct *area,
32908be9e2bSjsg 			       struct i915_vma *vma,
33008be9e2bSjsg 			       unsigned long obj_offset,
33108be9e2bSjsg 			       unsigned long *start_vaddr,
33208be9e2bSjsg 			       unsigned long *end_vaddr)
33308be9e2bSjsg {
33408be9e2bSjsg 	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
33508be9e2bSjsg 	long start, end; /* memory boundaries */
33608be9e2bSjsg 
33708be9e2bSjsg 	/*
33808be9e2bSjsg 	 * Let's move into the ">> PAGE_SHIFT"
33908be9e2bSjsg 	 * domain to be sure not to lose bits
34008be9e2bSjsg 	 */
34108be9e2bSjsg 	vm_start = area->vm_start >> PAGE_SHIFT;
34208be9e2bSjsg 	vm_end = area->vm_end >> PAGE_SHIFT;
34308be9e2bSjsg 	vma_size = vma->size >> PAGE_SHIFT;
34408be9e2bSjsg 
34508be9e2bSjsg 	/*
34608be9e2bSjsg 	 * Calculate the memory boundaries by considering the offset
34708be9e2bSjsg 	 * provided by the user during memory mapping and the offset
34808be9e2bSjsg 	 * provided for the partial mapping.
34908be9e2bSjsg 	 */
35008be9e2bSjsg 	start = vm_start;
35108be9e2bSjsg 	start -= obj_offset;
35208be9e2bSjsg 	start += vma->gtt_view.partial.offset;
35308be9e2bSjsg 	end = start + vma_size;
35408be9e2bSjsg 
35508be9e2bSjsg 	start = max_t(long, start, vm_start);
35608be9e2bSjsg 	end = min_t(long, end, vm_end);
35708be9e2bSjsg 
35808be9e2bSjsg 	/* Let's move back into the "<< PAGE_SHIFT" domain */
35908be9e2bSjsg 	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
36008be9e2bSjsg 	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
36108be9e2bSjsg }
36208be9e2bSjsg 
363c349dbc7Sjsg static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
364c349dbc7Sjsg {
365c349dbc7Sjsg #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
366c349dbc7Sjsg 	struct vm_area_struct *area = vmf->vma;
367c349dbc7Sjsg 	struct i915_mmap_offset *mmo = area->vm_private_data;
368c349dbc7Sjsg 	struct drm_i915_gem_object *obj = mmo->obj;
369c349dbc7Sjsg 	struct drm_device *dev = obj->base.dev;
370c349dbc7Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
371c349dbc7Sjsg 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
3721bb76ff1Sjsg 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
373c349dbc7Sjsg 	bool write = area->vm_flags & VM_WRITE;
374ad8b1aafSjsg 	struct i915_gem_ww_ctx ww;
37508be9e2bSjsg 	unsigned long obj_offset;
37608be9e2bSjsg 	unsigned long start, end; /* memory boundaries */
377c349dbc7Sjsg 	intel_wakeref_t wakeref;
378c349dbc7Sjsg 	struct i915_vma *vma;
379c349dbc7Sjsg 	pgoff_t page_offset;
38008be9e2bSjsg 	unsigned long pfn;
381c349dbc7Sjsg 	int srcu;
382c349dbc7Sjsg 	int ret;
383c349dbc7Sjsg 
38408be9e2bSjsg 	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
385c349dbc7Sjsg 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
38608be9e2bSjsg 	page_offset += obj_offset;
387c349dbc7Sjsg 
388c349dbc7Sjsg 	trace_i915_gem_object_fault(obj, page_offset, true, write);
389c349dbc7Sjsg 
390c349dbc7Sjsg 	wakeref = intel_runtime_pm_get(rpm);
391c349dbc7Sjsg 
392ad8b1aafSjsg 	i915_gem_ww_ctx_init(&ww, true);
393ad8b1aafSjsg retry:
394ad8b1aafSjsg 	ret = i915_gem_object_lock(obj, &ww);
395c349dbc7Sjsg 	if (ret)
396c349dbc7Sjsg 		goto err_rpm;
397c349dbc7Sjsg 
398ad8b1aafSjsg 	/* Sanity check that we allow writing into this object */
399ad8b1aafSjsg 	if (i915_gem_object_is_readonly(obj) && write) {
400ad8b1aafSjsg 		ret = -EFAULT;
401ad8b1aafSjsg 		goto err_rpm;
402ad8b1aafSjsg 	}
403ad8b1aafSjsg 
404ad8b1aafSjsg 	ret = i915_gem_object_pin_pages(obj);
405ad8b1aafSjsg 	if (ret)
406ad8b1aafSjsg 		goto err_rpm;
407ad8b1aafSjsg 
408f005ef32Sjsg 	ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
409ad8b1aafSjsg 	if (ret)
410ad8b1aafSjsg 		goto err_pages;
411ad8b1aafSjsg 
412c349dbc7Sjsg 	/* Now pin it into the GTT as needed */
413ad8b1aafSjsg 	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
414c349dbc7Sjsg 					  PIN_MAPPABLE |
415c349dbc7Sjsg 					  PIN_NONBLOCK /* NOWARN */ |
416c349dbc7Sjsg 					  PIN_NOEVICT);
417ad8b1aafSjsg 	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
418c349dbc7Sjsg 		/* Use a partial view if it is bigger than available space */
4191bb76ff1Sjsg 		struct i915_gtt_view view =
420c349dbc7Sjsg 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
421c349dbc7Sjsg 		unsigned int flags;
422c349dbc7Sjsg 
423c349dbc7Sjsg 		flags = PIN_MAPPABLE | PIN_NOSEARCH;
4241bb76ff1Sjsg 		if (view.type == I915_GTT_VIEW_NORMAL)
425c349dbc7Sjsg 			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
426c349dbc7Sjsg 
427c349dbc7Sjsg 		/*
428c349dbc7Sjsg 		 * Userspace is now writing through an untracked VMA, abandon
429c349dbc7Sjsg 		 * all hope that the hardware is able to track future writes.
430c349dbc7Sjsg 		 */
431c349dbc7Sjsg 
432ad8b1aafSjsg 		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
433ad8b1aafSjsg 		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
434c349dbc7Sjsg 			flags = PIN_MAPPABLE;
4351bb76ff1Sjsg 			view.type = I915_GTT_VIEW_PARTIAL;
436ad8b1aafSjsg 			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
437c349dbc7Sjsg 		}
438c349dbc7Sjsg 
4391bb76ff1Sjsg 		/*
4401bb76ff1Sjsg 		 * The entire mappable GGTT is pinned? Unexpected!
4411bb76ff1Sjsg 		 * Try to evict the object we locked too, as normally we skip it
4421bb76ff1Sjsg 		 * due to lack of short term pinning inside execbuf.
4431bb76ff1Sjsg 		 */
4441bb76ff1Sjsg 		if (vma == ERR_PTR(-ENOSPC)) {
4451bb76ff1Sjsg 			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
4461bb76ff1Sjsg 			if (!ret) {
4476823e11cSjsg 				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
4481bb76ff1Sjsg 				mutex_unlock(&ggtt->vm.mutex);
4491bb76ff1Sjsg 			}
4501bb76ff1Sjsg 			if (ret)
4511bb76ff1Sjsg 				goto err_reset;
4521bb76ff1Sjsg 			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
4531bb76ff1Sjsg 		}
454c349dbc7Sjsg 	}
455c349dbc7Sjsg 	if (IS_ERR(vma)) {
456c349dbc7Sjsg 		ret = PTR_ERR(vma);
457c349dbc7Sjsg 		goto err_reset;
458c349dbc7Sjsg 	}
459c349dbc7Sjsg 
460c349dbc7Sjsg 	/* Access to snoopable pages through the GTT is incoherent. */
461f005ef32Sjsg 	/*
462f005ef32Sjsg 	 * For objects created by userspace through GEM_CREATE with pat_index
463f005ef32Sjsg 	 * set by the set_pat extension, coherency is managed by userspace; make
464f005ef32Sjsg 	 * sure we don't fail handling the vm fault by calling
465f005ef32Sjsg 	 * i915_gem_object_has_cache_level(), which always returns true for such
466f005ef32Sjsg 	 * objects. Otherwise this helper function would fall back to checking
467f005ef32Sjsg 	 * whether the object is un-cached.
468f005ef32Sjsg 	 */
469f005ef32Sjsg 	if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
470f005ef32Sjsg 	      HAS_LLC(i915))) {
471c349dbc7Sjsg 		ret = -EFAULT;
472c349dbc7Sjsg 		goto err_unpin;
473c349dbc7Sjsg 	}
474c349dbc7Sjsg 
475c349dbc7Sjsg 	ret = i915_vma_pin_fence(vma);
476c349dbc7Sjsg 	if (ret)
477c349dbc7Sjsg 		goto err_unpin;
478c349dbc7Sjsg 
47908be9e2bSjsg 	set_address_limits(area, vma, obj_offset, &start, &end);
48008be9e2bSjsg 
48108be9e2bSjsg 	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
48208be9e2bSjsg 	pfn += (start - area->vm_start) >> PAGE_SHIFT;
48308be9e2bSjsg 	pfn += obj_offset - vma->gtt_view.partial.offset;
48408be9e2bSjsg 
485c349dbc7Sjsg 	/* Finally, remap it using the new GTT offset */
48608be9e2bSjsg 	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
487c349dbc7Sjsg 	if (ret)
488c349dbc7Sjsg 		goto err_fence;
489c349dbc7Sjsg 
490c349dbc7Sjsg 	assert_rpm_wakelock_held(rpm);
491c349dbc7Sjsg 
492c349dbc7Sjsg 	/* Mark as being mmapped into userspace for later revocation */
4931bb76ff1Sjsg 	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
494c349dbc7Sjsg 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
4951bb76ff1Sjsg 		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
4961bb76ff1Sjsg 	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
497c349dbc7Sjsg 
498c349dbc7Sjsg 	/* Track the mmo associated with the fenced vma */
499c349dbc7Sjsg 	vma->mmo = mmo;
500c349dbc7Sjsg 
5011bb76ff1Sjsg 	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
5021bb76ff1Sjsg 		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
503c349dbc7Sjsg 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
504c349dbc7Sjsg 
505c349dbc7Sjsg 	if (write) {
506c349dbc7Sjsg 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
507c349dbc7Sjsg 		i915_vma_set_ggtt_write(vma);
508c349dbc7Sjsg 		obj->mm.dirty = true;
509c349dbc7Sjsg 	}
510c349dbc7Sjsg 
511c349dbc7Sjsg err_fence:
512c349dbc7Sjsg 	i915_vma_unpin_fence(vma);
513c349dbc7Sjsg err_unpin:
514c349dbc7Sjsg 	__i915_vma_unpin(vma);
515c349dbc7Sjsg err_reset:
516c349dbc7Sjsg 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
517ad8b1aafSjsg err_pages:
518c349dbc7Sjsg 	i915_gem_object_unpin_pages(obj);
519ad8b1aafSjsg err_rpm:
520ad8b1aafSjsg 	if (ret == -EDEADLK) {
521ad8b1aafSjsg 		ret = i915_gem_ww_ctx_backoff(&ww);
522ad8b1aafSjsg 		if (!ret)
523ad8b1aafSjsg 			goto retry;
524ad8b1aafSjsg 	}
525ad8b1aafSjsg 	i915_gem_ww_ctx_fini(&ww);
526ad8b1aafSjsg 	intel_runtime_pm_put(rpm, wakeref);
527c349dbc7Sjsg 	return i915_error_to_vmf_fault(ret);
528c349dbc7Sjsg }
529c349dbc7Sjsg 
530ad8b1aafSjsg static int
531ad8b1aafSjsg vm_access(struct vm_area_struct *area, unsigned long addr,
532ad8b1aafSjsg 	  void *buf, int len, int write)
533ad8b1aafSjsg {
534ad8b1aafSjsg 	struct i915_mmap_offset *mmo = area->vm_private_data;
535ad8b1aafSjsg 	struct drm_i915_gem_object *obj = mmo->obj;
5365ca02815Sjsg 	struct i915_gem_ww_ctx ww;
537ad8b1aafSjsg 	void *vaddr;
5385ca02815Sjsg 	int err = 0;
539ad8b1aafSjsg 
540ad8b1aafSjsg 	if (i915_gem_object_is_readonly(obj) && write)
541ad8b1aafSjsg 		return -EACCES;
542ad8b1aafSjsg 
543ad8b1aafSjsg 	addr -= area->vm_start;
544170c7946Sjsg 	if (range_overflows_t(u64, addr, len, obj->base.size))
545ad8b1aafSjsg 		return -EINVAL;
546ad8b1aafSjsg 
5475ca02815Sjsg 	i915_gem_ww_ctx_init(&ww, true);
5485ca02815Sjsg retry:
5495ca02815Sjsg 	err = i915_gem_object_lock(obj, &ww);
5505ca02815Sjsg 	if (err)
5515ca02815Sjsg 		goto out;
5525ca02815Sjsg 
553ad8b1aafSjsg 	/* As this is primarily for debugging, let's focus on simplicity */
554ad8b1aafSjsg 	vaddr = i915_gem_object_pin_map(obj, I915_MAP_FORCE_WC);
5555ca02815Sjsg 	if (IS_ERR(vaddr)) {
5565ca02815Sjsg 		err = PTR_ERR(vaddr);
5575ca02815Sjsg 		goto out;
5585ca02815Sjsg 	}
559ad8b1aafSjsg 
560ad8b1aafSjsg 	if (write) {
561ad8b1aafSjsg 		memcpy(vaddr + addr, buf, len);
562ad8b1aafSjsg 		__i915_gem_object_flush_map(obj, addr, len);
563ad8b1aafSjsg 	} else {
564ad8b1aafSjsg 		memcpy(buf, vaddr + addr, len);
565ad8b1aafSjsg 	}
566ad8b1aafSjsg 
567ad8b1aafSjsg 	i915_gem_object_unpin_map(obj);
5685ca02815Sjsg out:
5695ca02815Sjsg 	if (err == -EDEADLK) {
5705ca02815Sjsg 		err = i915_gem_ww_ctx_backoff(&ww);
5715ca02815Sjsg 		if (!err)
5725ca02815Sjsg 			goto retry;
5735ca02815Sjsg 	}
5745ca02815Sjsg 	i915_gem_ww_ctx_fini(&ww);
5755ca02815Sjsg 
5765ca02815Sjsg 	if (err)
5775ca02815Sjsg 		return err;
578ad8b1aafSjsg 
579ad8b1aafSjsg 	return len;
580ad8b1aafSjsg }
581ad8b1aafSjsg 
582c349dbc7Sjsg #else /* !__linux__ */
583c349dbc7Sjsg 
584c349dbc7Sjsg static int i915_error_to_vmf_fault(int err)
585c349dbc7Sjsg {
586c349dbc7Sjsg 	switch (err) {
587c349dbc7Sjsg 	default:
588c349dbc7Sjsg 		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
589ad8b1aafSjsg 		fallthrough;
590c349dbc7Sjsg 	case -EIO: /* shmemfs failure from swap device */
591c349dbc7Sjsg 	case -EFAULT: /* purged object */
592c349dbc7Sjsg 	case -ENODEV: /* bad object, how did you get here! */
593c349dbc7Sjsg 	case -ENXIO: /* unable to access backing store (on device) */
594*f46a341eSmpi 		return EACCES;
595c349dbc7Sjsg 
596c349dbc7Sjsg 	case -ENOMEM: /* our allocation failure */
597*f46a341eSmpi 		return EACCES; /* XXX */
598c349dbc7Sjsg 
599c349dbc7Sjsg 	case 0:
600c349dbc7Sjsg 	case -EAGAIN:
601ad8b1aafSjsg 	case -ENOSPC: /* transient failure to evict? */
602d732525bSjsg 	case -ENOBUFS: /* temporarily out of fences? */
603c349dbc7Sjsg 	case -ERESTART:
604c349dbc7Sjsg 	case -EINTR:
605c349dbc7Sjsg 	case -EBUSY:
606c349dbc7Sjsg 		/*
607c349dbc7Sjsg 		 * EBUSY is ok: this just means that another thread
608c349dbc7Sjsg 		 * already did the job.
609c349dbc7Sjsg 		 */
610*f46a341eSmpi 		return 0;
611c349dbc7Sjsg 	}
612c349dbc7Sjsg }
613c349dbc7Sjsg 
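/*
 * OpenBSD counterpart of the Linux vm_fault_cpu() above: enter the object's
 * backing pages directly into the faulting pmap, applying the caching mode
 * (write-combining or uncached) recorded in the mmap offset.
 */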
614c349dbc7Sjsg static int
615c349dbc7Sjsg vm_fault_cpu(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
616c349dbc7Sjsg     vm_prot_t access_type)
617c349dbc7Sjsg {
618c349dbc7Sjsg 	struct vm_map_entry *entry = ufi->entry;
619c349dbc7Sjsg 	struct drm_i915_gem_object *obj = mmo->obj;
620c349dbc7Sjsg 	int write = !!(access_type & PROT_WRITE);
621c349dbc7Sjsg 	struct sg_table *pages;
622c349dbc7Sjsg 	struct sg_page_iter sg_iter;
623c349dbc7Sjsg 	vm_prot_t mapprot;
624c349dbc7Sjsg 	vaddr_t va = entry->start;
625c349dbc7Sjsg 	paddr_t pa, pa_flags = 0;
626c349dbc7Sjsg 	int flags;
627c349dbc7Sjsg 	int err;
628c349dbc7Sjsg 
629c349dbc7Sjsg 	/* Sanity check that we allow writing into this object */
630c349dbc7Sjsg 	if (unlikely(i915_gem_object_is_readonly(obj) && write)) {
631ec3489eeSmpi 		uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
632*f46a341eSmpi 		return EACCES;
633c349dbc7Sjsg 	}
634c349dbc7Sjsg 
6357fd8d2b8Sjsg 	if (i915_gem_object_lock_interruptible(obj, NULL))
636*f46a341eSmpi 		return EACCES;
6377fd8d2b8Sjsg 
638c349dbc7Sjsg 	err = i915_gem_object_pin_pages(obj);
639c349dbc7Sjsg 	if (err)
640c349dbc7Sjsg 		goto out;
641c349dbc7Sjsg 
642c349dbc7Sjsg 	flags = mapprot = entry->protection;
643c349dbc7Sjsg 	if (write == 0)
644c349dbc7Sjsg 		flags &= ~PROT_WRITE;
645c349dbc7Sjsg 
646c349dbc7Sjsg 	switch (mmo->mmap_type) {
647c349dbc7Sjsg 	case I915_MMAP_TYPE_WC:
648c349dbc7Sjsg 		pa_flags |= PMAP_WC;
649c349dbc7Sjsg 		break;
650c349dbc7Sjsg 	case I915_MMAP_TYPE_UC:
651c349dbc7Sjsg 		pa_flags |= PMAP_NOCACHE;
652c349dbc7Sjsg 		break;
653c349dbc7Sjsg 	default:
654c349dbc7Sjsg 		break;
655c349dbc7Sjsg 	}
656c349dbc7Sjsg 
657c349dbc7Sjsg 	pages = obj->mm.pages;
658c349dbc7Sjsg 	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
659c349dbc7Sjsg 		pa = sg_page_iter_dma_address(&sg_iter);
660c349dbc7Sjsg 		if (pmap_enter(ufi->orig_map->pmap, va, pa | pa_flags,
661c349dbc7Sjsg 		    mapprot, PMAP_CANFAIL | flags)) {
662c349dbc7Sjsg 			err = -ENOMEM;
663c349dbc7Sjsg 			break;
664c349dbc7Sjsg 		}
665c349dbc7Sjsg 		va += PAGE_SIZE;
666c349dbc7Sjsg 	}
667c349dbc7Sjsg 	pmap_update(ufi->orig_map->pmap);
668c349dbc7Sjsg 
669c349dbc7Sjsg 	if (write) {
670c349dbc7Sjsg 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
671c349dbc7Sjsg 		obj->mm.dirty = true;
672c349dbc7Sjsg 	}
673c349dbc7Sjsg 
674c349dbc7Sjsg 	i915_gem_object_unpin_pages(obj);
675c349dbc7Sjsg 
676c349dbc7Sjsg out:
6777fd8d2b8Sjsg 	i915_gem_object_unlock(obj);
678ec3489eeSmpi 	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
679c349dbc7Sjsg 	return i915_error_to_vmf_fault(err);
680c349dbc7Sjsg }
681c349dbc7Sjsg 
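/*
 * OpenBSD replacement for the Linux remap_io_mapping() helper: enter a
 * physically contiguous run of pages, starting at pfn, into the given pmap
 * as write-combined mappings.
 */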
682c349dbc7Sjsg int
683c349dbc7Sjsg remap_io_mapping(pmap_t pm, vm_prot_t mapprot,
684c349dbc7Sjsg     vaddr_t va, unsigned long pfn, unsigned long size)
685c349dbc7Sjsg {
686c349dbc7Sjsg 	vaddr_t end = va + size;
687c349dbc7Sjsg 	paddr_t pa = ptoa(pfn);
688c349dbc7Sjsg 
689c349dbc7Sjsg 	while (va < end) {
690c349dbc7Sjsg 		if (pmap_enter(pm, va, pa | PMAP_WC, mapprot, PMAP_CANFAIL | mapprot))
691c349dbc7Sjsg 			return -ENOMEM;
692c349dbc7Sjsg 		va += PAGE_SIZE;
693c349dbc7Sjsg 		pa += PAGE_SIZE;
694c349dbc7Sjsg 	}
695c349dbc7Sjsg 
696c349dbc7Sjsg 	return 0;
697c349dbc7Sjsg }
698c349dbc7Sjsg 
69908be9e2bSjsg static void set_address_limits(struct vm_map_entry *entry,
70008be9e2bSjsg 			       struct i915_vma *vma,
70108be9e2bSjsg 			       unsigned long obj_offset,
70208be9e2bSjsg 			       unsigned long *start_vaddr,
70308be9e2bSjsg 			       unsigned long *end_vaddr)
70408be9e2bSjsg {
70508be9e2bSjsg 	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
70608be9e2bSjsg 	long start, end; /* memory boundaries */
70708be9e2bSjsg 
70808be9e2bSjsg 	/*
70908be9e2bSjsg 	 * Let's move into the ">> PAGE_SHIFT"
71008be9e2bSjsg 	 * domain to be sure not to lose bits
71108be9e2bSjsg 	 */
71208be9e2bSjsg 	vm_start = entry->start >> PAGE_SHIFT;
71308be9e2bSjsg 	vm_end = entry->end >> PAGE_SHIFT;
71408be9e2bSjsg 	vma_size = vma->size >> PAGE_SHIFT;
71508be9e2bSjsg 
71608be9e2bSjsg 	/*
71708be9e2bSjsg 	 * Calculate the memory boundaries by considering the offset
71808be9e2bSjsg 	 * provided by the user during memory mapping and the offset
71908be9e2bSjsg 	 * provided for the partial mapping.
72008be9e2bSjsg 	 */
72108be9e2bSjsg 	start = vm_start;
72208be9e2bSjsg 	start -= obj_offset;
72308be9e2bSjsg 	start += vma->gtt_view.partial.offset;
72408be9e2bSjsg 	end = start + vma_size;
72508be9e2bSjsg 
72608be9e2bSjsg 	start = max_t(long, start, vm_start);
72708be9e2bSjsg 	end = min_t(long, end, vm_end);
72808be9e2bSjsg 
72908be9e2bSjsg 	/* Let's move back into the "<< PAGE_SHIFT" domain */
73008be9e2bSjsg 	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
73108be9e2bSjsg 	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
73208be9e2bSjsg }
73308be9e2bSjsg 
734c349dbc7Sjsg static int
735c349dbc7Sjsg vm_fault_gtt(struct i915_mmap_offset *mmo, struct uvm_faultinfo *ufi,
736c349dbc7Sjsg     vaddr_t vaddr, vm_prot_t access_type)
737c349dbc7Sjsg {
738c349dbc7Sjsg #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
739c349dbc7Sjsg 	struct vm_map_entry *entry = ufi->entry;
740c349dbc7Sjsg 	struct drm_i915_gem_object *obj = mmo->obj;
741c349dbc7Sjsg 	struct drm_device *dev = obj->base.dev;
742c349dbc7Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
743c349dbc7Sjsg 	struct intel_runtime_pm *rpm = &i915->runtime_pm;
7441bb76ff1Sjsg 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
745c349dbc7Sjsg 	int write = !!(access_type & PROT_WRITE);
746ad8b1aafSjsg 	struct i915_gem_ww_ctx ww;
74708be9e2bSjsg 	unsigned long obj_offset;
74808be9e2bSjsg 	unsigned long start, end; /* memory boundaries */
749c349dbc7Sjsg 	intel_wakeref_t wakeref;
750c349dbc7Sjsg 	struct i915_vma *vma;
751c349dbc7Sjsg 	pgoff_t page_offset;
75208be9e2bSjsg 	unsigned long pfn;
753c349dbc7Sjsg 	int srcu;
754c349dbc7Sjsg 	int ret;
755c349dbc7Sjsg 
756ac888351Sjsg 	obj_offset = (entry->offset >> PAGE_SHIFT) - drm_vma_node_start(&mmo->vma_node);
757c349dbc7Sjsg 	page_offset = (vaddr - entry->start) >> PAGE_SHIFT;
758ac888351Sjsg 	page_offset += obj_offset;
759c349dbc7Sjsg 
760c349dbc7Sjsg 	trace_i915_gem_object_fault(obj, page_offset, true, write);
761c349dbc7Sjsg 
762c349dbc7Sjsg 	wakeref = intel_runtime_pm_get(rpm);
763c349dbc7Sjsg 
764ad8b1aafSjsg 	i915_gem_ww_ctx_init(&ww, true);
765ad8b1aafSjsg retry:
766ad8b1aafSjsg 	ret = i915_gem_object_lock(obj, &ww);
767c349dbc7Sjsg 	if (ret)
768c349dbc7Sjsg 		goto err_rpm;
769c349dbc7Sjsg 
770ad8b1aafSjsg 	/* Sanity check that we allow writing into this object */
771ad8b1aafSjsg 	if (i915_gem_object_is_readonly(obj) && write) {
772ad8b1aafSjsg 		ret = -EFAULT;
773ad8b1aafSjsg 		goto err_rpm;
774ad8b1aafSjsg 	}
775ad8b1aafSjsg 
776ad8b1aafSjsg 	ret = i915_gem_object_pin_pages(obj);
777ad8b1aafSjsg 	if (ret)
778ad8b1aafSjsg 		goto err_rpm;
779ad8b1aafSjsg 
780f005ef32Sjsg 	ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
781ad8b1aafSjsg 	if (ret)
782ad8b1aafSjsg 		goto err_pages;
783ad8b1aafSjsg 
784c349dbc7Sjsg 	/* Now pin it into the GTT as needed */
785ad8b1aafSjsg 	vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
786c349dbc7Sjsg 					  PIN_MAPPABLE |
787c349dbc7Sjsg 					  PIN_NONBLOCK /* NOWARN */ |
788c349dbc7Sjsg 					  PIN_NOEVICT);
789ad8b1aafSjsg 	if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
790c349dbc7Sjsg 		/* Use a partial view if it is bigger than available space */
7911bb76ff1Sjsg 		struct i915_gtt_view view =
792c349dbc7Sjsg 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
793c349dbc7Sjsg 		unsigned int flags;
794c349dbc7Sjsg 
795c349dbc7Sjsg 		flags = PIN_MAPPABLE | PIN_NOSEARCH;
7961bb76ff1Sjsg 		if (view.type == I915_GTT_VIEW_NORMAL)
797c349dbc7Sjsg 			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
798c349dbc7Sjsg 
799c349dbc7Sjsg 		/*
800c349dbc7Sjsg 		 * Userspace is now writing through an untracked VMA, abandon
801c349dbc7Sjsg 		 * all hope that the hardware is able to track future writes.
802c349dbc7Sjsg 		 */
803c349dbc7Sjsg 
804ad8b1aafSjsg 		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
805ad8b1aafSjsg 		if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
806c349dbc7Sjsg 			flags = PIN_MAPPABLE;
8071bb76ff1Sjsg 			view.type = I915_GTT_VIEW_PARTIAL;
808ad8b1aafSjsg 			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
809c349dbc7Sjsg 		}
810c349dbc7Sjsg 
8111bb76ff1Sjsg 		/*
8121bb76ff1Sjsg 		 * The entire mappable GGTT is pinned? Unexpected!
8131bb76ff1Sjsg 		 * Try to evict the object we locked too, as normally we skip it
8141bb76ff1Sjsg 		 * due to lack of short term pinning inside execbuf.
8151bb76ff1Sjsg 		 */
8161bb76ff1Sjsg 		if (vma == ERR_PTR(-ENOSPC)) {
8171bb76ff1Sjsg 			ret = mutex_lock_interruptible(&ggtt->vm.mutex);
8181bb76ff1Sjsg 			if (!ret) {
8196823e11cSjsg 				ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
8201bb76ff1Sjsg 				mutex_unlock(&ggtt->vm.mutex);
8211bb76ff1Sjsg 			}
8221bb76ff1Sjsg 			if (ret)
8231bb76ff1Sjsg 				goto err_reset;
8241bb76ff1Sjsg 			vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
8251bb76ff1Sjsg 		}
826c349dbc7Sjsg 	}
827c349dbc7Sjsg 	if (IS_ERR(vma)) {
828c349dbc7Sjsg 		ret = PTR_ERR(vma);
829c349dbc7Sjsg 		goto err_reset;
830c349dbc7Sjsg 	}
831c349dbc7Sjsg 
832c349dbc7Sjsg 	/* Access to snoopable pages through the GTT is incoherent. */
833f005ef32Sjsg 	/*
834f005ef32Sjsg 	 * For objects created by userspace through GEM_CREATE with pat_index
835f005ef32Sjsg 	 * set by the set_pat extension, coherency is managed by userspace; make
836f005ef32Sjsg 	 * sure we don't fail handling the vm fault by calling
837f005ef32Sjsg 	 * i915_gem_object_has_cache_level(), which always returns true for such
838f005ef32Sjsg 	 * objects. Otherwise this helper function would fall back to checking
839f005ef32Sjsg 	 * whether the object is un-cached.
840f005ef32Sjsg 	 */
841f005ef32Sjsg 	if (!(i915_gem_object_has_cache_level(obj, I915_CACHE_NONE) ||
842f005ef32Sjsg 	      HAS_LLC(i915))) {
843c349dbc7Sjsg 		ret = -EFAULT;
844c349dbc7Sjsg 		goto err_unpin;
845c349dbc7Sjsg 	}
846c349dbc7Sjsg 
847c349dbc7Sjsg 	ret = i915_vma_pin_fence(vma);
848c349dbc7Sjsg 	if (ret)
849c349dbc7Sjsg 		goto err_unpin;
850c349dbc7Sjsg 
85108be9e2bSjsg 	set_address_limits(entry, vma, obj_offset, &start, &end);
85208be9e2bSjsg 
85308be9e2bSjsg 	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
85408be9e2bSjsg 	pfn += (start - entry->start) >> PAGE_SHIFT;
85508be9e2bSjsg 	pfn += obj_offset - vma->gtt_view.partial.offset;
85608be9e2bSjsg 
857c349dbc7Sjsg 	/* Finally, remap it using the new GTT offset */
858c349dbc7Sjsg 	ret = remap_io_mapping(ufi->orig_map->pmap, entry->protection,
85908be9e2bSjsg 			       start, pfn, end - start);
860c349dbc7Sjsg 	if (ret)
861c349dbc7Sjsg 		goto err_fence;
862c349dbc7Sjsg 
863c349dbc7Sjsg 	assert_rpm_wakelock_held(rpm);
864c349dbc7Sjsg 
865c349dbc7Sjsg 	/* Mark as being mmapped into userspace for later revocation */
8661bb76ff1Sjsg 	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
867c349dbc7Sjsg 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
8681bb76ff1Sjsg 		list_add(&obj->userfault_link, &to_gt(i915)->ggtt->userfault_list);
8691bb76ff1Sjsg 	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
870c349dbc7Sjsg 
871c349dbc7Sjsg 	/* Track the mmo associated with the fenced vma */
872c349dbc7Sjsg 	vma->mmo = mmo;
873c349dbc7Sjsg 
8741bb76ff1Sjsg 	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
8751bb76ff1Sjsg 		intel_wakeref_auto(&i915->runtime_pm.userfault_wakeref,
876c349dbc7Sjsg 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
877c349dbc7Sjsg 
878c349dbc7Sjsg 	if (write) {
879c349dbc7Sjsg 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
880c349dbc7Sjsg 		i915_vma_set_ggtt_write(vma);
881c349dbc7Sjsg 		obj->mm.dirty = true;
882c349dbc7Sjsg 	}
883c349dbc7Sjsg 
884c349dbc7Sjsg err_fence:
885c349dbc7Sjsg 	i915_vma_unpin_fence(vma);
886c349dbc7Sjsg err_unpin:
887c349dbc7Sjsg 	__i915_vma_unpin(vma);
888c349dbc7Sjsg err_reset:
889c349dbc7Sjsg 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
890ad8b1aafSjsg err_pages:
891c349dbc7Sjsg 	i915_gem_object_unpin_pages(obj);
892ad8b1aafSjsg err_rpm:
893ad8b1aafSjsg 	if (ret == -EDEADLK) {
894ad8b1aafSjsg 		ret = i915_gem_ww_ctx_backoff(&ww);
895ad8b1aafSjsg 		if (!ret)
896ad8b1aafSjsg 			goto retry;
897ad8b1aafSjsg 	}
898ad8b1aafSjsg 	i915_gem_ww_ctx_fini(&ww);
899ad8b1aafSjsg 	intel_runtime_pm_put(rpm, wakeref);
900ec3489eeSmpi 	uvmfault_unlockall(ufi, NULL, &obj->base.uobj);
901c349dbc7Sjsg 	return i915_error_to_vmf_fault(ret);
902c349dbc7Sjsg }
903c349dbc7Sjsg 
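/*
 * uvm fault entry point for i915 GEM objects on OpenBSD: look up the
 * i915_mmap_offset backing the faulting map entry via the DRM vma offset
 * manager and dispatch to the GTT or CPU fault handler according to the
 * mapping type.
 */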
904c349dbc7Sjsg int
905c349dbc7Sjsg i915_gem_fault(struct drm_gem_object *gem_obj, struct uvm_faultinfo *ufi,
906c349dbc7Sjsg     off_t offset, vaddr_t vaddr, vm_page_t *pps, int npages, int centeridx,
907c349dbc7Sjsg     vm_prot_t access_type, int flags)
908c349dbc7Sjsg {
909c349dbc7Sjsg 	struct drm_vma_offset_node *node;
910c349dbc7Sjsg 	struct drm_device *dev = gem_obj->dev;
911c349dbc7Sjsg 	struct vm_map_entry *entry = ufi->entry;
912c349dbc7Sjsg 	vsize_t size = entry->end - entry->start;
913c349dbc7Sjsg 	struct i915_mmap_offset *mmo = NULL;
914c349dbc7Sjsg 
915c349dbc7Sjsg 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
916c349dbc7Sjsg 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
917c349dbc7Sjsg 						  entry->offset >> PAGE_SHIFT,
918c349dbc7Sjsg 						  size >> PAGE_SHIFT);
919c349dbc7Sjsg 	if (likely(node))
920c349dbc7Sjsg 		mmo = container_of(node, struct i915_mmap_offset, vma_node);
921c349dbc7Sjsg 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
922c349dbc7Sjsg 	if (!mmo) {
923ec3489eeSmpi 		uvmfault_unlockall(ufi, NULL, &gem_obj->uobj);
924*f46a341eSmpi 		return EACCES;
925c349dbc7Sjsg 	}
926c349dbc7Sjsg 
927c349dbc7Sjsg 	KASSERT(gem_obj == &mmo->obj->base);
928c349dbc7Sjsg 
929c349dbc7Sjsg 	if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
930c349dbc7Sjsg 		return vm_fault_gtt(mmo, ufi, vaddr, access_type);
931c349dbc7Sjsg 
932c349dbc7Sjsg 	return vm_fault_cpu(mmo, ufi, access_type);
933c349dbc7Sjsg }
934c349dbc7Sjsg 
935c349dbc7Sjsg #endif /* !__linux__ */
936c349dbc7Sjsg 
937c349dbc7Sjsg void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
938c349dbc7Sjsg {
939c349dbc7Sjsg 	struct i915_vma *vma;
940c349dbc7Sjsg 
941c349dbc7Sjsg 	GEM_BUG_ON(!obj->userfault_count);
942c349dbc7Sjsg 
943c349dbc7Sjsg 	for_each_ggtt_vma(vma, obj)
944c349dbc7Sjsg 		i915_vma_revoke_mmap(vma);
945c349dbc7Sjsg 
946c349dbc7Sjsg 	GEM_BUG_ON(obj->userfault_count);
947c349dbc7Sjsg }
948c349dbc7Sjsg 
949c349dbc7Sjsg /*
950c349dbc7Sjsg  * It is vital that we remove the page mapping if we have mapped a tiled
951c349dbc7Sjsg  * object through the GTT and then lose the fence register due to
952c349dbc7Sjsg  * resource pressure. Similarly if the object has been moved out of the
953c349dbc7Sjsg  * aperture, then pages mapped into userspace must be revoked. Removing the
954c349dbc7Sjsg  * mapping will then trigger a page fault on the next user access, allowing
955c349dbc7Sjsg  * fixup by vm_fault_gtt().
956c349dbc7Sjsg  */
957ad8b1aafSjsg void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
958c349dbc7Sjsg {
959c349dbc7Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
960c349dbc7Sjsg 	intel_wakeref_t wakeref;
961c349dbc7Sjsg 
962c349dbc7Sjsg 	/*
963c349dbc7Sjsg 	 * Serialisation between user GTT access and our code depends upon
964c349dbc7Sjsg 	 * revoking the CPU's PTE whilst the mutex is held. The next user
965c349dbc7Sjsg 	 * pagefault then has to wait until we release the mutex.
966c349dbc7Sjsg 	 *
967c349dbc7Sjsg 	 * Note that RPM complicates this somewhat by adding an additional
968c349dbc7Sjsg 	 * requirement that operations to the GGTT be made holding the RPM
969c349dbc7Sjsg 	 * wakeref.
970c349dbc7Sjsg 	 */
971c349dbc7Sjsg 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
9721bb76ff1Sjsg 	mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
973c349dbc7Sjsg 
974c349dbc7Sjsg 	if (!obj->userfault_count)
975c349dbc7Sjsg 		goto out;
976c349dbc7Sjsg 
977c349dbc7Sjsg 	__i915_gem_object_release_mmap_gtt(obj);
978c349dbc7Sjsg 
979c349dbc7Sjsg 	/*
980c349dbc7Sjsg 	 * Ensure that the CPU's PTE are revoked and there are not outstanding
981c349dbc7Sjsg 	 * memory transactions from userspace before we return. The TLB
982c349dbc7Sjsg 	 * flushing implied above by changing the PTE above *should* be
983c349dbc7Sjsg 	 * sufficient, an extra barrier here just provides us with a bit
984c349dbc7Sjsg 	 * of paranoid documentation about our requirement to serialise
985c349dbc7Sjsg 	 * memory writes before touching registers / GSM.
986c349dbc7Sjsg 	 */
987c349dbc7Sjsg 	wmb();
988c349dbc7Sjsg 
989c349dbc7Sjsg out:
9901bb76ff1Sjsg 	mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
991c349dbc7Sjsg 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
992c349dbc7Sjsg }
993c349dbc7Sjsg 
9941bb76ff1Sjsg void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
9951bb76ff1Sjsg {
9961bb76ff1Sjsg 	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
9971bb76ff1Sjsg 	struct ttm_device *bdev = bo->bdev;
9981bb76ff1Sjsg 
9991bb76ff1Sjsg #ifdef __linux__
10001bb76ff1Sjsg 	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
10011bb76ff1Sjsg #endif
10021bb76ff1Sjsg 
10031bb76ff1Sjsg 	/*
10041bb76ff1Sjsg 	 * We have exclusive access here via runtime suspend. All other callers
10051bb76ff1Sjsg 	 * must first grab the rpm wakeref.
10061bb76ff1Sjsg 	 */
10071bb76ff1Sjsg 	GEM_BUG_ON(!obj->userfault_count);
10081bb76ff1Sjsg 	list_del(&obj->userfault_link);
10091bb76ff1Sjsg 	obj->userfault_count = 0;
10101bb76ff1Sjsg }
10111bb76ff1Sjsg 
1012c349dbc7Sjsg void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
1013c349dbc7Sjsg {
1014c349dbc7Sjsg 	struct i915_mmap_offset *mmo, *mn;
1015c349dbc7Sjsg 
10161bb76ff1Sjsg 	if (obj->ops->unmap_virtual)
10171bb76ff1Sjsg 		obj->ops->unmap_virtual(obj);
10181bb76ff1Sjsg 
1019c349dbc7Sjsg 	spin_lock(&obj->mmo.lock);
1020c349dbc7Sjsg 	rbtree_postorder_for_each_entry_safe(mmo, mn,
1021c349dbc7Sjsg 					     &obj->mmo.offsets, offset) {
1022c349dbc7Sjsg 		/*
1023c349dbc7Sjsg 		 * vma_node_unmap for GTT mmaps handled already in
1024c349dbc7Sjsg 		 * __i915_gem_object_release_mmap_gtt
1025c349dbc7Sjsg 		 */
1026c349dbc7Sjsg 		if (mmo->mmap_type == I915_MMAP_TYPE_GTT)
1027c349dbc7Sjsg 			continue;
1028c349dbc7Sjsg 
1029c349dbc7Sjsg 		spin_unlock(&obj->mmo.lock);
1030c349dbc7Sjsg #ifdef __linux__
1031c349dbc7Sjsg 		drm_vma_node_unmap(&mmo->vma_node,
1032c349dbc7Sjsg 				   obj->base.dev->anon_inode->i_mapping);
1033c349dbc7Sjsg #endif
1034c349dbc7Sjsg 		spin_lock(&obj->mmo.lock);
1035c349dbc7Sjsg 	}
1036c349dbc7Sjsg 	spin_unlock(&obj->mmo.lock);
1037c349dbc7Sjsg }
1038c349dbc7Sjsg 
1039c349dbc7Sjsg static struct i915_mmap_offset *
1040c349dbc7Sjsg lookup_mmo(struct drm_i915_gem_object *obj,
1041c349dbc7Sjsg 	   enum i915_mmap_type mmap_type)
1042c349dbc7Sjsg {
1043c349dbc7Sjsg 	struct rb_node *rb;
1044c349dbc7Sjsg 
1045c349dbc7Sjsg 	spin_lock(&obj->mmo.lock);
1046c349dbc7Sjsg 	rb = obj->mmo.offsets.rb_node;
1047c349dbc7Sjsg 	while (rb) {
1048c349dbc7Sjsg 		struct i915_mmap_offset *mmo =
1049c349dbc7Sjsg 			rb_entry(rb, typeof(*mmo), offset);
1050c349dbc7Sjsg 
1051c349dbc7Sjsg 		if (mmo->mmap_type == mmap_type) {
1052c349dbc7Sjsg 			spin_unlock(&obj->mmo.lock);
1053c349dbc7Sjsg 			return mmo;
1054c349dbc7Sjsg 		}
1055c349dbc7Sjsg 
1056c349dbc7Sjsg 		if (mmo->mmap_type < mmap_type)
1057c349dbc7Sjsg 			rb = rb->rb_right;
1058c349dbc7Sjsg 		else
1059c349dbc7Sjsg 			rb = rb->rb_left;
1060c349dbc7Sjsg 	}
1061c349dbc7Sjsg 	spin_unlock(&obj->mmo.lock);
1062c349dbc7Sjsg 
1063c349dbc7Sjsg 	return NULL;
1064c349dbc7Sjsg }
1065c349dbc7Sjsg 
1066c349dbc7Sjsg static struct i915_mmap_offset *
1067c349dbc7Sjsg insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
1068c349dbc7Sjsg {
1069c349dbc7Sjsg 	struct rb_node *rb, **p;
1070c349dbc7Sjsg 
1071c349dbc7Sjsg 	spin_lock(&obj->mmo.lock);
1072c349dbc7Sjsg 	rb = NULL;
1073c349dbc7Sjsg 	p = &obj->mmo.offsets.rb_node;
1074c349dbc7Sjsg 	while (*p) {
1075c349dbc7Sjsg 		struct i915_mmap_offset *pos;
1076c349dbc7Sjsg 
1077c349dbc7Sjsg 		rb = *p;
1078c349dbc7Sjsg 		pos = rb_entry(rb, typeof(*pos), offset);
1079c349dbc7Sjsg 
1080c349dbc7Sjsg 		if (pos->mmap_type == mmo->mmap_type) {
1081c349dbc7Sjsg 			spin_unlock(&obj->mmo.lock);
1082c349dbc7Sjsg 			drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
1083c349dbc7Sjsg 					      &mmo->vma_node);
1084c349dbc7Sjsg 			kfree(mmo);
1085c349dbc7Sjsg 			return pos;
1086c349dbc7Sjsg 		}
1087c349dbc7Sjsg 
1088c349dbc7Sjsg 		if (pos->mmap_type < mmo->mmap_type)
1089c349dbc7Sjsg 			p = &rb->rb_right;
1090c349dbc7Sjsg 		else
1091c349dbc7Sjsg 			p = &rb->rb_left;
1092c349dbc7Sjsg 	}
1093c349dbc7Sjsg 	rb_link_node(&mmo->offset, rb, p);
1094c349dbc7Sjsg 	rb_insert_color(&mmo->offset, &obj->mmo.offsets);
1095c349dbc7Sjsg 	spin_unlock(&obj->mmo.lock);
1096c349dbc7Sjsg 
1097c349dbc7Sjsg 	return mmo;
1098c349dbc7Sjsg }
1099c349dbc7Sjsg 
1100c349dbc7Sjsg static struct i915_mmap_offset *
1101c349dbc7Sjsg mmap_offset_attach(struct drm_i915_gem_object *obj,
1102c349dbc7Sjsg 		   enum i915_mmap_type mmap_type,
1103c349dbc7Sjsg 		   struct drm_file *file)
1104c349dbc7Sjsg {
1105c349dbc7Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1106c349dbc7Sjsg 	struct i915_mmap_offset *mmo;
1107c349dbc7Sjsg 	int err;
1108c349dbc7Sjsg 
11095ca02815Sjsg 	GEM_BUG_ON(obj->ops->mmap_offset || obj->ops->mmap_ops);
11105ca02815Sjsg 
1111c349dbc7Sjsg 	mmo = lookup_mmo(obj, mmap_type);
1112c349dbc7Sjsg 	if (mmo)
1113c349dbc7Sjsg 		goto out;
1114c349dbc7Sjsg 
1115c349dbc7Sjsg 	mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
1116c349dbc7Sjsg 	if (!mmo)
1117c349dbc7Sjsg 		return ERR_PTR(-ENOMEM);
1118c349dbc7Sjsg 
1119c349dbc7Sjsg 	mmo->obj = obj;
1120c349dbc7Sjsg 	mmo->mmap_type = mmap_type;
1121c349dbc7Sjsg 	drm_vma_node_reset(&mmo->vma_node);
1122c349dbc7Sjsg 
1123c349dbc7Sjsg 	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
1124c349dbc7Sjsg 				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
1125c349dbc7Sjsg 	if (likely(!err))
1126c349dbc7Sjsg 		goto insert;
1127c349dbc7Sjsg 
1128c349dbc7Sjsg 	/* Attempt to reap some mmap space from dead objects */
11291bb76ff1Sjsg 	err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
11305ca02815Sjsg 					       NULL);
1131c349dbc7Sjsg 	if (err)
1132c349dbc7Sjsg 		goto err;
1133c349dbc7Sjsg 
1134c349dbc7Sjsg 	i915_gem_drain_freed_objects(i915);
1135c349dbc7Sjsg 	err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
1136c349dbc7Sjsg 				 &mmo->vma_node, obj->base.size / PAGE_SIZE);
1137c349dbc7Sjsg 	if (err)
1138c349dbc7Sjsg 		goto err;
1139c349dbc7Sjsg 
1140c349dbc7Sjsg insert:
1141c349dbc7Sjsg 	mmo = insert_mmo(obj, mmo);
1142c349dbc7Sjsg 	GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
1143c349dbc7Sjsg out:
1144c349dbc7Sjsg 	if (file)
1145a16ed256Sjsg 		drm_vma_node_allow_once(&mmo->vma_node, file);
1146c349dbc7Sjsg 	return mmo;
1147c349dbc7Sjsg 
1148c349dbc7Sjsg err:
1149c349dbc7Sjsg 	kfree(mmo);
1150c349dbc7Sjsg 	return ERR_PTR(err);
1151c349dbc7Sjsg }
1152c349dbc7Sjsg 
1153c349dbc7Sjsg static int
11545ca02815Sjsg __assign_mmap_offset(struct drm_i915_gem_object *obj,
11555ca02815Sjsg 		     enum i915_mmap_type mmap_type,
11565ca02815Sjsg 		     u64 *offset, struct drm_file *file)
11575ca02815Sjsg {
11585ca02815Sjsg 	struct i915_mmap_offset *mmo;
11595ca02815Sjsg 
11605ca02815Sjsg 	if (i915_gem_object_never_mmap(obj))
11615ca02815Sjsg 		return -ENODEV;
11625ca02815Sjsg 
11635ca02815Sjsg 	if (obj->ops->mmap_offset)  {
11645ca02815Sjsg 		if (mmap_type != I915_MMAP_TYPE_FIXED)
11655ca02815Sjsg 			return -ENODEV;
11665ca02815Sjsg 
11675ca02815Sjsg 		*offset = obj->ops->mmap_offset(obj);
11685ca02815Sjsg 		return 0;
11695ca02815Sjsg 	}
11705ca02815Sjsg 
11715ca02815Sjsg 	if (mmap_type == I915_MMAP_TYPE_FIXED)
11725ca02815Sjsg 		return -ENODEV;
11735ca02815Sjsg 
11745ca02815Sjsg 	if (mmap_type != I915_MMAP_TYPE_GTT &&
11755ca02815Sjsg 	    !i915_gem_object_has_struct_page(obj) &&
11765ca02815Sjsg 	    !i915_gem_object_has_iomem(obj))
11775ca02815Sjsg 		return -ENODEV;
11785ca02815Sjsg 
11795ca02815Sjsg 	mmo = mmap_offset_attach(obj, mmap_type, file);
11805ca02815Sjsg 	if (IS_ERR(mmo))
11815ca02815Sjsg 		return PTR_ERR(mmo);
11825ca02815Sjsg 
11835ca02815Sjsg 	*offset = drm_vma_node_offset_addr(&mmo->vma_node);
11845ca02815Sjsg 	return 0;
11855ca02815Sjsg }
11865ca02815Sjsg 
11875ca02815Sjsg static int
11885ca02815Sjsg __assign_mmap_offset_handle(struct drm_file *file,
1189c349dbc7Sjsg 			    u32 handle,
1190c349dbc7Sjsg 			    enum i915_mmap_type mmap_type,
1191c349dbc7Sjsg 			    u64 *offset)
1192c349dbc7Sjsg {
1193c349dbc7Sjsg 	struct drm_i915_gem_object *obj;
1194c349dbc7Sjsg 	int err;
1195c349dbc7Sjsg 
1196c349dbc7Sjsg 	obj = i915_gem_object_lookup(file, handle);
1197c349dbc7Sjsg 	if (!obj)
1198c349dbc7Sjsg 		return -ENOENT;
1199c349dbc7Sjsg 
12005ca02815Sjsg 	err = i915_gem_object_lock_interruptible(obj, NULL);
12015ca02815Sjsg 	if (err)
12025ca02815Sjsg 		goto out_put;
12035ca02815Sjsg 	err = __assign_mmap_offset(obj, mmap_type, offset, file);
12045ca02815Sjsg 	i915_gem_object_unlock(obj);
12055ca02815Sjsg out_put:
1206c349dbc7Sjsg 	i915_gem_object_put(obj);
1207c349dbc7Sjsg 	return err;
1208c349dbc7Sjsg }
1209c349dbc7Sjsg 
1210c349dbc7Sjsg int
1211c349dbc7Sjsg i915_gem_dumb_mmap_offset(struct drm_file *file,
1212c349dbc7Sjsg 			  struct drm_device *dev,
1213c349dbc7Sjsg 			  u32 handle,
1214c349dbc7Sjsg 			  u64 *offset)
1215c349dbc7Sjsg {
12161bb76ff1Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
1217c349dbc7Sjsg 	enum i915_mmap_type mmap_type;
1218c349dbc7Sjsg 
12195ca02815Sjsg 	if (HAS_LMEM(to_i915(dev)))
12205ca02815Sjsg 		mmap_type = I915_MMAP_TYPE_FIXED;
12211bb76ff1Sjsg 	else if (pat_enabled())
1222c349dbc7Sjsg 		mmap_type = I915_MMAP_TYPE_WC;
12231bb76ff1Sjsg 	else if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
1224c349dbc7Sjsg 		return -ENODEV;
1225c349dbc7Sjsg 	else
1226c349dbc7Sjsg 		mmap_type = I915_MMAP_TYPE_GTT;
1227c349dbc7Sjsg 
12285ca02815Sjsg 	return __assign_mmap_offset_handle(file, handle, mmap_type, offset);
1229c349dbc7Sjsg }
1230c349dbc7Sjsg 
1231c349dbc7Sjsg /**
1232c349dbc7Sjsg  * i915_gem_mmap_offset_ioctl - prepare an object for GTT mmap'ing
1233c349dbc7Sjsg  * @dev: DRM device
1234c349dbc7Sjsg  * @data: GTT mapping ioctl data
1235c349dbc7Sjsg  * @file: GEM object info
1236c349dbc7Sjsg  *
1237c349dbc7Sjsg  * Simply returns the fake offset to userspace so it can mmap it.
1238c349dbc7Sjsg  * The mmap call will end up in drm_gem_mmap(), which will set things
1239c349dbc7Sjsg  * up so we can get faults in the handler above.
1240c349dbc7Sjsg  *
1241c349dbc7Sjsg  * The fault handler will take care of binding the object into the GTT
1242c349dbc7Sjsg  * (since it may have been evicted to make room for something), allocating
1243c349dbc7Sjsg  * a fence register, and mapping the appropriate aperture address into
1244c349dbc7Sjsg  * userspace.
1245c349dbc7Sjsg  */
1246c349dbc7Sjsg int
1247c349dbc7Sjsg i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
1248c349dbc7Sjsg 			   struct drm_file *file)
1249c349dbc7Sjsg {
1250c349dbc7Sjsg 	struct drm_i915_private *i915 = to_i915(dev);
1251c349dbc7Sjsg 	struct drm_i915_gem_mmap_offset *args = data;
1252c349dbc7Sjsg 	enum i915_mmap_type type;
1253c349dbc7Sjsg 	int err;
1254c349dbc7Sjsg 
1255c349dbc7Sjsg 	/*
1256c349dbc7Sjsg 	 * Historically we failed to check args.pad and args.offset
1257c349dbc7Sjsg 	 * and so we cannot use those fields for user input, and we cannot
1258c349dbc7Sjsg 	 * add -EINVAL for them as the ABI is fixed; i.e. old userspace
1259c349dbc7Sjsg 	 * may be feeding in garbage in those fields.
1260c349dbc7Sjsg 	 *
1261c349dbc7Sjsg 	 * if (args->pad) return -EINVAL; is verboten!
1262c349dbc7Sjsg 	 */
1263c349dbc7Sjsg 
1264c349dbc7Sjsg 	err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1265c349dbc7Sjsg 				   NULL, 0, NULL);
1266c349dbc7Sjsg 	if (err)
1267c349dbc7Sjsg 		return err;
1268c349dbc7Sjsg 
1269c349dbc7Sjsg 	switch (args->flags) {
1270c349dbc7Sjsg 	case I915_MMAP_OFFSET_GTT:
12711bb76ff1Sjsg 		if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
1272c349dbc7Sjsg 			return -ENODEV;
1273c349dbc7Sjsg 		type = I915_MMAP_TYPE_GTT;
1274c349dbc7Sjsg 		break;
1275c349dbc7Sjsg 
1276c349dbc7Sjsg 	case I915_MMAP_OFFSET_WC:
12771bb76ff1Sjsg 		if (!pat_enabled())
1278c349dbc7Sjsg 			return -ENODEV;
1279c349dbc7Sjsg 		type = I915_MMAP_TYPE_WC;
1280c349dbc7Sjsg 		break;
1281c349dbc7Sjsg 
1282c349dbc7Sjsg 	case I915_MMAP_OFFSET_WB:
1283c349dbc7Sjsg 		type = I915_MMAP_TYPE_WB;
1284c349dbc7Sjsg 		break;
1285c349dbc7Sjsg 
1286c349dbc7Sjsg 	case I915_MMAP_OFFSET_UC:
12871bb76ff1Sjsg 		if (!pat_enabled())
1288c349dbc7Sjsg 			return -ENODEV;
1289c349dbc7Sjsg 		type = I915_MMAP_TYPE_UC;
1290c349dbc7Sjsg 		break;
1291c349dbc7Sjsg 
12925ca02815Sjsg 	case I915_MMAP_OFFSET_FIXED:
12935ca02815Sjsg 		type = I915_MMAP_TYPE_FIXED;
12945ca02815Sjsg 		break;
12955ca02815Sjsg 
1296c349dbc7Sjsg 	default:
1297c349dbc7Sjsg 		return -EINVAL;
1298c349dbc7Sjsg 	}
1299c349dbc7Sjsg 
13005ca02815Sjsg 	return __assign_mmap_offset_handle(file, args->handle, type, &args->offset);
1301c349dbc7Sjsg }
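
/*
 * Illustrative userspace sketch (not part of the driver) of the recommended
 * mmap-offset flow handled above: obtain a fake offset for the chosen mapping
 * type, then mmap(2) the DRM fd at that offset.  The drm fd, GEM handle and
 * size are assumptions supplied by the caller.
 *
 *	struct drm_i915_gem_mmap_offset arg = {
 *		.handle = handle,
 *		.flags = I915_MMAP_OFFSET_WC,	(or _GTT, _WB, _UC, _FIXED)
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, arg.offset);
 */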
1302c349dbc7Sjsg 
1303c349dbc7Sjsg #ifdef __linux__
1304c349dbc7Sjsg 
1305c349dbc7Sjsg static void vm_open(struct vm_area_struct *vma)
1306c349dbc7Sjsg {
1307c349dbc7Sjsg 	struct i915_mmap_offset *mmo = vma->vm_private_data;
1308c349dbc7Sjsg 	struct drm_i915_gem_object *obj = mmo->obj;
1309c349dbc7Sjsg 
1310c349dbc7Sjsg 	GEM_BUG_ON(!obj);
1311c349dbc7Sjsg 	i915_gem_object_get(obj);
1312c349dbc7Sjsg }
1313c349dbc7Sjsg 
1314c349dbc7Sjsg static void vm_close(struct vm_area_struct *vma)
1315c349dbc7Sjsg {
1316c349dbc7Sjsg 	struct i915_mmap_offset *mmo = vma->vm_private_data;
1317c349dbc7Sjsg 	struct drm_i915_gem_object *obj = mmo->obj;
1318c349dbc7Sjsg 
1319c349dbc7Sjsg 	GEM_BUG_ON(!obj);
1320c349dbc7Sjsg 	i915_gem_object_put(obj);
1321c349dbc7Sjsg }
1322c349dbc7Sjsg 
1323c349dbc7Sjsg static const struct vm_operations_struct vm_ops_gtt = {
1324c349dbc7Sjsg 	.fault = vm_fault_gtt,
1325ad8b1aafSjsg 	.access = vm_access,
1326c349dbc7Sjsg 	.open = vm_open,
1327c349dbc7Sjsg 	.close = vm_close,
1328c349dbc7Sjsg };
1329c349dbc7Sjsg 
1330c349dbc7Sjsg static const struct vm_operations_struct vm_ops_cpu = {
1331c349dbc7Sjsg 	.fault = vm_fault_cpu,
1332ad8b1aafSjsg 	.access = vm_access,
1333c349dbc7Sjsg 	.open = vm_open,
1334c349dbc7Sjsg 	.close = vm_close,
1335c349dbc7Sjsg };
1336c349dbc7Sjsg 
1337c349dbc7Sjsg static int singleton_release(struct inode *inode, struct file *file)
1338c349dbc7Sjsg {
1339c349dbc7Sjsg 	struct drm_i915_private *i915 = file->private_data;
1340c349dbc7Sjsg 
1341c349dbc7Sjsg 	cmpxchg(&i915->gem.mmap_singleton, file, NULL);
1342c349dbc7Sjsg 	drm_dev_put(&i915->drm);
1343c349dbc7Sjsg 
1344c349dbc7Sjsg 	return 0;
1345c349dbc7Sjsg }
1346c349dbc7Sjsg 
1347c349dbc7Sjsg static const struct file_operations singleton_fops = {
1348c349dbc7Sjsg 	.owner = THIS_MODULE,
1349c349dbc7Sjsg 	.release = singleton_release,
1350c349dbc7Sjsg };
1351c349dbc7Sjsg 
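/*
 * Lazily create, or look up under RCU, the device-wide anonymous file
 * that backs all GEM mmaps. Every mapping shares the DRM device's anon
 * inode address_space (see below), so userspace mappings can later be
 * revoked through that one shared mapping.
 */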
1352c349dbc7Sjsg static struct file *mmap_singleton(struct drm_i915_private *i915)
1353c349dbc7Sjsg {
1354c349dbc7Sjsg 	struct file *file;
1355c349dbc7Sjsg 
1356c349dbc7Sjsg 	rcu_read_lock();
1357c349dbc7Sjsg 	file = READ_ONCE(i915->gem.mmap_singleton);
1358c349dbc7Sjsg 	if (file && !get_file_rcu(file))
1359c349dbc7Sjsg 		file = NULL;
1360c349dbc7Sjsg 	rcu_read_unlock();
1361c349dbc7Sjsg 	if (file)
1362c349dbc7Sjsg 		return file;
1363c349dbc7Sjsg 
1364c349dbc7Sjsg 	file = anon_inode_getfile("i915.gem", &singleton_fops, i915, O_RDWR);
1365c349dbc7Sjsg 	if (IS_ERR(file))
1366c349dbc7Sjsg 		return file;
1367c349dbc7Sjsg 
1368c349dbc7Sjsg 	/* Everyone shares a single global address space */
1369c349dbc7Sjsg 	file->f_mapping = i915->drm.anon_inode->i_mapping;
1370c349dbc7Sjsg 
1371c349dbc7Sjsg 	smp_store_mb(i915->gem.mmap_singleton, file);
1372c349dbc7Sjsg 	drm_dev_get(&i915->drm);
1373c349dbc7Sjsg 
1374c349dbc7Sjsg 	return file;
1375c349dbc7Sjsg }
1376c349dbc7Sjsg 
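/*
 * Point @vma at @obj: pick the page protection and vm_ops for the mmap
 * type (or defer to the backend's mmap_ops, e.g. TTM) and swap
 * vma->vm_file for the device-wide anonymous file. The caller's object
 * reference is consumed: dropped on error, otherwise carried by the
 * new mapping.
 */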
1377f005ef32Sjsg static int
1378f005ef32Sjsg i915_gem_object_mmap(struct drm_i915_gem_object *obj,
1379f005ef32Sjsg 		     struct i915_mmap_offset *mmo,
1380f005ef32Sjsg 		     struct vm_area_struct *vma)
1381c349dbc7Sjsg {
1382f005ef32Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1383f005ef32Sjsg 	struct drm_device *dev = &i915->drm;
1384c349dbc7Sjsg 	struct file *anon;
1385c349dbc7Sjsg 
1386c349dbc7Sjsg 	if (i915_gem_object_is_readonly(obj)) {
1387c349dbc7Sjsg 		if (vma->vm_flags & VM_WRITE) {
1388c349dbc7Sjsg 			i915_gem_object_put(obj);
1389c349dbc7Sjsg 			return -EINVAL;
1390c349dbc7Sjsg 		}
1391f005ef32Sjsg 		vm_flags_clear(vma, VM_MAYWRITE);
1392c349dbc7Sjsg 	}
1393c349dbc7Sjsg 
1394c349dbc7Sjsg 	anon = mmap_singleton(to_i915(dev));
1395c349dbc7Sjsg 	if (IS_ERR(anon)) {
1396c349dbc7Sjsg 		i915_gem_object_put(obj);
1397c349dbc7Sjsg 		return PTR_ERR(anon);
1398c349dbc7Sjsg 	}
1399c349dbc7Sjsg 
1400f005ef32Sjsg 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
1401c349dbc7Sjsg 
1402c349dbc7Sjsg 	/*
1403c349dbc7Sjsg 	 * We keep the ref on mmo->obj, not vm_file, but we require
1404c349dbc7Sjsg 	 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
1405c349dbc7Sjsg 	 * Our userspace is accustomed to having per-file resource cleanup
1406c349dbc7Sjsg 	 * (i.e. contexts, objects and requests) on their close(fd), which
1407c349dbc7Sjsg 	 * requires avoiding extraneous references to their filp, hence
1408c349dbc7Sjsg 	 * we prefer to use an anonymous file for their mmaps.
1409c349dbc7Sjsg 	 */
14105ca02815Sjsg 	vma_set_file(vma, anon);
14115ca02815Sjsg 	/* Drop the initial creation reference, the vma is now holding one. */
14125ca02815Sjsg 	fput(anon);
14135ca02815Sjsg 
14145ca02815Sjsg 	if (obj->ops->mmap_ops) {
14155ca02815Sjsg 		vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
14165ca02815Sjsg 		vma->vm_ops = obj->ops->mmap_ops;
1417f005ef32Sjsg 		vma->vm_private_data = obj->base.vma_node.driver_private;
14185ca02815Sjsg 		return 0;
14195ca02815Sjsg 	}
14205ca02815Sjsg 
14215ca02815Sjsg 	vma->vm_private_data = mmo;
1422c349dbc7Sjsg 
1423c349dbc7Sjsg 	switch (mmo->mmap_type) {
1424c349dbc7Sjsg 	case I915_MMAP_TYPE_WC:
1425c349dbc7Sjsg 		vma->vm_page_prot =
1426c349dbc7Sjsg 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1427c349dbc7Sjsg 		vma->vm_ops = &vm_ops_cpu;
1428c349dbc7Sjsg 		break;
1429c349dbc7Sjsg 
14305ca02815Sjsg 	case I915_MMAP_TYPE_FIXED:
14315ca02815Sjsg 		GEM_WARN_ON(1);
14325ca02815Sjsg 		fallthrough;
1433c349dbc7Sjsg 	case I915_MMAP_TYPE_WB:
1434c349dbc7Sjsg 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1435c349dbc7Sjsg 		vma->vm_ops = &vm_ops_cpu;
1436c349dbc7Sjsg 		break;
1437c349dbc7Sjsg 
1438c349dbc7Sjsg 	case I915_MMAP_TYPE_UC:
1439c349dbc7Sjsg 		vma->vm_page_prot =
1440c349dbc7Sjsg 			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
1441c349dbc7Sjsg 		vma->vm_ops = &vm_ops_cpu;
1442c349dbc7Sjsg 		break;
1443c349dbc7Sjsg 
1444c349dbc7Sjsg 	case I915_MMAP_TYPE_GTT:
1445c349dbc7Sjsg 		vma->vm_page_prot =
1446c349dbc7Sjsg 			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1447c349dbc7Sjsg 		vma->vm_ops = &vm_ops_gtt;
1448c349dbc7Sjsg 		break;
1449c349dbc7Sjsg 	}
1450c349dbc7Sjsg 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
1451c349dbc7Sjsg 
1452c349dbc7Sjsg 	return 0;
1453c349dbc7Sjsg }
1454c349dbc7Sjsg 
1455f005ef32Sjsg /*
1456f005ef32Sjsg  * This overcomes the limitation in drm_gem_mmap's assignment of a
1457f005ef32Sjsg  * drm_gem_object as the vma->vm_private_data, since we need to
1458f005ef32Sjsg  * be able to resolve multiple mmap offsets which could be tied
1459f005ef32Sjsg  * to a single gem object.
1460f005ef32Sjsg  */
1461f005ef32Sjsg int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1462f005ef32Sjsg {
1463f005ef32Sjsg 	struct drm_vma_offset_node *node;
1464f005ef32Sjsg 	struct drm_file *priv = filp->private_data;
1465f005ef32Sjsg 	struct drm_device *dev = priv->minor->dev;
1466f005ef32Sjsg 	struct drm_i915_gem_object *obj = NULL;
1467f005ef32Sjsg 	struct i915_mmap_offset *mmo = NULL;
1468f005ef32Sjsg 
1469f005ef32Sjsg 	if (drm_dev_is_unplugged(dev))
1470f005ef32Sjsg 		return -ENODEV;
1471f005ef32Sjsg 
1472f005ef32Sjsg 	rcu_read_lock();
1473f005ef32Sjsg 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1474f005ef32Sjsg 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1475f005ef32Sjsg 						  vma->vm_pgoff,
1476f005ef32Sjsg 						  vma_pages(vma));
1477f005ef32Sjsg 	if (node && drm_vma_node_is_allowed(node, priv)) {
1478f005ef32Sjsg 		/*
1479f005ef32Sjsg 		 * Skip 0-refcnted objects as they are in the process of being
1480f005ef32Sjsg 		 * destroyed and will be invalid when the vma manager lock
1481f005ef32Sjsg 		 * is released.
1482f005ef32Sjsg 		 */
1483f005ef32Sjsg 		if (!node->driver_private) {
1484f005ef32Sjsg 			mmo = container_of(node, struct i915_mmap_offset, vma_node);
1485f005ef32Sjsg 			obj = i915_gem_object_get_rcu(mmo->obj);
1486f005ef32Sjsg 
1487f005ef32Sjsg 			GEM_BUG_ON(obj && obj->ops->mmap_ops);
1488f005ef32Sjsg 		} else {
1489f005ef32Sjsg 			obj = i915_gem_object_get_rcu
1490f005ef32Sjsg 				(container_of(node, struct drm_i915_gem_object,
1491f005ef32Sjsg 					      base.vma_node));
1492f005ef32Sjsg 
1493f005ef32Sjsg 			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
1494f005ef32Sjsg 		}
1495f005ef32Sjsg 	}
1496f005ef32Sjsg 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1497f005ef32Sjsg 	rcu_read_unlock();
1498f005ef32Sjsg 	if (!obj)
1499f005ef32Sjsg 		return node ? -EACCES : -EINVAL;
1500f005ef32Sjsg 
1501f005ef32Sjsg 	return i915_gem_object_mmap(obj, mmo, vma);
1502f005ef32Sjsg }
1503f005ef32Sjsg 
1504c349dbc7Sjsg #else /* !__linux__ */
1505c349dbc7Sjsg 
1506c349dbc7Sjsg /*
1507c349dbc7Sjsg  * This overcomes the limitation in drm_gem_mmap's assignment of a
1508c349dbc7Sjsg  * drm_gem_object as the vma->vm_private_data, since we need to
1509c349dbc7Sjsg  * be able to resolve multiple mmap offsets which could be tied
1510c349dbc7Sjsg  * to a single gem object.
1511c349dbc7Sjsg  */
1512c349dbc7Sjsg struct uvm_object *
1513c349dbc7Sjsg i915_gem_mmap(struct file *filp, vm_prot_t accessprot,
1514c349dbc7Sjsg     voff_t off, vsize_t size)
1515c349dbc7Sjsg {
1516c349dbc7Sjsg 	struct drm_vma_offset_node *node;
1517c349dbc7Sjsg 	struct drm_file *priv = (void *)filp;
1518c349dbc7Sjsg 	struct drm_device *dev = priv->minor->dev;
1519c349dbc7Sjsg 	struct drm_i915_gem_object *obj = NULL;
1520c349dbc7Sjsg 	struct i915_mmap_offset *mmo = NULL;
1521c349dbc7Sjsg 
1522c349dbc7Sjsg 	if (drm_dev_is_unplugged(dev))
1523c349dbc7Sjsg 		return NULL;
1524c349dbc7Sjsg 
1525c349dbc7Sjsg 	rcu_read_lock();
1526c349dbc7Sjsg 	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
1527c349dbc7Sjsg 	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
1528c349dbc7Sjsg 						  off >> PAGE_SHIFT,
1529c349dbc7Sjsg 						  atop(round_page(size)));
15300608e4cbSjsg 	if (node && drm_vma_node_is_allowed(node, priv)) {
1531c349dbc7Sjsg 		/*
1532c349dbc7Sjsg 		 * Skip 0-refcnted objects as they are in the process of being
1533c349dbc7Sjsg 		 * destroyed and will be invalid when the vma manager lock
1534c349dbc7Sjsg 		 * is released.
1535c349dbc7Sjsg 		 */
1536d09e62dbSjsg 		if (!node->driver_private) {
1537c349dbc7Sjsg 			mmo = container_of(node, struct i915_mmap_offset, vma_node);
1538c349dbc7Sjsg 			obj = i915_gem_object_get_rcu(mmo->obj);
1539d09e62dbSjsg 
1540d09e62dbSjsg 			GEM_BUG_ON(obj && obj->ops->mmap_ops);
1541d09e62dbSjsg 		} else {
1542d09e62dbSjsg 			obj = i915_gem_object_get_rcu
1543d09e62dbSjsg 				(container_of(node, struct drm_i915_gem_object,
1544d09e62dbSjsg 					      base.vma_node));
1545d09e62dbSjsg 
1546d09e62dbSjsg 			GEM_BUG_ON(obj && !obj->ops->mmap_ops);
1547d09e62dbSjsg 		}
1548c349dbc7Sjsg 	}
1549c349dbc7Sjsg 	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
1550c349dbc7Sjsg 	rcu_read_unlock();
1551c349dbc7Sjsg 	if (!obj)
1552c349dbc7Sjsg 		return NULL;
1553c349dbc7Sjsg 
1554c349dbc7Sjsg 	if (i915_gem_object_is_readonly(obj)) {
1555c349dbc7Sjsg 		if (accessprot & PROT_WRITE) {
1556c349dbc7Sjsg 			i915_gem_object_put(obj);
1557c349dbc7Sjsg 			return NULL;
1558c349dbc7Sjsg 		}
1559c349dbc7Sjsg 	}
1560c349dbc7Sjsg 
1561d09e62dbSjsg 	if (obj->ops->mmap_ops)
1562d09e62dbSjsg 		uvm_obj_init(&obj->base.uobj, obj->ops->mmap_ops, 1);
1563d09e62dbSjsg 
1564c349dbc7Sjsg 	return &obj->base.uobj;
1565c349dbc7Sjsg }
1566c349dbc7Sjsg 
1567c349dbc7Sjsg #endif /* !__linux__ */
1568c349dbc7Sjsg 
1569f005ef32Sjsg #ifdef notyet
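/*
 * Map a framebuffer object on the kernel's behalf (e.g. for the fbdev
 * emulation) rather than via a userspace-supplied mmap offset:
 * TTM-backed objects reuse their own fake offset and mmap_ops, while
 * stolen/smem objects get a GTT or WC mmap offset attached here first.
 */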
1570f005ef32Sjsg int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
1571f005ef32Sjsg {
1572f005ef32Sjsg 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1573f005ef32Sjsg 	struct drm_device *dev = &i915->drm;
1574f005ef32Sjsg 	struct i915_mmap_offset *mmo = NULL;
1575f005ef32Sjsg 	enum i915_mmap_type mmap_type;
1576f005ef32Sjsg 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
1577f005ef32Sjsg 
1578f005ef32Sjsg 	if (drm_dev_is_unplugged(dev))
1579f005ef32Sjsg 		return -ENODEV;
1580f005ef32Sjsg 
1581f005ef32Sjsg 	/* handle ttm object */
1582f005ef32Sjsg 	if (obj->ops->mmap_ops) {
1583f005ef32Sjsg 		/*
1584f005ef32Sjsg 		 * The ttm fault handler, ttm_bo_vm_fault_reserved(), uses the
1585f005ef32Sjsg 		 * fake offset to calculate the page offset, so set that up.
1586f005ef32Sjsg 		 */
1587f005ef32Sjsg 		vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
1588f005ef32Sjsg 	} else {
1589f005ef32Sjsg 		/* handle stolen and smem objects */
1590f005ef32Sjsg 		mmap_type = i915_ggtt_has_aperture(ggtt) ? I915_MMAP_TYPE_GTT : I915_MMAP_TYPE_WC;
1591f005ef32Sjsg 		mmo = mmap_offset_attach(obj, mmap_type, NULL);
1592f005ef32Sjsg 		if (IS_ERR(mmo))
1593f005ef32Sjsg 			return PTR_ERR(mmo);
15947e709411Sjsg 
15957e709411Sjsg 		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
1596f005ef32Sjsg 	}
1597f005ef32Sjsg 
1598f005ef32Sjsg 	/*
1599f005ef32Sjsg 	 * When we install vm_ops for mmap we are too late for the
1600f005ef32Sjsg 	 * vm_ops->open() call that would take a reference on this obj,
1601f005ef32Sjsg 	 * yet that reference is still dropped by vm_ops->close().
1602f005ef32Sjsg 	 * To keep the count balanced, take the extra reference here.
1603f005ef32Sjsg 	 */
1604f005ef32Sjsg 	obj = i915_gem_object_get(obj);
1605f005ef32Sjsg 	return i915_gem_object_mmap(obj, mmo, vma);
1606f005ef32Sjsg }
1607f005ef32Sjsg #endif /* notyet */
1608f005ef32Sjsg 
1609c349dbc7Sjsg #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1610c349dbc7Sjsg #include "selftests/i915_gem_mman.c"
1611c349dbc7Sjsg #endif
1612