xref: /dflybsd-src/sys/dev/drm/i915/i915_gem_userptr.c (revision 6af9a77b394698e42f3a7ec6126497a3fc2fd470)
1 /*
2  * Copyright © 2012-2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <drm/drmP.h>
26 #include <drm/i915_drm.h>
27 #include "i915_drv.h"
28 #include "i915_trace.h"
29 #include "intel_drv.h"
30 
31 #if defined(CONFIG_MMU_NOTIFIER)
32 #include <linux/interval_tree.h>
33 
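/*
 * MMU-notifier bookkeeping: one i915_mmu_notifier is registered per
 * struct mm_struct (hashed off the device by mm), and it tracks every
 * userptr object created against that address space in an interval tree
 * keyed by the object's user range, so an invalidation of [start, end)
 * can find the affected GEM objects.
 */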
34 struct i915_mmu_notifier {
35 	spinlock_t lock;
36 	struct hlist_node node;
37 	struct mmu_notifier mn;
38 	struct rb_root objects;
39 	struct drm_device *dev;
40 	struct mm_struct *mm;
41 	struct work_struct work;
42 	unsigned long count;
43 	unsigned long serial;
44 };
45 
46 struct i915_mmu_object {
47 	struct i915_mmu_notifier *mmu;
48 	struct interval_tree_node it;
49 	struct drm_i915_gem_object *obj;
50 };
51 
52 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
53 						       struct mm_struct *mm,
54 						       unsigned long start,
55 						       unsigned long end)
56 {
57 	struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
58 	struct interval_tree_node *it = NULL;
59 	unsigned long serial = 0;
60 
61 	end--; /* interval ranges are inclusive, but invalidate range is exclusive */
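	/* For instance, an invalidation of [0x10000, 0x13000) is matched
	 * against the interval tree using the inclusive range
	 * [0x10000, 0x12fff].
	 */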
62 	while (start < end) {
63 		struct drm_i915_gem_object *obj;
64 
65 		obj = NULL;
66 		spin_lock(&mn->lock);
67 		if (serial == mn->serial)
68 			it = interval_tree_iter_next(it, start, end);
69 		else
70 			it = interval_tree_iter_first(&mn->objects, start, end);
71 		if (it != NULL) {
72 			obj = container_of(it, struct i915_mmu_object, it)->obj;
73 			drm_gem_object_reference(&obj->base);
74 			serial = mn->serial;
75 		}
76 		spin_unlock(&mn->lock);
77 		if (obj == NULL)
78 			return;
79 
80 		mutex_lock(&mn->dev->struct_mutex);
81 		/* Cancel any active worker and force us to re-evaluate gup */
82 		obj->userptr.work = NULL;
83 
84 		if (obj->pages != NULL) {
85 			struct drm_i915_private *dev_priv = to_i915(mn->dev);
86 			struct i915_vma *vma, *tmp;
87 			bool was_interruptible;
88 
89 			was_interruptible = dev_priv->mm.interruptible;
90 			dev_priv->mm.interruptible = false;
91 
92 			list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
93 				int ret = i915_vma_unbind(vma);
94 				WARN_ON(ret && ret != -EIO);
95 			}
96 			WARN_ON(i915_gem_object_put_pages(obj));
97 
98 			dev_priv->mm.interruptible = was_interruptible;
99 		}
100 
101 		start = obj->userptr.ptr + obj->base.size;
102 
103 		drm_gem_object_unreference(&obj->base);
104 		mutex_unlock(&mn->dev->struct_mutex);
105 	}
106 }
107 
108 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
109 	.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
110 };
111 
112 static struct i915_mmu_notifier *
113 __i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm)
114 {
115 	struct drm_i915_private *dev_priv = to_i915(dev);
116 	struct i915_mmu_notifier *mmu;
117 
118 	/* Protected by dev->struct_mutex */
119 	hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm)
120 		if (mmu->mm == mm)
121 			return mmu;
122 
123 	return NULL;
124 }
125 
126 static struct i915_mmu_notifier *
127 i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
128 {
129 	struct drm_i915_private *dev_priv = to_i915(dev);
130 	struct i915_mmu_notifier *mmu;
131 	int ret;
132 
133 	lockdep_assert_held(&dev->struct_mutex);
134 
135 	mmu = __i915_mmu_notifier_lookup(dev, mm);
136 	if (mmu)
137 		return mmu;
138 
139 	mmu = kmalloc(sizeof(*mmu), GFP_KERNEL);
140 	if (mmu == NULL)
141 		return ERR_PTR(-ENOMEM);
142 
143 	spin_lock_init(&mmu->lock);
144 	mmu->dev = dev;
145 	mmu->mn.ops = &i915_gem_userptr_notifier;
146 	mmu->mm = mm;
147 	mmu->objects = RB_ROOT;
148 	mmu->count = 0;
149 	mmu->serial = 0;
150 
151 	/* Protected by mmap_sem (write-lock) */
152 	ret = __mmu_notifier_register(&mmu->mn, mm);
153 	if (ret) {
154 		kfree(mmu);
155 		return ERR_PTR(ret);
156 	}
157 
158 	/* Protected by dev->struct_mutex */
159 	hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm);
160 	return mmu;
161 }
162 
163 static void
164 __i915_mmu_notifier_destroy_worker(struct work_struct *work)
165 {
166 	struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work);
167 	mmu_notifier_unregister(&mmu->mn, mmu->mm);
168 	kfree(mmu);
169 }
170 
171 static void
172 __i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu)
173 {
174 	lockdep_assert_held(&mmu->dev->struct_mutex);
175 
176 	/* Protected by dev->struct_mutex */
177 	hash_del(&mmu->node);
178 
179 	/* Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex.
180 	 * We enter the function holding struct_mutex, therefore we need
181 	 * to drop our mutex prior to calling mmu_notifier_unregister in
182 	 * order to prevent lock inversion (and system-wide deadlock)
183 	 * between the mmap_sem and struct_mutex. Hence we defer the
184 	 * unregistration to a workqueue where we hold no locks.
185 	 */
186 	INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker);
187 	schedule_work(&mmu->work);
188 }
189 
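/*
 * The serial is bumped whenever the interval tree is modified so that the
 * invalidate walker above can tell whether the tree changed under it while
 * the spinlock was dropped; 0 is skipped because the walker uses 0 as its
 * "not yet sampled" value.
 */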
190 static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu)
191 {
192 	if (++mmu->serial == 0)
193 		mmu->serial = 1;
194 }
195 
196 static void
197 i915_mmu_notifier_del(struct i915_mmu_notifier *mmu,
198 		      struct i915_mmu_object *mn)
199 {
200 	lockdep_assert_held(&mmu->dev->struct_mutex);
201 
202 	spin_lock(&mmu->lock);
203 	interval_tree_remove(&mn->it, &mmu->objects);
204 	__i915_mmu_notifier_update_serial(mmu);
205 	spin_unlock(&mmu->lock);
206 
207 	/* Protected against _add() by dev->struct_mutex */
208 	if (--mmu->count == 0)
209 		__i915_mmu_notifier_destroy(mmu);
210 }
211 
212 static int
213 i915_mmu_notifier_add(struct i915_mmu_notifier *mmu,
214 		      struct i915_mmu_object *mn)
215 {
216 	struct interval_tree_node *it;
217 	int ret;
218 
219 	ret = i915_mutex_lock_interruptible(mmu->dev);
220 	if (ret)
221 		return ret;
222 
223 	/* Make sure we drop the final active reference (and thereby
224 	 * remove the objects from the interval tree) before we do
225 	 * the check for overlapping objects.
226 	 */
227 	i915_gem_retire_requests(mmu->dev);
228 
229 	/* Disallow overlapping userptr objects */
230 	spin_lock(&mmu->lock);
231 	it = interval_tree_iter_first(&mmu->objects,
232 				      mn->it.start, mn->it.last);
233 	if (it) {
234 		struct drm_i915_gem_object *obj;
235 
236 		/* We only need to check the first object in the range as it
237 		 * either has cancelled gup work queued and we need to
238 		 * return to the user to give time for the gup-workers
239 		 * to flush their object references, upon which the object will
240 		 * be removed from the interval-tree, or the range is
241 		 * still in use by another client and the overlap is invalid.
242 		 */
243 
244 		obj = container_of(it, struct i915_mmu_object, it)->obj;
245 		ret = obj->userptr.workers ? -EAGAIN : -EINVAL;
246 	} else {
247 		interval_tree_insert(&mn->it, &mmu->objects);
248 		__i915_mmu_notifier_update_serial(mmu);
249 		ret = 0;
250 	}
251 	spin_unlock(&mmu->lock);
252 	mutex_unlock(&mmu->dev->struct_mutex);
253 
254 	return ret;
255 }
256 
257 static void
258 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
259 {
260 	struct i915_mmu_object *mn;
261 
262 	mn = obj->userptr.mn;
263 	if (mn == NULL)
264 		return;
265 
266 	i915_mmu_notifier_del(mn->mmu, mn);
267 	obj->userptr.mn = NULL;
268 }
269 
270 static int
271 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
272 				    unsigned flags)
273 {
274 	struct i915_mmu_notifier *mmu;
275 	struct i915_mmu_object *mn;
276 	int ret;
277 
278 	if (flags & I915_USERPTR_UNSYNCHRONIZED)
279 		return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
280 
281 	down_write(&obj->userptr.mm->mmap_sem);
282 	ret = i915_mutex_lock_interruptible(obj->base.dev);
283 	if (ret == 0) {
284 		mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm);
285 		if (!IS_ERR(mmu))
286 			mmu->count++; /* preemptive add to act as a refcount */
287 		else
288 			ret = PTR_ERR(mmu);
289 		mutex_unlock(&obj->base.dev->struct_mutex);
290 	}
291 	up_write(&obj->userptr.mm->mmap_sem);
292 	if (ret)
293 		return ret;
294 
295 	mn = kzalloc(sizeof(*mn), GFP_KERNEL);
296 	if (mn == NULL) {
297 		ret = -ENOMEM;
298 		goto destroy_mmu;
299 	}
300 
301 	mn->mmu = mmu;
302 	mn->it.start = obj->userptr.ptr;
303 	mn->it.last = mn->it.start + obj->base.size - 1;
304 	mn->obj = obj;
305 
306 	ret = i915_mmu_notifier_add(mmu, mn);
307 	if (ret)
308 		goto free_mn;
309 
310 	obj->userptr.mn = mn;
311 	return 0;
312 
313 free_mn:
314 	kfree(mn);
315 destroy_mmu:
316 	mutex_lock(&obj->base.dev->struct_mutex);
317 	if (--mmu->count == 0)
318 		__i915_mmu_notifier_destroy(mmu);
319 	mutex_unlock(&obj->base.dev->struct_mutex);
320 	return ret;
321 }
322 
323 #else
324 
325 #if 0
326 static void
327 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
328 {
329 }
330 
331 static int
332 i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
333 				    unsigned flags)
334 {
335 	if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
336 		return -ENODEV;
337 
338 
339 	return 0;
340 }
341 #endif
342 #endif
343 
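/*
 * A deferred get_user_pages() request: the object whose backing pages are to
 * be pinned and the task that queued the request, on whose behalf the pages
 * are looked up in obj->userptr.mm.
 */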
344 struct get_pages_work {
345 	struct work_struct work;
346 	struct drm_i915_gem_object *obj;
347 	struct task_struct *task;
348 };
349 
350 
351 #if IS_ENABLED(CONFIG_SWIOTLB)
352 #define swiotlb_active() swiotlb_nr_tbl()
353 #else
354 #define swiotlb_active() 0
355 #endif
356 
357 #if 0
358 static int
359 st_set_pages(struct sg_table **st, struct vm_page **pvec, int num_pages)
360 {
361 	struct scatterlist *sg;
362 	int ret, n;
363 
364 	*st = kmalloc(sizeof(**st), M_DRM, M_WAITOK);
365 	if (*st == NULL)
366 		return -ENOMEM;
367 
368 	if (swiotlb_active()) {
369 		ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
370 		if (ret)
371 			goto err;
372 
373 		for_each_sg((*st)->sgl, sg, num_pages, n)
374 			sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
375 	} else {
376 		ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
377 						0, num_pages << PAGE_SHIFT,
378 						GFP_KERNEL);
379 		if (ret)
380 			goto err;
381 	}
382 
383 	return 0;
384 
385 err:
386 	kfree(*st);
387 	*st = NULL;
388 	return ret;
389 }
390 
391 static void
392 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
393 {
394 	struct get_pages_work *work = container_of(_work, typeof(*work), work);
395 	struct drm_i915_gem_object *obj = work->obj;
396 	struct drm_device *dev = obj->base.dev;
397 	const int num_pages = obj->base.size >> PAGE_SHIFT;
398 	struct page **pvec;
399 	int pinned, ret;
400 
401 	ret = -ENOMEM;
402 	pinned = 0;
403 
404 	pvec = kmalloc(num_pages*sizeof(struct page *),
405 		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
406 	if (pvec == NULL)
407 		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
408 	if (pvec != NULL) {
409 		struct mm_struct *mm = obj->userptr.mm;
410 
411 		down_read(&mm->mmap_sem);
412 		while (pinned < num_pages) {
413 			ret = get_user_pages(work->task, mm,
414 					     obj->userptr.ptr + pinned * PAGE_SIZE,
415 					     num_pages - pinned,
416 					     !obj->userptr.read_only, 0,
417 					     pvec + pinned, NULL);
418 			if (ret < 0)
419 				break;
420 
421 			pinned += ret;
422 		}
423 		up_read(&mm->mmap_sem);
424 	}
425 
426 	mutex_lock(&dev->struct_mutex);
427 	if (obj->userptr.work != &work->work) {
428 		ret = 0;
429 	} else if (pinned == num_pages) {
430 		ret = st_set_pages(&obj->pages, pvec, num_pages);
431 		if (ret == 0) {
432 			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
433 			pinned = 0;
434 		}
435 	}
436 
437 	obj->userptr.work = ERR_PTR(ret);
438 	obj->userptr.workers--;
439 	drm_gem_object_unreference(&obj->base);
440 	mutex_unlock(&dev->struct_mutex);
441 
442 	release_pages(pvec, pinned, 0);
443 	drm_free_large(pvec);
444 
445 	put_task_struct(work->task);
446 	kfree(work);
447 }
448 
449 static int
450 i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
451 {
452 	const int num_pages = obj->base.size >> PAGE_SHIFT;
453 	struct page **pvec;
454 	int pinned, ret;
455 
456 	/* If userspace should engineer that these pages are replaced in
457 	 * the vma between us binding them into the GTT and the completion
458 	 * of rendering... Their loss. If they change the mapping of their
459 	 * pages, they need to create a new bo to point to the new vma.
460 	 *
461 	 * However, that still leaves open the possibility of the vma
462 	 * being copied upon fork. Which falls under the same userspace
463 	 * synchronisation issue as a regular bo, except that this time
464 	 * the process may not be expecting that a particular piece of
465 	 * memory is tied to the GPU.
466 	 *
467 	 * Fortunately, we can hook into the mmu_notifier in order to
468 	 * discard the page references prior to anything nasty happening
469 	 * to the vma (discard or cloning) which should prevent the more
470 	 * egregious cases from causing harm.
471 	 */
472 
473 	pvec = NULL;
474 	pinned = 0;
475 	if (obj->userptr.mm == current->mm) {
476 		pvec = kmalloc(num_pages*sizeof(struct page *),
477 			       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
478 		if (pvec == NULL) {
479 			pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
480 			if (pvec == NULL)
481 				return -ENOMEM;
482 		}
483 
484 		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
485 					       !obj->userptr.read_only, pvec);
486 	}
487 	if (pinned < num_pages) {
488 		if (pinned < 0) {
489 			ret = pinned;
490 			pinned = 0;
491 		} else {
492 			/* Spawn a worker so that we can acquire the
493 			 * user pages without holding our mutex. Access
494 			 * to the user pages requires mmap_sem, and we have
495 			 * a strict lock ordering of mmap_sem, struct_mutex -
496 			 * we already hold struct_mutex here and so cannot
497 			 * call gup without encountering a lock inversion.
498 			 *
499 			 * Userspace will keep on repeating the operation
500 			 * (thanks to EAGAIN) until either we hit the fast
501 			 * path or the worker completes. If the worker is
502 			 * cancelled or superseded, the task is still run
503 			 * but the results are ignored. (This means we may be
504 			 * left with a stray reference on the object that we
505 			 * need to be wary of when checking for existing
506 			 * objects during creation.)
507 			 * If the worker encounters an error, it reports
508 			 * that error back to this function through
509 			 * obj->userptr.work = ERR_PTR.
510 			 */
511 			ret = -EAGAIN;
512 			if (obj->userptr.work == NULL &&
513 			    obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
514 				struct get_pages_work *work;
515 
516 				work = kmalloc(sizeof(*work), GFP_KERNEL);
517 				if (work != NULL) {
518 					obj->userptr.work = &work->work;
519 					obj->userptr.workers++;
520 
521 					work->obj = obj;
522 					drm_gem_object_reference(&obj->base);
523 
524 					work->task = current;
525 					get_task_struct(work->task);
526 
527 					INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
528 					schedule_work(&work->work);
529 				} else
530 					ret = -ENOMEM;
531 			} else {
532 				if (IS_ERR(obj->userptr.work)) {
533 					ret = PTR_ERR(obj->userptr.work);
534 					obj->userptr.work = NULL;
535 				}
536 			}
537 		}
538 	} else {
539 		ret = st_set_pages(&obj->pages, pvec, num_pages);
540 		if (ret == 0) {
541 			obj->userptr.work = NULL;
542 			pinned = 0;
543 		}
544 	}
545 
546 	release_pages(pvec, pinned, 0);
547 	drm_free_large(pvec);
548 	return ret;
549 }
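
/*
 * Illustrative sketch (not part of this driver) of the userspace side of the
 * -EAGAIN contract described above: callers simply retry the ioctl until the
 * gup worker has completed. libdrm's drmIoctl() wrapper already does this,
 * along the lines of:
 *
 *	int drmIoctl(int fd, unsigned long request, void *arg)
 *	{
 *		int ret;
 *
 *		do {
 *			ret = ioctl(fd, request, arg);
 *		} while (ret == -1 && (errno == EINTR || errno == EAGAIN));
 *		return ret;
 *	}
 */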
550 
551 static void
552 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
553 {
554 	struct scatterlist *sg;
555 	int i;
556 
557 	BUG_ON(obj->userptr.work != NULL);
558 
559 	if (obj->madv != I915_MADV_WILLNEED)
560 		obj->dirty = 0;
561 
562 	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
563 		struct page *page = sg_page(sg);
564 
565 		if (obj->dirty)
566 			set_page_dirty(page);
567 
568 		mark_page_accessed(page);
569 		page_cache_release(page);
570 	}
571 	obj->dirty = 0;
572 
573 	sg_free_table(obj->pages);
574 	kfree(obj->pages);
575 }
576 
577 static void
578 i915_gem_userptr_release(struct drm_i915_gem_object *obj)
579 {
580 	i915_gem_userptr_release__mmu_notifier(obj);
581 
582 	if (obj->userptr.mm) {
583 		mmput(obj->userptr.mm);
584 		obj->userptr.mm = NULL;
585 	}
586 }
587 
588 static int
589 i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
590 {
591 	if (obj->userptr.mn)
592 		return 0;
593 
594 	return i915_gem_userptr_init__mmu_notifier(obj, 0);
595 }
596 
597 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
598 	.dmabuf_export = i915_gem_userptr_dmabuf_export,
599 	.get_pages = i915_gem_userptr_get_pages,
600 	.put_pages = i915_gem_userptr_put_pages,
601 	.release = i915_gem_userptr_release,
602 };
603 
604 /**
605  * Creates a new mm object that wraps some normal memory from the process
606  * context - user memory.
607  *
608  * We impose several restrictions upon the memory being mapped
609  * into the GPU.
610  * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
611  * 2. It cannot overlap any other userptr object in the same address space.
612  * 3. It must be normal system memory, not a pointer into another map of IO
613  *    space (e.g. it must not be a GTT mmapping of another object).
614  * 4. We only allow a bo as large as we could in theory map into the GTT,
615  *    that is we limit the size to the total size of the GTT.
616  * 5. The bo is marked as being snoopable. The backing pages are left
617  *    accessible directly by the CPU, but reads and writes by the GPU may
618  *    incur the cost of a snoop (unless you have an LLC architecture).
619  *
620  * Synchronisation between multiple users and the GPU is left to userspace
621  * through the normal set-domain-ioctl. The kernel will enforce that the
622  * GPU relinquishes the VMA before it is returned back to the system
623  * i.e. upon free(), munmap() or process termination. However, the userspace
624  * malloc() library may not immediately relinquish the VMA after free() and
625  * instead reuse it whilst the GPU is still reading and writing to the VMA.
626  * Caveat emptor.
627  *
628  * Also note, that the object created here is not currently a "first class"
629  * object, in that several ioctls are banned. These are the CPU access
630  * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
631  * direct access via your pointer rather than use those ioctls.
632  *
633  * If you think this is a good interface to use to pass GPU memory between
634  * drivers, please use dma-buf instead. In fact, wherever possible use
635  * dma-buf instead.
636  */
637 int
638 i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
639 {
640 	struct drm_i915_private *dev_priv = dev->dev_private;
641 	struct drm_i915_gem_userptr *args = data;
642 	struct drm_i915_gem_object *obj;
643 	int ret;
644 	u32 handle;
645 
646 	if (args->flags & ~(I915_USERPTR_READ_ONLY |
647 			    I915_USERPTR_UNSYNCHRONIZED))
648 		return -EINVAL;
649 
650 	if (offset_in_page(args->user_ptr | args->user_size))
651 		return -EINVAL;
652 
653 	if (args->user_size > dev_priv->gtt.base.total)
654 		return -E2BIG;
655 
656 	if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
657 		       (char __user *)(unsigned long)args->user_ptr, args->user_size))
658 		return -EFAULT;
659 
660 	if (args->flags & I915_USERPTR_READ_ONLY) {
661 		/* On almost all of the current hw, we cannot tell the GPU that a
662 		 * page is read-only, so this is just a placeholder in the uAPI.
663 		 */
664 		return -ENODEV;
665 	}
666 
667 	/* Allocate the new object */
668 	obj = i915_gem_object_alloc(dev);
669 	if (obj == NULL)
670 		return -ENOMEM;
671 
672 	drm_gem_private_object_init(dev, &obj->base, args->user_size);
673 	i915_gem_object_init(obj, &i915_gem_userptr_ops);
674 	obj->cache_level = I915_CACHE_LLC;
675 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
676 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
677 
678 	obj->userptr.ptr = args->user_ptr;
679 	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
680 
681 	/* And keep a pointer to the current->mm for resolving the user pages
682 	 * at binding. This means that we need to hook into the mmu_notifier
683 	 * in order to detect if the mmu is destroyed.
684 	 */
685 	ret = -ENOMEM;
686 	if ((obj->userptr.mm = get_task_mm(current)))
687 		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
688 	if (ret == 0)
689 		ret = drm_gem_handle_create(file, &obj->base, &handle);
690 
691 	/* drop reference from allocate - handle holds it now */
692 	drm_gem_object_unreference_unlocked(&obj->base);
693 	if (ret)
694 		return ret;
695 
696 	args->handle = handle;
697 	return 0;
698 }
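
/*
 * Minimal userspace sketch (not part of this driver) of creating a userptr
 * bo under the restrictions listed above. It assumes libdrm's drmIoctl(),
 * a 4KiB page size, and placeholder fd, SIZE and use_handle():
 *
 *	struct drm_i915_gem_userptr arg = { 0 };
 *	void *ptr;
 *
 *	posix_memalign(&ptr, 4096, SIZE);	// page-aligned ptr and size
 *	arg.user_ptr = (uintptr_t)ptr;
 *	arg.user_size = SIZE;
 *	arg.flags = 0;				// synchronised via mmu_notifier
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg) == 0)
 *		use_handle(arg.handle);
 *
 * CPU access afterwards goes directly through ptr (mmap/pwrite/pread on the
 * handle are banned), with coherency against the GPU managed through the
 * usual set-domain ioctl.
 */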
699 #endif
700 
701 int
702 i915_gem_init_userptr(struct drm_device *dev)
703 {
704 #if defined(CONFIG_MMU_NOTIFIER)
705 	struct drm_i915_private *dev_priv = to_i915(dev);
706 	hash_init(dev_priv->mmu_notifiers);
707 #endif
708 	return 0;
709 }
710