/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_userptr.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the interval notifier for the range (mm) about to be updated
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	long r;

	if (!mmu_notifier_range_blockable(range))
		return false;

	write_lock(&i915->mm.notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	write_unlock(&i915->mm.notifier_lock);

	/*
	 * We don't wait when the process is exiting. This is valid
	 * because the object will be cleaned up anyway.
	 *
	 * This is also temporarily required as a hack, because we
	 * cannot currently force non-consistent batch buffers to preempt
	 * and reschedule by waiting on it, which hangs processes on exit.
	 */
	if (current->flags & PF_EXITING)
		return true;

	/* we will unbind on next submission, still have userptr pins */
	r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r <= 0)
		drm_err(&i915->drm, "(%ld) failed to wait for idle\n", r);

	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

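/*
 * Register an mmu interval notifier on current->mm covering the userptr
 * range, so that changes to the backing mapping are reported to
 * i915_gem_userptr_invalidate().
 */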
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

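/*
 * Drop one reference on the cached page array; when the last reference goes
 * away, unpin and free the user pages pinned at submission time.
 */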
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

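/*
 * Build an sg_table from the already-pinned user pages and install it on the
 * object. Takes an extra page_ref that is dropped again in put_pages or on
 * error; fails with -EAGAIN if no pages are currently pinned.
 */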
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
	struct sg_table *st;
	struct page **pvec;
	unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
	int ret;

	if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
		return -E2BIG;

	num_pages = obj->base.size >> PAGE_SHIFT;
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

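/*
 * Release an sg_table previously installed by get_pages: mark the pages
 * dirty and accessed where appropriate, free the table and drop the
 * page_ref taken in get_pages.
 */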
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_folio that is already holding the lock
			 * on the folio. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

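/*
 * Evict the object from the GTT and release its current page array so that
 * a fresh set of user pages can be pinned on the next submission.
 */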
static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

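/*
 * Pin the user pages backing the object ahead of a submission. If the mmu
 * notifier sequence is unchanged and pages are already cached, this is a
 * no-op; otherwise the old pages are unbound, the range is re-pinned with
 * pin_user_pages_fast() and the result is checked against the notifier
 * before being installed. Returns -EAGAIN if an invalidation raced with us.
 */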
int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
				    obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

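/*
 * Final check after the request has been queued: if the notifier sequence
 * changed since submit_init, the pinned pages are stale and the caller must
 * restart the submission.
 */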
int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */

		return -EAGAIN;
	}

	return 0;
}

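/*
 * Check that the userptr range can currently be pinned, without keeping the
 * pages around: pin and immediately unpin them under the object lock.
 */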
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity, not use the pages,
		 * it doesn't matter if we collide with the mmu notifier,
		 * and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

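/*
 * Object release: warn if page references are still held and remove the mmu
 * interval notifier, if one was installed.
 */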
static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	if (!obj->userptr.notifier.mm)
		return;

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

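/*
 * Userptr objects are not first-class GEM objects: dma-buf export and the
 * pread/pwrite CPU access ioctls are rejected, since userspace can access
 * the memory directly through its own pointer.
 */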
static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

#ifdef notyet

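/*
 * Walk the VMAs covering [addr, addr + len) and fail with -EFAULT if the
 * range contains a hole or a PFN/mixed mapping, i.e. anything that is not
 * backed by ordinary struct pages.
 */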
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	if (vma || addr < end)
		return -EFAULT;
	return 0;
}
#endif

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and with broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!to_gt(dev_priv)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
#ifdef notyet
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
#else
		STUB();
		return -ENOSYS;
#endif
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}

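/*
 * One-time driver init: set up the rwlock taken by the invalidation callback
 * when bumping the notifier sequence.
 */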
int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_MMU_NOTIFIER
	rwlock_init(&dev_priv->mm.notifier_lock);
#endif

	return 0;
}

void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
{
}