/* $NetBSD: amdgpu_mn.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $ */

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
/**
 * DOC: MMU Notifier
 *
 * For coherent userptr handling the driver registers an MMU notifier to be
 * informed about updates to the page tables of a process.
 *
 * When somebody tries to invalidate the page tables we block the update until
 * all operations on the pages in question are completed, then those pages are
 * marked as accessed and also dirty if it wasn't a read only access.
 *
 * New command submissions using the userptrs in question are delayed until
 * all page table invalidations are completed and we once more see a coherent
 * process address space.
 */
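
/*
 * Illustrative sketch (an assumption about the consumer side, not code from
 * this file): users of a registered notifier pair each pin of the pages with
 * a sequence check and retry if an invalidation raced in between.  Roughly,
 * with the stock mmu_interval_* API and the pinning step elided:
 *
 *	unsigned long seq;
 * retry:
 *	seq = mmu_interval_read_begin(&bo->notifier);
 *	... pin the user pages ...
 *	mutex_lock(&adev->notifier_lock);
 *	if (mmu_interval_read_retry(&bo->notifier, seq)) {
 *		mutex_unlock(&adev->notifier_lock);
 *		goto retry;
 *	}
 *	... pages are coherent: commit the submission ...
 *	mutex_unlock(&adev->notifier_lock);
 */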

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_mn.c,v 1.3 2021/12/18 23:44:58 riastradh Exp $");

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drm.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

/**
 * amdgpu_mn_invalidate_gfx - callback to notify about mm change
 *
 * @mni: the notifier for the range (mm) that is about to be updated
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	long r;

	/* We may only block if the invalidation is allowed to sleep. */
	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&adev->notifier_lock);

	/* Bump the sequence number so concurrent users see the invalidation. */
	mmu_interval_set_seq(mni, cur_seq);

	/* Wait for all pending GPU work on the BO to complete. */
	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
				      MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&adev->notifier_lock);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
	return true;
}

static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = {
	.invalidate = amdgpu_mn_invalidate_gfx,
};

/**
 * amdgpu_mn_invalidate_hsa - callback to notify about mm change
 *
 * @mni: the notifier for the range (mm) that is about to be updated
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * We temporarily evict the BO attached to this range. This necessitates
 * evicting all user-mode queues of the process.
 */
static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
{
	struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&adev->notifier_lock);

	mmu_interval_set_seq(mni, cur_seq);

	/* Evicting the userptr also evicts the process's user-mode queues. */
	amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
	mutex_unlock(&adev->notifier_lock);

	return true;
}

static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = {
	.invalidate = amdgpu_mn_invalidate_hsa,
};

/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr address to monitor
 *
 * Registers an mmu_interval_notifier for the given BO at the specified
 * address.  Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	/* KFD BOs take the HSA eviction path; everything else waits on fences. */
	if (bo->kfd_bo)
		return mmu_interval_notifier_insert(&bo->notifier, current->mm,
						    addr, amdgpu_bo_size(bo),
						    &amdgpu_mn_hsa_ops);
	return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
					    amdgpu_bo_size(bo),
					    &amdgpu_mn_gfx_ops);
}
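
/*
 * Illustrative usage (hypothetical call site; the real callers live in the
 * userptr ioctl paths): registration happens once when a userptr BO is
 * created, before any of its pages are pinned:
 *
 *	r = amdgpu_mn_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 */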
/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of mmu notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
	/* notifier.mm doubles as the "is registered" flag. */
	if (!bo->notifier.mm)
		return;
	mmu_interval_notifier_remove(&bo->notifier);
	bo->notifier.mm = NULL;
}