/*	$NetBSD: amdgpu_vm_sdma.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vm_sdma.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include "amdgpu_vm.h"
#include "amdgpu_job.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"

#define AMDGPU_VM_SDMA_MIN_NUM_DW	256u
#define AMDGPU_VM_SDMA_MAX_NUM_DW	(16u * 1024u)

/**
 * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
 *
 * @table: newly allocated or validated PD/PT
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
{
	int r;

	r = amdgpu_ttm_alloc_gart(&table->tbo);
	if (r)
		return r;

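	/*
	 * A shadow BO, when present, is the system-memory mirror of this
	 * page table kept for restoring it after a GPU reset; it needs a
	 * GART mapping as well.
	 */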
	if (table->shadow)
		r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);

	return r;
}

/**
 * amdgpu_vm_sdma_prepare - prepare SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @owner: owner we need to sync to
 * @exclusive: exclusive move fence we need to sync to
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
				  void *owner, struct dma_fence *exclusive)
{
	struct amdgpu_bo *root = p->vm->root.base.bo;
	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
	int r;

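	/* The IB size argument is in bytes, so convert from dwords. */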
	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
	if (r)
		return r;

	p->num_dw_left = ndw;

	/* Wait for moves to be completed */
	r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
	if (r)
		return r;

	/* Don't wait for any submissions during page fault handling */
	if (p->direct)
		return 0;

	return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
				owner, false);
}

/**
 * amdgpu_vm_sdma_commit - commit SDMA command submission
 *
 * @p: see amdgpu_vm_update_params definition
 * @fence: resulting fence
 *
 * Returns:
 * Negative errno, 0 for success.
 */
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
				 struct dma_fence **fence)
{
	struct amdgpu_ib *ib = p->job->ibs;
	struct drm_sched_entity *entity;
	struct dma_fence *f, *tmp;
	struct amdgpu_ring *ring;
	int r;

	entity = p->direct ? &p->vm->direct : &p->vm->delayed;
	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);

	WARN_ON(ib->length_dw == 0);
	amdgpu_ring_pad_ib(ring, ib);
	WARN_ON(ib->length_dw > p->num_dw_left);
	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
	if (r)
		goto error;

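	/*
	 * Publish the scheduler fence as the VM's last direct/delayed
	 * submission; swap() hands back the previous fence so the
	 * reference it held can be dropped.
	 */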
	tmp = dma_fence_get(f);
	if (p->direct)
		swap(p->vm->last_direct, tmp);
	else
		swap(p->vm->last_delayed, tmp);
	dma_fence_put(tmp);

	if (fence && !p->direct)
		swap(*fence, f);
	dma_fence_put(f);
	return 0;

error:
	amdgpu_job_free(p->job);
	return r;
}

/**
 * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @count: number of page entries to copy
 *
 * Traces the parameters and calls the DMA function to copy the PTEs.
 */
static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
				     struct amdgpu_bo *bo, uint64_t pe,
				     unsigned count)
{
	struct amdgpu_ib *ib = p->job->ibs;
	uint64_t src = ib->gpu_addr;

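	/*
	 * The PTE values were staged at the tail of this IB by
	 * amdgpu_vm_sdma_update(), starting at dword num_dw_left, so the
	 * SDMA copy reads its source data from the IB itself.
	 */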
	src += p->num_dw_left * 4;

	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);

	amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}

/**
 * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic function to set up the
 * page table using the DMA.
 */
static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
				    struct amdgpu_bo *bo, uint64_t pe,
				    uint64_t addr, unsigned count,
				    uint32_t incr, uint64_t flags)
{
	struct amdgpu_ib *ib = p->job->ibs;

	pe += amdgpu_bo_gpu_offset(bo);
	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
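	/*
	 * For very short runs a plain inline write avoids the setup
	 * overhead of a PTE-PDE generation packet, which only pays off
	 * for longer runs.
	 */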
	if (count < 3) {
		amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
				    count, incr);
	} else {
		amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
				      count, incr, flags);
	}
}

/**
 * amdgpu_vm_sdma_update - execute VM update
 *
 * @p: see amdgpu_vm_update_params definition
 * @bo: PD/PT to update
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Reserve space in the IB, set up the mapping buffer on demand and write the
 * commands to the IB.
 */
static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
				 struct amdgpu_bo *bo, uint64_t pe,
				 uint64_t addr, unsigned count, uint32_t incr,
				 uint64_t flags)
{
	unsigned int i, ndw, nptes;
	uint64_t *pte;
	int r;

	do {
		ndw = p->num_dw_left;
		ndw -= p->job->ibs->length_dw;

		if (ndw < 32) {
			r = amdgpu_vm_sdma_commit(p, NULL);
			if (r)
				return r;

			/*
			 * Estimate how many dwords we need: 32 for the
			 * commands themselves, plus two per entry when the
			 * PTEs have to be staged in the IB for a copy,
			 * clamped to the supported IB sizes.
			 */
			ndw = 32;
			if (p->pages_addr)
				ndw += count * 2;
			ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
			ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);

			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
			if (r)
				return r;

			p->num_dw_left = ndw;
		}

		if (!p->pages_addr) {
			/* set page commands needed */
			if (bo->shadow)
				amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
							count, incr, flags);
			amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
						incr, flags);
			return 0;
		}

		/* copy commands needed */
		ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
			(bo->shadow ? 2 : 1);

		/* leave room for the worst-case IB padding added at commit time */
		ndw -= 7;

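		/* Each staged PTE takes two dwords, bounding nptes to ndw / 2. */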
		nptes = min(count, ndw / 2);

		/* Put the PTEs at the end of the IB. */
		p->num_dw_left -= nptes * 2;
		pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
		for (i = 0; i < nptes; ++i, addr += incr) {
			pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
			pte[i] |= flags;
		}

		if (bo->shadow)
			amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
		amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);

		pe += nptes * 8;
		count -= nptes;
	} while (count);

	return 0;
}

const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
	.map_table = amdgpu_vm_sdma_map_table,
	.prepare = amdgpu_vm_sdma_prepare,
	.update = amdgpu_vm_sdma_update,
	.commit = amdgpu_vm_sdma_commit
};
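
/*
 * Illustrative sketch only (not part of the driver): one plausible way a
 * caller drives this function table after amdgpu_vm_init() has selected it
 * with vm->update_funcs = &amdgpu_vm_sdma_funcs.  It follows the
 * prepare/update/commit sequence used by the VM update paths; the function
 * name example_vm_update and the argument values are hypothetical.
 */
#if 0
static int example_vm_update(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
			     unsigned count, uint32_t incr, uint64_t flags,
			     struct dma_fence **fence)
{
	struct amdgpu_vm_update_params params = {
		.adev = adev,
		.vm = vm,
		.direct = false,	/* submit through vm->delayed */
	};
	int r;

	/* Allocate the job/IB and sync to prior work as required. */
	r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
	if (r)
		return r;

	/* Queue the page table writes; may commit and re-allocate IBs. */
	r = vm->update_funcs->update(&params, bo, pe, addr, count, incr,
				     flags);
	if (r)
		return r;

	/* Submit and hand back the resulting fence. */
	return vm->update_funcs->commit(&params, fence);
}
#endif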