/*	$NetBSD: radeon_si_dma.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_si_dma.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $");

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_trace.h"
#include "sid.h"

u32 si_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

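	/*
	 * SI has two async DMA engines; check the soft reset status
	 * bit that corresponds to the engine behind this ring.
	 */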
	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe, uint64_t src,
			  unsigned count)
{
	while (count) {
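		/*
		 * Each PTE is eight bytes.  A single COPY packet can
		 * move at most 0xFFFFF bytes, so clamp each chunk to
		 * the largest multiple of eight below that limit.
		 */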
		unsigned bytes = count * 8;
		if (bytes > 0xFFFF8)
			bytes = 0xFFFF8;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      1, 0, 0, bytes);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}
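
/*
 * Note: si_dma_vm_copy_pages, si_dma_vm_write_pages and
 * si_dma_vm_set_pages are registered as the SI DMA page-table update
 * callbacks; the common VM code (see radeon_vm.c) picks copy, write
 * or set depending on whether the new PTE contents can be copied
 * straight out of the GART table, must be computed by the CPU, or
 * form a linear run the engine can generate itself.
 */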

/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
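		/*
		 * Each PTE takes two dwords; a single WRITE packet is
		 * limited to 0xFFFFF payload dwords, so clamp to the
		 * largest even dword count below that.
		 */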
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
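		/* emit one 64-bit PTE (two dwords) per iteration */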
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
			 struct radeon_ib *ib,
			 uint64_t pe,
			 uint64_t addr, unsigned count,
			 uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
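		/*
		 * One nine-dword PTE_PDE packet makes the engine
		 * generate ndw / 2 entries itself, each derived from
		 * the base value, the per-entry increment and the
		 * flags mask, so no per-entry CPU writes are needed
		 * for contiguous VRAM.
		 */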
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

/**
 * si_dma_vm_flush - flush the VM page tables using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @vm_id: VM to flush
 * @pd_addr: address of the page directory
 *
 * Update the page table base address for the requested VM and
 * flush the TLB using the DMA (SI).
 */
void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
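	/*
	 * Point the VM at its new page directory.  Contexts 0-7 and
	 * 8-15 keep their page table base registers in two separate
	 * banks, hence the split below.
	 */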
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts 0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
	radeon_ring_write(ring, 0xff << 16); /* retry */
	radeon_ring_write(ring, 1 << vm_id); /* mask */
	radeon_ring_write(ring, 0); /* value */
	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
				 uint64_t src_offset, uint64_t dst_offset,
				 unsigned num_gpu_pages,
				 struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
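	/*
	 * Each COPY packet moves at most 0xFFFFF bytes and occupies
	 * five ring dwords; the extra dwords reserved below leave
	 * headroom for the sync and fence packets.
	 */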
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}
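
/*
 * Usage sketch (illustrative, not part of this file): si_copy_dma() is
 * normally reached through the asic copy callback, but a direct caller
 * would consume the returned fence roughly like this, following the
 * usual ERR_PTR()/IS_ERR() conventions:
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	fence = si_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
 *			    num_pages, resv);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 *	return r;
 */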