/*	$NetBSD: radeon_evergreen_dma.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_evergreen_dma.c,v 1.2 2021/12/18 23:45:43 riastradh Exp $");

#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * Add a DMA fence packet to the ring to write
 * the fence seq number, and a DMA trap packet to generate
 * an interrupt if needed (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
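	/* SRBM write of 1 to HDP_MEM_COHERENCY_FLUSH_CNTL so the fence value
	 * written above is flushed out of the HDP cache and visible in memory.
	 */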
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}

/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
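		/* With write-back enabled, emit a DMA_PACKET_WRITE that stores
		 * at next_rptr_gpu_addr the wptr value just past the
		 * INDIRECT_BUFFER packet emitted below: 4 dwords for this
		 * write packet, NOP padding until the IB packet can start at
		 * wptr % 8 == 5, then 3 dwords for the IB packet itself.
		 */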
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
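	/* INDIRECT_BUFFER packet: 32-byte aligned IB base address, then the
	 * IB size in dwords (bits 12 and up) packed with the upper address
	 * bits (bits 0-7) in the last dword.
	 */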
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));

}

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
					uint64_t src_offset,
					uint64_t dst_offset,
					unsigned num_gpu_pages,
					struct dma_resv *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
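	/* The DMA_PACKET_COPY header carries the transfer size as a 20-bit
	 * dword count, so split the copy into chunks of at most 0xfffff
	 * dwords.  Each copy packet takes 5 dwords of ring space; the extra
	 * 11 dwords leave room for the sync and fence packets emitted below.
	 */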
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

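	/* Collect the fences attached to the reservation object and make the
	 * DMA ring wait for them before the copy starts.
	 */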
	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
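		/* COPY packet: dword count in the header, then the low 32 bits
		 * of the dst and src addresses followed by their upper 8 bits.
		 */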
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}

/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
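		/* The soft-reset status does not flag the DMA engine, so treat
		 * it as making progress and refresh the lockup tracker.
		 */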
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}