1 /* $NetBSD: amdgpu_benchmark.c,v 1.4 2021/12/18 23:44:58 riastradh Exp $ */
2
3 /*
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Jerome Glisse
25 */
26
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: amdgpu_benchmark.c,v 1.4 2021/12/18 23:44:58 riastradh Exp $");
29
30 #include <drm/amdgpu_drm.h>
31 #include "amdgpu.h"
32
33 #define AMDGPU_BENCHMARK_ITERATIONS 1024
34 #define AMDGPU_BENCHMARK_COMMON_MODES_N 17
35
/*
 * amdgpu_benchmark_do_move - time n SDMA copies between two GPU addresses.
 *
 * @adev:  device whose buffer-funcs ring performs the copies
 * @size:  number of bytes copied per iteration
 * @saddr: source GPU address
 * @daddr: destination GPU address
 * @n:     number of copy iterations
 *
 * Each copy is submitted via amdgpu_copy_buffer() and waited on
 * synchronously before the next one is issued.
 *
 * Returns the elapsed wall-clock time in milliseconds on success, or a
 * negative error code if a submission or fence wait fails.
 */
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
				    uint64_t saddr, uint64_t daddr, int n)
{
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	struct dma_fence *fence;
	int i, r = 0;	/* init: if n <= 0 the loop body never sets r */

	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
				       false, false);
		if (r)
			goto exit_do_move;
		/* Wait for this copy to finish before timing the next. */
		r = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (r)
			goto exit_do_move;
	}
	end_jiffies = jiffies;
	r = jiffies_to_msecs(end_jiffies - start_jiffies);

exit_do_move:
	return r;
}
62
63
/*
 * amdgpu_benchmark_log_results - report throughput of a benchmark run.
 *
 * @n:       number of buffer moves performed
 * @size:    bytes per move
 * @time:    total elapsed time in milliseconds
 * @sdomain: source memory domain (AMDGPU_GEM_DOMAIN_*)
 * @ddomain: destination memory domain
 * @kind:    label for the copy mechanism (e.g. "dma")
 *
 * Computes throughput in MB/s and logs it via DRM_INFO.  A zero @time
 * (run completed in under a millisecond) is silently skipped to avoid
 * a division by zero.
 */
static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 const char *kind)
{
	unsigned int throughput;

	if (time == 0)	/* guard: sub-millisecond run, nothing to report */
		return;
	throughput = (n * (size >> 10)) / time;
	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}
75
amdgpu_benchmark_move(struct amdgpu_device * adev,unsigned size,unsigned sdomain,unsigned ddomain)76 static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
77 unsigned sdomain, unsigned ddomain)
78 {
79 struct amdgpu_bo *dobj = NULL;
80 struct amdgpu_bo *sobj = NULL;
81 struct amdgpu_bo_param bp;
82 uint64_t saddr, daddr;
83 int r, n;
84 int time;
85
86 memset(&bp, 0, sizeof(bp));
87 bp.size = size;
88 bp.byte_align = PAGE_SIZE;
89 bp.domain = sdomain;
90 bp.flags = 0;
91 bp.type = ttm_bo_type_kernel;
92 bp.resv = NULL;
93 n = AMDGPU_BENCHMARK_ITERATIONS;
94 r = amdgpu_bo_create(adev, &bp, &sobj);
95 if (r) {
96 goto out_cleanup;
97 }
98 r = amdgpu_bo_reserve(sobj, false);
99 if (unlikely(r != 0))
100 goto out_cleanup;
101 r = amdgpu_bo_pin(sobj, sdomain);
102 if (r) {
103 amdgpu_bo_unreserve(sobj);
104 goto out_cleanup;
105 }
106 r = amdgpu_ttm_alloc_gart(&sobj->tbo);
107 amdgpu_bo_unreserve(sobj);
108 if (r) {
109 goto out_cleanup;
110 }
111 saddr = amdgpu_bo_gpu_offset(sobj);
112 bp.domain = ddomain;
113 r = amdgpu_bo_create(adev, &bp, &dobj);
114 if (r) {
115 goto out_cleanup;
116 }
117 r = amdgpu_bo_reserve(dobj, false);
118 if (unlikely(r != 0))
119 goto out_cleanup;
120 r = amdgpu_bo_pin(dobj, ddomain);
121 if (r) {
122 amdgpu_bo_unreserve(sobj);
123 goto out_cleanup;
124 }
125 r = amdgpu_ttm_alloc_gart(&dobj->tbo);
126 amdgpu_bo_unreserve(dobj);
127 if (r) {
128 goto out_cleanup;
129 }
130 daddr = amdgpu_bo_gpu_offset(dobj);
131
132 if (adev->mman.buffer_funcs) {
133 time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
134 if (time < 0)
135 goto out_cleanup;
136 if (time > 0)
137 amdgpu_benchmark_log_results(n, size, time,
138 sdomain, ddomain, "dma");
139 }
140
141 out_cleanup:
142 /* Check error value now. The value can be overwritten when clean up.*/
143 if (r) {
144 DRM_ERROR("Error while benchmarking BO move.\n");
145 }
146
147 if (sobj) {
148 r = amdgpu_bo_reserve(sobj, true);
149 if (likely(r == 0)) {
150 amdgpu_bo_unpin(sobj);
151 amdgpu_bo_unreserve(sobj);
152 }
153 amdgpu_bo_unref(&sobj);
154 }
155 if (dobj) {
156 r = amdgpu_bo_reserve(dobj, true);
157 if (likely(r == 0)) {
158 amdgpu_bo_unpin(dobj);
159 amdgpu_bo_unreserve(dobj);
160 }
161 amdgpu_bo_unref(&dobj);
162 }
163 }
164
amdgpu_benchmark(struct amdgpu_device * adev,int test_number)165 void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
166 {
167 int i;
168 static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
169 640 * 480 * 4,
170 720 * 480 * 4,
171 800 * 600 * 4,
172 848 * 480 * 4,
173 1024 * 768 * 4,
174 1152 * 768 * 4,
175 1280 * 720 * 4,
176 1280 * 800 * 4,
177 1280 * 854 * 4,
178 1280 * 960 * 4,
179 1280 * 1024 * 4,
180 1440 * 900 * 4,
181 1400 * 1050 * 4,
182 1680 * 1050 * 4,
183 1600 * 1200 * 4,
184 1920 * 1080 * 4,
185 1920 * 1200 * 4
186 };
187
188 switch (test_number) {
189 case 1:
190 /* simple test, VRAM to GTT and GTT to VRAM */
191 amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
192 AMDGPU_GEM_DOMAIN_VRAM);
193 amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
194 AMDGPU_GEM_DOMAIN_GTT);
195 break;
196 case 2:
197 /* simple test, VRAM to VRAM */
198 amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
199 AMDGPU_GEM_DOMAIN_VRAM);
200 break;
201 case 3:
202 /* GTT to VRAM, buffer size sweep, powers of 2 */
203 for (i = 1; i <= 16384; i <<= 1)
204 amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
205 AMDGPU_GEM_DOMAIN_GTT,
206 AMDGPU_GEM_DOMAIN_VRAM);
207 break;
208 case 4:
209 /* VRAM to GTT, buffer size sweep, powers of 2 */
210 for (i = 1; i <= 16384; i <<= 1)
211 amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
212 AMDGPU_GEM_DOMAIN_VRAM,
213 AMDGPU_GEM_DOMAIN_GTT);
214 break;
215 case 5:
216 /* VRAM to VRAM, buffer size sweep, powers of 2 */
217 for (i = 1; i <= 16384; i <<= 1)
218 amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
219 AMDGPU_GEM_DOMAIN_VRAM,
220 AMDGPU_GEM_DOMAIN_VRAM);
221 break;
222 case 6:
223 /* GTT to VRAM, buffer size sweep, common modes */
224 for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
225 amdgpu_benchmark_move(adev, common_modes[i],
226 AMDGPU_GEM_DOMAIN_GTT,
227 AMDGPU_GEM_DOMAIN_VRAM);
228 break;
229 case 7:
230 /* VRAM to GTT, buffer size sweep, common modes */
231 for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
232 amdgpu_benchmark_move(adev, common_modes[i],
233 AMDGPU_GEM_DOMAIN_VRAM,
234 AMDGPU_GEM_DOMAIN_GTT);
235 break;
236 case 8:
237 /* VRAM to VRAM, buffer size sweep, common modes */
238 for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
239 amdgpu_benchmark_move(adev, common_modes[i],
240 AMDGPU_GEM_DOMAIN_VRAM,
241 AMDGPU_GEM_DOMAIN_VRAM);
242 break;
243
244 default:
245 DRM_ERROR("Unknown benchmark\n");
246 }
247 }
248