xref: /dflybsd-src/sys/dev/drm/amd/amdgpu/amdgpu_benchmark.c (revision b843c749addef9340ee7d4e250b09fdd492602a1)
/*
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

#define AMDGPU_BENCHMARK_ITERATIONS 1024
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17

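/*
 * amdgpu_benchmark_do_move - time @n DMA copies between two GPU addresses.
 *
 * Issues @n copies of @size bytes from @saddr to @daddr on the
 * buffer_funcs ring, waiting for each copy fence to signal before
 * issuing the next one.  Returns the elapsed time in milliseconds on
 * success, or a negative error code.
 */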
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
				    uint64_t saddr, uint64_t daddr, int n)
{
	unsigned long start_jiffies;
	unsigned long end_jiffies;
	struct dma_fence *fence = NULL;
	int i, r;

	start_jiffies = jiffies;
	for (i = 0; i < n; i++) {
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
				       false, false);
		if (r)
			goto exit_do_move;
		r = dma_fence_wait(fence, false);
		if (r)
			goto exit_do_move;
		dma_fence_put(fence);
	}
	end_jiffies = jiffies;
	r = jiffies_to_msecs(end_jiffies - start_jiffies);

exit_do_move:
	if (fence)
		dma_fence_put(fence);
	return r;
}

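/*
 * amdgpu_benchmark_log_results - report the measured copy throughput.
 *
 * @time is in milliseconds, so (n * size_in_kB) / time is roughly kB/ms,
 * i.e. MB/s; the same value multiplied by 8 is also printed as Mb/s.
 */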
static void amdgpu_benchmark_log_results(int n, unsigned size,
					 unsigned int time,
					 unsigned sdomain, unsigned ddomain,
					 char *kind)
{
	unsigned int throughput = (n * (size >> 10)) / time;
	DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
		 " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
		 kind, n, size >> 10, sdomain, ddomain, time,
		 throughput * 8, throughput);
}

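/*
 * amdgpu_benchmark_move - benchmark one buffer move between two domains.
 *
 * Creates a source BO in @sdomain and a destination BO in @ddomain, pins
 * both and maps them into the GART, runs AMDGPU_BENCHMARK_ITERATIONS DMA
 * copies of @size bytes between them and logs the resulting throughput,
 * then unpins and frees both BOs.
 */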
static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
				  unsigned sdomain, unsigned ddomain)
{
	struct amdgpu_bo *dobj = NULL;
	struct amdgpu_bo *sobj = NULL;
	struct amdgpu_bo_param bp;
	uint64_t saddr, daddr;
	int r, n;
	int time;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = sdomain;
	bp.flags = 0;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	n = AMDGPU_BENCHMARK_ITERATIONS;
	r = amdgpu_bo_create(adev, &bp, &sobj);
	if (r) {
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(sobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(sobj, sdomain);
	if (r) {
		amdgpu_bo_unreserve(sobj);
		goto out_cleanup;
	}
	r = amdgpu_ttm_alloc_gart(&sobj->tbo);
	amdgpu_bo_unreserve(sobj);
	if (r) {
		goto out_cleanup;
	}
	saddr = amdgpu_bo_gpu_offset(sobj);
	bp.domain = ddomain;
	r = amdgpu_bo_create(adev, &bp, &dobj);
	if (r) {
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(dobj, false);
	if (unlikely(r != 0))
		goto out_cleanup;
	r = amdgpu_bo_pin(dobj, ddomain);
	if (r) {
		amdgpu_bo_unreserve(dobj);
		goto out_cleanup;
	}
	r = amdgpu_ttm_alloc_gart(&dobj->tbo);
	amdgpu_bo_unreserve(dobj);
	if (r) {
		goto out_cleanup;
	}
	daddr = amdgpu_bo_gpu_offset(dobj);

	if (adev->mman.buffer_funcs) {
		time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
		if (time < 0)
			goto out_cleanup;
		if (time > 0)
			amdgpu_benchmark_log_results(n, size, time,
						     sdomain, ddomain, "dma");
	}

out_cleanup:
	/* Check the error value now; the cleanup below can overwrite it. */
	if (r) {
		DRM_ERROR("Error while benchmarking BO move.\n");
	}

	if (sobj) {
		r = amdgpu_bo_reserve(sobj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(sobj);
			amdgpu_bo_unreserve(sobj);
		}
		amdgpu_bo_unref(&sobj);
	}
	if (dobj) {
		r = amdgpu_bo_reserve(dobj, true);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(dobj);
			amdgpu_bo_unreserve(dobj);
		}
		amdgpu_bo_unref(&dobj);
	}
}

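/*
 * amdgpu_benchmark - run one of the predefined buffer-move benchmarks.
 *
 * @test_number selects the test: 1 copies 1 MB between GTT and VRAM in both
 * directions, 2 copies 1 MB from VRAM to VRAM, 3-5 sweep power-of-two buffer
 * sizes and 6-8 sweep common display-mode framebuffer sizes over the same
 * domain combinations.  Unknown test numbers log an error.
 */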
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
	int i;
	static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
		640 * 480 * 4,
		720 * 480 * 4,
		800 * 600 * 4,
		848 * 480 * 4,
		1024 * 768 * 4,
		1152 * 768 * 4,
		1280 * 720 * 4,
		1280 * 800 * 4,
		1280 * 854 * 4,
		1280 * 960 * 4,
		1280 * 1024 * 4,
		1440 * 900 * 4,
		1400 * 1050 * 4,
		1680 * 1050 * 4,
		1600 * 1200 * 4,
		1920 * 1080 * 4,
		1920 * 1200 * 4
	};

	switch (test_number) {
	case 1:
		/* simple test, VRAM to GTT and GTT to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
				      AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 2:
		/* simple test, VRAM to VRAM */
		amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
				      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 3:
		/* GTT to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 4:
		/* VRAM to GTT, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 5:
		/* VRAM to VRAM, buffer size sweep, powers of 2 */
		for (i = 1; i <= 16384; i <<= 1)
			amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 6:
		/* GTT to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_GTT,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;
	case 7:
		/* VRAM to GTT, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_GTT);
		break;
	case 8:
		/* VRAM to VRAM, buffer size sweep, common modes */
		for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
			amdgpu_benchmark_move(adev, common_modes[i],
					      AMDGPU_GEM_DOMAIN_VRAM,
					      AMDGPU_GEM_DOMAIN_VRAM);
		break;

	default:
		DRM_ERROR("Unknown benchmark\n");
	}
}