/*	$NetBSD: amdgpu_test.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2009 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Michel Dänzer
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_test.c,v 1.5 2021/12/18 23:44:58 riastradh Exp $");

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
static void amdgpu_do_test_moves(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amdgpu_bo *vram_obj = NULL;
	struct amdgpu_bo **gtt_obj = NULL;
	struct amdgpu_bo_param bp;
	uint64_t gart_addr, vram_addr;
	unsigned n, size;
	int i, r;

	size = 1024 * 1024;

	/* Number of tests =
	 * (Total GTT - IB pool - writeback page - ring buffers) / test size
	 */
	n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		if (adev->rings[i])
			n -= adev->rings[i]->ring_size;
	if (adev->wb.wb_obj)
		n -= AMDGPU_GPU_PAGE_SIZE;
	if (adev->irq.ih.ring_obj)
		n -= adev->irq.ih.ring_size;
	n /= size;

	gtt_obj = kcalloc(n, sizeof(*gtt_obj), GFP_KERNEL);
	if (!gtt_obj) {
		DRM_ERROR("Failed to allocate %d pointers\n", n);
		r = 1;
		goto out_cleanup;
	}
	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = 0;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	r = amdgpu_bo_create(adev, &bp, &vram_obj);
	if (r) {
		DRM_ERROR("Failed to create VRAM object\n");
		goto out_cleanup;
	}
	r = amdgpu_bo_reserve(vram_obj, false);
	if (unlikely(r != 0))
		goto out_unref;
	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
	if (r) {
		DRM_ERROR("Failed to pin VRAM object\n");
		goto out_unres;
	}
	vram_addr = amdgpu_bo_gpu_offset(vram_obj);
	for (i = 0; i < n; i++) {
		void *gtt_map, *vram_map;
		void **gart_start, **gart_end;
		void **vram_start, **vram_end;
		struct dma_fence *fence = NULL;

		bp.domain = AMDGPU_GEM_DOMAIN_GTT;
		r = amdgpu_bo_create(adev, &bp, gtt_obj + i);
		if (r) {
			DRM_ERROR("Failed to create GTT object %d\n", i);
			goto out_lclean;
		}

		r = amdgpu_bo_reserve(gtt_obj[i], false);
		if (unlikely(r != 0))
			goto out_lclean_unref;

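		/*
		 * Pin the BO in GTT and bind it into the GART so that
		 * amdgpu_bo_gpu_offset() below yields a GPU address
		 * the copy engine can use.
		 */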
		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
		if (r) {
			DRM_ERROR("Failed to pin GTT object %d\n", i);
			goto out_lclean_unres;
		}
		r = amdgpu_ttm_alloc_gart(&gtt_obj[i]->tbo);
		if (r) {
			DRM_ERROR("%p bind failed\n", gtt_obj[i]);
			goto out_lclean_unpin;
		}
		gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object %d\n", i);
			goto out_lclean_unpin;
		}

		/* Fill each pointer-sized word of the GTT BO with its own
		 * CPU address so the copies can be verified below. */
		for (gart_start = gtt_map, gart_end = gtt_map + size;
		     gart_start < gart_end;
		     gart_start++)
			*gart_start = gart_start;

		amdgpu_bo_kunmap(gtt_obj[i]);

		r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
				       size, NULL, &fence, false, false);

		if (r) {
			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = dma_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
			goto out_lclean_unpin;
		}

		dma_fence_put(fence);
		fence = NULL;

		r = amdgpu_bo_kmap(vram_obj, &vram_map);
		if (r) {
			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gart_start = gtt_map, gart_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     vram_start < vram_end;
		     gart_start++, vram_start++) {
			if (*vram_start != gart_start) {
				DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
					  "expected 0x%p (GTT/VRAM offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *vram_start, gart_start,
					  (unsigned long long)
					  (gart_addr - adev->gmc.gart_start +
					   (void*)gart_start - gtt_map),
					  (unsigned long long)
					  (vram_addr - adev->gmc.vram_start +
					   (void*)gart_start - gtt_map));
				amdgpu_bo_kunmap(vram_obj);
				goto out_lclean_unpin;
			}
			*vram_start = vram_start;
		}

		amdgpu_bo_kunmap(vram_obj);

		r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
				       size, NULL, &fence, false, false);

		if (r) {
			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
			goto out_lclean_unpin;
		}

		r = dma_fence_wait(fence, false);
		if (r) {
			DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
			goto out_lclean_unpin;
		}

		dma_fence_put(fence);
		fence = NULL;

		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
		if (r) {
			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
			goto out_lclean_unpin;
		}

		for (gart_start = gtt_map, gart_end = gtt_map + size,
		     vram_start = vram_map, vram_end = vram_map + size;
		     gart_start < gart_end;
		     gart_start++, vram_start++) {
			if (*gart_start != vram_start) {
				DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
					  "expected 0x%p (VRAM/GTT offset "
					  "0x%16llx/0x%16llx)\n",
					  i, *gart_start, vram_start,
					  (unsigned long long)
					  (vram_addr - adev->gmc.vram_start +
					   (void*)vram_start - vram_map),
					  (unsigned long long)
					  (gart_addr - adev->gmc.gart_start +
					   (void*)vram_start - vram_map));
				amdgpu_bo_kunmap(gtt_obj[i]);
				goto out_lclean_unpin;
			}
		}

		amdgpu_bo_kunmap(gtt_obj[i]);

		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%"PRIx64"\n",
			 gart_addr - adev->gmc.gart_start);
		continue;

	out_lclean_unpin:
		amdgpu_bo_unpin(gtt_obj[i]);
	out_lclean_unres:
		amdgpu_bo_unreserve(gtt_obj[i]);
	out_lclean_unref:
		amdgpu_bo_unref(&gtt_obj[i]);
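		/*
		 * Also release the GTT BOs that earlier iterations of the
		 * loop left pinned and reserved.
		 */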
	out_lclean:
		for (--i; i >= 0; --i) {
			amdgpu_bo_unpin(gtt_obj[i]);
			amdgpu_bo_unreserve(gtt_obj[i]);
			amdgpu_bo_unref(&gtt_obj[i]);
		}
		if (fence)
			dma_fence_put(fence);
		break;
	}

	amdgpu_bo_unpin(vram_obj);
out_unres:
	amdgpu_bo_unreserve(vram_obj);
out_unref:
	amdgpu_bo_unref(&vram_obj);
out_cleanup:
	kfree(gtt_obj);
	if (r) {
		pr_warn("Error while testing BO move\n");
	}
}

void amdgpu_test_moves(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs)
		amdgpu_do_test_moves(adev);
}