/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT		5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL	5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY			20

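/**
 * amdgpu_csa_vaddr() - compute the virtual address reserved for the CSA
 * @adev: amdgpu device.
 * The static CSA (context save area) lives at the top of the VM address
 * space, just below the reserved range, with the VA hole skipped on ASICs
 * that have one.
 */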
uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

	addr -= AMDGPU_VA_RESERVED_SIZE;

	if (addr >= AMDGPU_VA_HOLE_START)
		addr |= AMDGPU_VA_HOLE_END;

	return addr;
}

bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/* By now all MMIO pages except mailbox are blocked if blocking is
	 * enabled in hypervisor. Choose the SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

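/**
 * amdgpu_allocate_static_csa() - allocate the static CSA buffer object
 * @adev: amdgpu device.
 * Creates a zero-filled kernel BO of AMDGPU_CSA_SIZE bytes in VRAM and
 * records its VMID0 address in adev->virt for later per-VM mapping.
 */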
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
	int r;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
				    &adev->virt.csa_vmid0_addr, &ptr);
	if (r)
		return r;

	memset(ptr, 0, AMDGPU_CSA_SIZE);
	return 0;
}

void amdgpu_free_static_csa(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
			      &adev->virt.csa_vmid0_addr,
			      NULL);
}

/*
 * amdgpu_map_static_csa() should be called during amdgpu_vm_init().
 * It maps the virtual address returned by amdgpu_csa_vaddr() into this VM,
 * and every GFX command submission should use this virtual address within
 * its META_DATA init package to support SR-IOV GFX preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
{
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &adev->virt.csa_obj->tbo;
	csa_tv.shared = true;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
				AMDGPU_CSA_SIZE);
	if (r) {
		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}

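/**
 * amdgpu_virt_init_setting() - apply default settings for virtualization
 * @adev: amdgpu device.
 * Enables a single virtual display CRTC and clears all clockgating and
 * powergating flags.
 */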
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

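/**
 * amdgpu_virt_kiq_rreg() - read a register through the KIQ ring
 * @adev: amdgpu device.
 * @reg: dword offset of the register to read.
 * Used when direct MMIO access is blocked by the hypervisor: a read packet
 * is emitted on the KIQ ring and the value is picked up from the writeback
 * slot once the polling fence signals.
 * Return: the register value, or ~0 if the read times out.
 */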
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for gpu reset case because this way may
	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_read;

	if (in_interrupt())
		might_sleep();

	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	return adev->wb.wb[adev->virt.reg_val_offs];

failed_kiq_read:
	pr_err("failed to read reg:%x\n", reg);
	return ~0;
}

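/**
 * amdgpu_virt_kiq_wreg() - write a register through the KIQ ring
 * @adev: amdgpu device.
 * @reg: dword offset of the register to write.
 * @v: value to write.
 * Write-side counterpart of amdgpu_virt_kiq_rreg(); logs an error if the
 * write cannot be confirmed within the retry budget.
 */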
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for gpu reset case because this way may
	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
		goto failed_kiq_write;

	if (in_interrupt())
		might_sleep();

	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_kiq_write:
	pr_err("failed to write reg:%x\n", reg);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When starting driver init/fini, full gpu access needs to be requested first.
 * Return: Zero if the request succeeds, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When driver init/fini finishes, full gpu access needs to be released.
 * Return: Zero if the release succeeds, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for a gpu reset to complete
 * @adev: amdgpu device.
 * Wait until the GPU reset is completed.
 * Return: Zero if the reset completes successfully, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero if the allocation succeeds, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

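/**
 * amdgpu_virt_fw_reserve_get_checksum() - checksum a PF/VF exchange block
 * @obj: start of the data block.
 * @obj_size: size of the block in bytes.
 * @key: seed value added into the sum.
 * @chksum: checksum value already stored inside the block.
 * Sums every byte of the block starting from @key, then subtracts the bytes
 * of the stored checksum so that field does not influence the result.
 */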
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}

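/**
 * amdgpu_virt_init_data_exchange() - set up the PF<->VF data exchange area
 * @adev: amdgpu device.
 * Locates the pf2vf block inside the firmware-reserved VRAM region and
 * validates its checksum; if it is sane, the vf2pf block is initialized
 * right behind it with the driver version and a freshly computed checksum.
 */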
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amdgim_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* pf2vf message must be in 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}