/*	$NetBSD: amdgpu_virt.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_virt.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"

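/**
 * amdgpu_virt_mmio_blocked() - check whether the hypervisor blocks MMIO
 * @adev: amdgpu device.
 * Return: True if reading SCRATCH_REG0 (offset 0xc040) yields 0xffffffff,
 * which indicates the hypervisor has blocked all MMIO pages except the
 * mailbox for this VF.
 */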
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except mailbox are blocked
	 * if blocking is enabled in hypervisor. Choose the
	 * SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

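/**
 * amdgpu_virt_init_setting() - apply virtualization defaults
 * @adev: amdgpu device.
 * Enable a single virtual display CRTC, drop atomic modesetting support,
 * and disable all clockgating and powergating features for the VF.
 */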
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

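/**
 * amdgpu_virt_kiq_reg_write_reg_wait() - write one register, poll another via KIQ
 * @adev: amdgpu device.
 * @reg0: register to write.
 * @reg1: register to poll.
 * @ref: reference value the polled register must reach.
 * @mask: mask applied to the polled register.
 * Emit a write-reg/wait-reg packet on the KIQ ring and poll its fence.  If
 * the fence does not signal in time and we are not in interrupt context,
 * retry up to MAX_KIQ_REG_TRY times before logging an error.
 */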
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {

		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_kiq:
	pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev:	amdgpu device.
 * @init:	true if called at driver init time.
 * When starting driver init/fini, full GPU access must be requested first.
 * Return: Zero on success, otherwise an error.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev:	amdgpu device.
 * @init:	true if called at driver init time.
 * When driver init/fini is finished, full GPU access must be released.
 * Return: Zero on success, otherwise an error.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev:	amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero on success, otherwise an error.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_wait_reset() - wait for GPU reset to complete
 * @adev:	amdgpu device.
 * Wait until the GPU reset has completed.
 * Return: Zero on success, otherwise an error.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - allocate memory for the MM table
 * @adev:	amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero on successful allocation, otherwise an error.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%"PRIx64", cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free MM table memory
 * @adev:	amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

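/**
 * amdgpu_virt_fw_reserve_get_checksum() - compute the pf2vf/vf2pf checksum
 * @obj: message buffer to checksum.
 * @obj_size: size of the buffer in bytes.
 * @key: checksum key the sum is seeded with.
 * @chksum: checksum value already embedded in the buffer.
 * Return: Byte-wise sum of the buffer seeded with @key, with the bytes of
 * @chksum subtracted back out so the result can be compared against the
 * stored checksum.
 */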
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
					unsigned long obj_size,
					unsigned int key,
					unsigned int chksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the chksum itself */
	pos = (char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= *(pos + i);
	return ret;
}

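/**
 * amdgpu_virt_init_data_exchange() - set up the pf2vf/vf2pf exchange area
 * @adev: amdgpu device.
 * Locate the pf2vf message in the firmware-reserved VRAM, validate its
 * checksum, and if it is valid publish a vf2pf message (driver version,
 * certification and checksum) immediately after it for the host to read.
 */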
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	uint32_t pf2vf_size = 0;
	uint32_t checksum = 0;
	uint32_t checkval;
	char *str;

	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;

	if (adev->fw_vram_usage.va != NULL) {
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)(
			adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
		AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);

		/* pf2vf message must be in 4K */
		if (pf2vf_size > 0 && pf2vf_size < 4096) {
			checkval = amdgpu_virt_fw_reserve_get_checksum(
				adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
				adev->virt.fw_reserve.checksum_key, checksum);
			if (checkval == checksum) {
				adev->virt.fw_reserve.p_vf2pf =
					((void *)adev->virt.fw_reserve.p_pf2vf +
					pf2vf_size);
				memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
					AMDGPU_FW_VRAM_VF2PF_VER);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
					sizeof(amdgim_vf2pf_info));
				AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
					&str);
#ifndef __NetBSD__		/* XXX ??? */
#ifdef MODULE
				if (THIS_MODULE->version != NULL)
					strcpy(str, THIS_MODULE->version);
				else
#endif
#endif
					strcpy(str, "N/A");
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
					0);
				AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
					amdgpu_virt_fw_reserve_get_checksum(
					adev->virt.fw_reserve.p_vf2pf,
					pf2vf_size,
					adev->virt.fw_reserve.checksum_key, 0));
			}
		}
	}
}