Lines matching defs:adev: definition and use sites of struct amdgpu_device *adev in drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c (the number opening each line is the source line in that file).
45 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
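amdgpu_virt_mmio_blocked() reports whether the host has fenced off the VF's MMIO aperture. A hedged sketch of how an init path could consult it (illustrative only; the real probe-path check in amdgpu_device.c is more involved):

    /* Sketch: defer bring-up while the host blocks our MMIO aperture. */
    if (amdgpu_sriov_vf(adev) && amdgpu_virt_mmio_blocked(adev)) {
            dev_err(adev->dev, "VF MMIO access blocked by host\n");
            return -EAGAIN;        /* retry probe once the VF is active */
    }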
53 void amdgpu_virt_init_setting(struct amdgpu_device *adev)
55 struct drm_device *ddev = adev_to_drm(adev);
58 if (adev->asic_type != CHIP_ALDEBARAN &&
59 adev->asic_type != CHIP_ARCTURUS &&
60 ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
61 if (adev->mode_info.num_crtc == 0)
62 adev->mode_info.num_crtc = 1;
63 adev->enable_virtual_display = true;
66 adev->cg_flags = 0;
67 adev->pg_flags = 0;
74 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
78 struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
84 if (adev->mes.ring.sched.ready) {
85 amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
123 dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
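amdgpu_virt_kiq_reg_write_reg_wait() performs an atomic "write reg0, then poll reg1 against ref/mask" sequence on behalf of the VF, preferring the MES ring when it is ready (line 84) and otherwise submitting through KIQ, with the dev_err at line 123 marking the last-resort failure path. A hedged usage sketch in the style of the GMC TLB-flush callers; the register offsets are hypothetical, and this assumes the five-argument form (newer trees add an XCC-instance argument):

    /* Sketch: request a VM invalidation and wait for its ack bit.
     * vm_req/vm_ack are made-up offsets; real callers pass the hub's
     * vm_inv_eng registers. */
    uint32_t vm_req = 0x1000, vm_ack = 0x1004;
    uint32_t inv_req = 0x1;
    unsigned int vmid = 1;

    amdgpu_virt_kiq_reg_write_reg_wait(adev, vm_req, vm_ack,
                                       inv_req, 1 << vmid);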
128 * @adev: amdgpu device.
133 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
135 struct amdgpu_virt *virt = &adev->virt;
139 r = virt->ops->req_full_gpu(adev, init);
141 adev->no_hw_access = true;
145 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
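Under SR-IOV the VF must hold exclusive ("full") GPU access around initialization, teardown, and reset; request_full_gpu clears the RUNTIME cap on success (line 145) and release_full_gpu restores it (line 168 below). A hedged sketch of the bracketing pattern callers follow (simplified):

    /* Sketch: bracket hardware programming with exclusive access. */
    int r;

    if (amdgpu_sriov_vf(adev)) {
            r = amdgpu_virt_request_full_gpu(adev, true);  /* init=true */
            if (r)
                    return r;
    }

    /* ... program the hardware while access is exclusive ... */

    if (amdgpu_sriov_vf(adev))
            r = amdgpu_virt_release_full_gpu(adev, true);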
153 * @adev: amdgpu device.
158 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
160 struct amdgpu_virt *virt = &adev->virt;
164 r = virt->ops->rel_full_gpu(adev, init);
168 adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
175 * @adev: amdgpu device.
179 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
181 struct amdgpu_virt *virt = &adev->virt;
185 r = virt->ops->reset_gpu(adev);
189 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
195 void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
197 struct amdgpu_virt *virt = &adev->virt;
200 virt->ops->req_init_data(adev);
202 if (adev->virt.req_init_data_ver > 0)
210 * @adev: amdgpu device.
214 int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
216 struct amdgpu_virt *virt = &adev->virt;
221 return virt->ops->wait_reset(adev);
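amdgpu_virt_reset_gpu() asks the host to reset this VF (clearing the RUNTIME cap at line 189), and amdgpu_virt_wait_reset() blocks until the hypervisor signals completion. A hedged sketch of how the two pair up in a recovery path:

    /* Sketch: VF-initiated recovery (simplified). */
    int r = amdgpu_virt_reset_gpu(adev);      /* ask the host to reset us */

    if (!r)
            r = amdgpu_virt_wait_reset(adev); /* block until host confirms */
    if (r)
            dev_err(adev->dev, "VF reset did not complete: %d\n", r);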
226 * @adev: amdgpu device.
230 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
234 if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
237 r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
240 &adev->virt.mm_table.bo,
241 &adev->virt.mm_table.gpu_addr,
242 (void *)&adev->virt.mm_table.cpu_addr);
248 memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
250 adev->virt.mm_table.gpu_addr,
251 adev->virt.mm_table.cpu_addr);
257 * @adev: amdgpu device.
260 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
262 if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
265 amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
266 &adev->virt.mm_table.gpu_addr,
267 (void *)&adev->virt.mm_table.cpu_addr);
268 adev->virt.mm_table.gpu_addr = 0;
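The MM table is one page of VRAM shared with multimedia firmware under SR-IOV: lines 237-242 allocate it as a pinned kernel BO with both a GPU and a CPU address, and lines 265-268 release it with the matching helper. A hedged standalone sketch of that create/zero/free pairing (domain flag chosen for illustration; the driver may also permit GTT here):

    /* Sketch: pinned-BO lifecycle as used for the MM table. */
    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_addr;
    int r;

    r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_VRAM,
                                &bo, &gpu_addr, &cpu_addr);
    if (r)
            return r;

    memset(cpu_addr, 0, PAGE_SIZE);  /* as line 248 does */
    /* ... hand gpu_addr to firmware, touch cpu_addr from the CPU ... */

    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);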
292 static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
294 struct amdgpu_virt *virt = &adev->virt;
332 static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
334 struct amdgpu_virt *virt = &adev->virt;
350 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
352 struct amdgpu_virt *virt = &adev->virt;
360 amdgpu_virt_ras_release_bp(adev);
368 static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
371 struct amdgpu_virt *virt = &adev->virt;
381 static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
383 struct amdgpu_virt *virt = &adev->virt;
400 if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
411 static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
414 struct amdgpu_virt *virt = &adev->virt;
428 static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
436 if (adev->mman.fw_vram_usage_va)
437 vram_usage_va = adev->mman.fw_vram_usage_va;
439 vram_usage_va = adev->mman.drv_vram_usage_va;
448 if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
451 amdgpu_virt_ras_add_bps(adev, &bp, 1);
453 amdgpu_virt_ras_reserve_bps(adev);
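On the RAS path the VF cannot retire pages itself: the host publishes retired-page addresses through the reserved VRAM region (line 436 picks whichever mapping exists), and the guest deduplicates (line 448), records (line 451), and reserves them (line 453) so the allocator can never hand those pages out. A hedged sketch of the reservation step, assuming the current amdgpu_bo_create_kernel_at() signature (older kernels also took a domain argument); the helper name is mine:

    /* Sketch: pin a BO at the exact VRAM offset of one retired page. */
    static void reserve_one_bad_page(struct amdgpu_device *adev, uint64_t bp)
    {
            struct amdgpu_bo *bo = NULL;

            if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
                                           AMDGPU_GPU_PAGE_SIZE, &bo, NULL))
                    dev_warn(adev->dev, "failed to reserve bad page 0x%llx\n", bp);
    }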
458 static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
460 struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
467 if (adev->virt.fw_reserve.p_pf2vf == NULL)
479 adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
480 adev->virt.fw_reserve.checksum_key, checksum);
486 adev->virt.gim_feature =
493 adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
500 adev->virt.vf2pf_update_interval_ms =
502 adev->virt.gim_feature =
504 adev->virt.reg_access =
507 adev->virt.decode_max_dimension_pixels = 0;
508 adev->virt.decode_max_frame_pixels = 0;
509 adev->virt.encode_max_dimension_pixels = 0;
510 adev->virt.encode_max_frame_pixels = 0;
511 adev->virt.is_mm_bw_enabled = false;
514 adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);
517 adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);
520 adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);
523 adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
525 if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
526 adev->virt.is_mm_bw_enabled = true;
528 adev->unique_id =
537 if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
538 adev->virt.vf2pf_update_interval_ms = 2000;
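amdgpu_virt_read_pf2vf_data() validates the header, verifies a checksum keyed by fw_reserve.checksum_key (lines 479-480), then copies out feature flags, register-access policy, per-codec multimedia bandwidth limits (folded in with max(), lines 514-523), the unique_id, and the VF2PF update interval, which lines 537-538 clamp into the 200-10000 ms range. The checksum is, as best I can tell, a simple byte-wise sum; a hedged sketch of that scheme (name and exact arithmetic are my paraphrase, not the driver's code):

    /* Sketch: additive checksum seeded with a key; the stored checksum
     * field's own bytes are backed out so it can be compared directly. */
    static unsigned int sriov_msg_checksum(void *obj, unsigned long size,
                                           unsigned int key, unsigned int chksum)
    {
            unsigned char *pos = obj;
            unsigned int ret = key;
            unsigned long i;

            for (i = 0; i < size; i++)
                    ret += pos[i];          /* sum every byte of the message */

            pos = (unsigned char *)&chksum;
            for (i = 0; i < sizeof(chksum); i++)
                    ret -= pos[i];          /* exclude the checksum field */

            return ret;
    }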
543 static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
546 vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
548 if (adev->virt.fw_reserve.p_vf2pf == NULL)
551 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
552 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
553 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
554 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
555 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
556 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
557 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
558 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
559 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
560 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
561 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
562 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
563 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
565 adev->psp.asd_context.bin_desc.fw_version);
567 adev->psp.ras_context.context.bin_desc.fw_version);
569 adev->psp.xgmi_context.context.bin_desc.fw_version);
570 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
571 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
572 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
573 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
574 POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
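Lines 551-574 report every firmware version to the host through a single table in the VF2PF message, indexed by the AMD_SRIOV_UCODE_ID_* constants. The POPULATE_UCODE_INFO() helper used on each line is presumably a small macro along these lines (a sketch, not the driver's exact definition):

    /* Sketch: record one (id, version) pair in the vf2pf ucode table. */
    #define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver)                   \
            do {                                                          \
                    (vf2pf_info)->ucode_info[(ucode)].id = (ucode);       \
                    (vf2pf_info)->ucode_info[(ucode)].version = (ver);    \
            } while (0)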
577 static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
581 vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
583 if (adev->virt.fw_reserve.p_vf2pf == NULL)
604 ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
606 amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
607 vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
608 vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
610 amdgpu_virt_populate_vf2pf_ucode_info(adev);
618 vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
628 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
631 ret = amdgpu_virt_read_pf2vf_data(adev);
634 amdgpu_virt_write_vf2pf_data(adev);
637 schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
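The VF2PF exchange runs as a self-rearming delayed work item: pull the host's PF2VF data, publish fresh VF2PF data, reschedule. Note that schedule_delayed_work() takes its delay in jiffies; the initial arm at line 662 converts with msecs_to_jiffies(), so the bare millisecond value at line 637 deserves a second look. A hedged sketch of the heartbeat pattern (function name mine; retry accounting omitted):

    /* Sketch: periodic guest<->host heartbeat as delayed work. */
    static void vf2pf_heartbeat(struct work_struct *work)
    {
            struct amdgpu_device *adev =
                    container_of(work, struct amdgpu_device, virt.vf2pf_work.work);

            amdgpu_virt_read_pf2vf_data(adev);   /* pull host -> guest state */
            amdgpu_virt_write_vf2pf_data(adev);  /* push guest -> host state */

            /* re-arm; the delay argument is in jiffies */
            schedule_delayed_work(&adev->virt.vf2pf_work,
                                  msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
    }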
640 void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
642 if (adev->virt.vf2pf_update_interval_ms != 0) {
644 cancel_delayed_work_sync(&adev->virt.vf2pf_work);
645 adev->virt.vf2pf_update_interval_ms = 0;
649 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
651 adev->virt.fw_reserve.p_pf2vf = NULL;
652 adev->virt.fw_reserve.p_vf2pf = NULL;
653 adev->virt.vf2pf_update_interval_ms = 0;
655 if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
657 } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
659 amdgpu_virt_exchange_data(adev);
661 INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
662 schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
663 } else if (adev->bios != NULL) {
665 adev->virt.fw_reserve.p_pf2vf =
667 (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
669 amdgpu_virt_read_pf2vf_data(adev);
674 void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
680 if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
681 if (adev->mman.fw_vram_usage_va) {
682 adev->virt.fw_reserve.p_pf2vf =
684 (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
685 adev->virt.fw_reserve.p_vf2pf =
687 (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
688 } else if (adev->mman.drv_vram_usage_va) {
689 adev->virt.fw_reserve.p_pf2vf =
691 (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
692 adev->virt.fw_reserve.p_vf2pf =
694 (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
697 amdgpu_virt_read_pf2vf_data(adev);
698 amdgpu_virt_write_vf2pf_data(adev);
701 if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
702 pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
708 if (bp_block_size && !adev->virt.ras_init_done)
709 amdgpu_virt_init_ras_err_handler_data(adev);
711 if (adev->virt.ras_init_done)
712 amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
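amdgpu_virt_exchange_data() maps the PF2VF and VF2PF message areas at fixed KB offsets inside whichever reserved-VRAM mapping exists, performs one immediate exchange (lines 697-698), and, for version-2 PF2VF headers (line 701), extracts the bad-page block that feeds the RAS handling at lines 708-712. The 64-bit block offset travels as two 32-bit words; a hedged sketch of the reassembly (field names as I read the v2 struct):

    /* Sketch: rebuild the 64-bit bad-page block offset from the two
     * 32-bit halves in the v2 PF2VF header. */
    uint64_t bp_block_offset =
            (uint64_t)pf2vf_v2->bp_block_offset_low |
            ((uint64_t)pf2vf_v2->bp_block_offset_high << 32);
    uint32_t bp_block_size = pf2vf_v2->bp_block_size;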
717 void amdgpu_detect_virtualization(struct amdgpu_device *adev)
721 switch (adev->asic_type) {
742 adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
745 adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
750 adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
753 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
757 adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
760 if (amdgpu_sriov_vf(adev)) {
761 switch (adev->asic_type) {
764 vi_set_virt_ops(adev);
767 soc15_set_virt_ops(adev);
773 amdgpu_virt_request_init_data(adev);
778 soc15_set_virt_ops(adev);
784 nv_set_virt_ops(adev);
786 amdgpu_virt_request_init_data(adev);
789 DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
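amdgpu_detect_virtualization() populates the capability bits (IS_VF, ENABLE_IOV, PASSTHROUGH_MODE, VF_MMIO_ACCESS_PROTECT) and installs the per-generation virt ops (vi/soc15/nv). The rest of the driver consumes those bits through small predicate macros in amdgpu_virt.h, which to the best of my recollection look like this:

    /* Predicates over adev->virt.caps (paraphrased from amdgpu_virt.h). */
    #define amdgpu_sriov_enabled(adev) \
            ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
    #define amdgpu_sriov_vf(adev) \
            ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_IS_VF)
    #define amdgpu_sriov_runtime(adev) \
            ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
    #define amdgpu_passthrough(adev) \
            ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)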
795 static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
797 return amdgpu_sriov_is_debug(adev) ? true : false;
800 static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
802 return amdgpu_sriov_is_normal(adev) ? true : false;
805 int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
807 if (!amdgpu_sriov_vf(adev) ||
808 amdgpu_virt_access_debugfs_is_kiq(adev))
811 if (amdgpu_virt_access_debugfs_is_mmio(adev))
812 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
819 void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
821 if (amdgpu_sriov_vf(adev))
822 adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
825 enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
829 if (amdgpu_sriov_vf(adev)) {
830 if (amdgpu_sriov_is_pp_one_vf(adev))
841 void amdgpu_virt_post_reset(struct amdgpu_device *adev)
843 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3)) {
847 adev->gfx.is_poweron = false;
851 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
853 switch (adev->ip_versions[MP0_HWIP][0]) {
916 void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
922 if (!adev->virt.is_mm_bw_enabled)
927 encode[i].max_width = adev->virt.encode_max_dimension_pixels;
928 encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
938 decode[i].max_width = adev->virt.decode_max_dimension_pixels;
939 decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
948 static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
956 if (amdgpu_sriov_reg_indirect_gc(adev)) {
969 if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
981 static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
993 if (!adev->gfx.rlc.rlcg_reg_access_supported) {
994 dev_err(adev->dev,
999 if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
1000 dev_err(adev->dev, "invalid xcc\n");
1004 if (amdgpu_device_skip_hw_access(adev))
1007 reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
1008 scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
1009 scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
1010 scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
1011 scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
1013 mutex_lock(&adev->virt.rlcg_reg_lock);
1016 spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
1022 writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
1027 writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
1048 if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
1050 dev_err(adev->dev,
1053 dev_err(adev->dev,
1056 dev_err(adev->dev,
1059 dev_err(adev->dev,
1063 dev_err(adev->dev,
1071 mutex_unlock(&adev->virt.rlcg_reg_lock);
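amdgpu_virt_rlcg_reg_rw() tunnels one register access through RLC firmware using four scratch registers plus a spare interrupt as a doorbell, serialized by rlcg_reg_lock (lines 1013 and 1071), with the dev_err block at lines 1048-1063 reporting firmware rejections. The core handshake, heavily simplified and hedged (the GRBM indirect special case and exact poll bounds are omitted):

    /* Sketch: the RLCG scratch-register handshake. */
    u32 tmp, ret;
    int retries = 50000;

    writel(v, scratch_reg0);              /* payload: value to write */
    writel(offset | flag, scratch_reg1);  /* request: target reg + op flag */
    writel(1, spare_int);                 /* doorbell: wake RLC firmware */

    do {                                  /* RLC clears the flag when done */
            tmp = readl(scratch_reg1);
            udelay(10);
    } while ((tmp & flag) && --retries);

    ret = readl(scratch_reg0);            /* for reads: the value read back */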
1076 void amdgpu_sriov_wreg(struct amdgpu_device *adev,
1082 if (amdgpu_device_skip_hw_access(adev))
1085 if (!amdgpu_sriov_runtime(adev) &&
1086 amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
1087 amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
1097 u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
1102 if (amdgpu_device_skip_hw_access(adev))
1105 if (!amdgpu_sriov_runtime(adev) &&
1106 amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
1107 return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
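amdgpu_sriov_wreg() and amdgpu_sriov_rreg() are the SR-IOV front doors for privileged register access: when the VF is not in runtime mode and the (hwip, acc_flags) pair is RLCG-eligible, they tunnel through amdgpu_virt_rlcg_reg_rw(); otherwise they fall through to the ordinary access paths. A hedged sketch of that fallback tail in the write case (simplified):

    /* Sketch: non-RLCG fallback at the end of amdgpu_sriov_wreg(). */
    if (acc_flags & AMDGPU_REGS_NO_KIQ)
            WREG32_NO_KIQ(offset, value);  /* direct MMIO, never via KIQ */
    else
            WREG32(offset, value);         /* may be routed through KIQ */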