Lines Matching defs:adev
157 struct amdgpu_device *adev = drm_to_adev(ddev);
158 uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
166 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
179 struct amdgpu_device *adev = drm_to_adev(dev);
181 if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
196 struct amdgpu_device *adev = drm_to_adev(dev);
198 if (adev->has_pr3 ||
199 ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
214 struct amdgpu_device *adev = drm_to_adev(dev);
216 return amdgpu_asic_supports_baco(adev);
241 * @adev: amdgpu_device pointer
247 void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
256 if (!drm_dev_enter(adev_to_drm(adev), &idx))
261 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
276 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
283 * @adev: amdgpu_device pointer
291 size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
299 if (!adev->mman.aper_base_kaddr)
302 last = min(pos + size, adev->gmc.visible_vram_size);
304 addr = adev->mman.aper_base_kaddr + pos;
313 amdgpu_device_flush_hdp(adev, NULL);
315 amdgpu_device_invalidate_hdp(adev, NULL);
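Lines 291-315 above are from amdgpu_device_aper_access(), which copies through the visible-VRAM BAR mapping and orders the copy against the HDP cache. A condensed sketch of that ordering follows; it is reconstructed from the lines listed here plus the memcpy_toio/memcpy_fromio and mb() steps, which do not mention adev and so do not appear in this listing (the upstream CONFIG_64BIT guard is also omitted):

size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size, bool write)
{
	void __iomem *addr;
	size_t count = 0;
	uint64_t last;

	if (!adev->mman.aper_base_kaddr)
		return 0;

	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		addr = adev->mman.aper_base_kaddr + pos;
		count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();			/* order the PCIe writes first */
			amdgpu_device_flush_hdp(adev, NULL);
		} else {
			amdgpu_device_invalidate_hdp(adev, NULL);
			mb();			/* invalidate before reading back */
			memcpy_fromio(buf, addr, count);
		}
	}
	return count;
}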
334 * @adev: amdgpu_device pointer
340 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
346 count = amdgpu_device_aper_access(adev, pos, buf, size, write);
352 amdgpu_device_mm_access(adev, pos, buf, size, write);
361 bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
363 if (adev->no_hw_access)
379 if (down_read_trylock(&adev->reset_domain->sem))
380 up_read(&adev->reset_domain->sem);
382 lockdep_assert_held(&adev->reset_domain->sem);
391 * @adev: amdgpu_device pointer
397 uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
402 if (amdgpu_device_skip_hw_access(adev))
405 if ((reg * 4) < adev->rmmio_size) {
407 amdgpu_sriov_runtime(adev) &&
408 down_read_trylock(&adev->reset_domain->sem)) {
409 ret = amdgpu_kiq_rreg(adev, reg);
410 up_read(&adev->reset_domain->sem);
412 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
415 ret = adev->pcie_rreg(adev, reg * 4);
418 trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
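The matches at lines 397-418 come from amdgpu_device_rreg(), the generic register read helper. A condensed sketch of its dispatch is shown below; the acc_flags parameter, the AMDGPU_REGS_NO_KIQ check, and the else branches are not part of this listing and are filled in from the usual upstream control flow:

uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		/* Under SR-IOV runtime, route the read through the KIQ ring
		 * unless the caller asked for a direct access or a reset is
		 * holding the reset-domain lock. */
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_domain->sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_domain->sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		/* Register lies beyond the mapped MMIO aperture: fall back
		 * to the indirect PCIe accessor. */
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
	return ret;
}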
431 * @adev: amdgpu_device pointer
436 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
438 if (amdgpu_device_skip_hw_access(adev))
441 if (offset < adev->rmmio_size)
442 return (readb(adev->rmmio + offset));
455 * @adev: amdgpu_device pointer
461 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
463 if (amdgpu_device_skip_hw_access(adev))
466 if (offset < adev->rmmio_size)
467 writeb(value, adev->rmmio + offset);
475 * @adev: amdgpu_device pointer
482 void amdgpu_device_wreg(struct amdgpu_device *adev,
486 if (amdgpu_device_skip_hw_access(adev))
489 if ((reg * 4) < adev->rmmio_size) {
491 amdgpu_sriov_runtime(adev) &&
492 down_read_trylock(&adev->reset_domain->sem)) {
493 amdgpu_kiq_wreg(adev, reg, v);
494 up_read(&adev->reset_domain->sem);
496 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
499 adev->pcie_wreg(adev, reg * 4, v);
502 trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
508 * @adev: amdgpu_device pointer
514 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
518 if (amdgpu_device_skip_hw_access(adev))
521 if (amdgpu_sriov_fullaccess(adev) &&
522 adev->gfx.rlc.funcs &&
523 adev->gfx.rlc.funcs->is_rlcg_access_range) {
524 if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
525 return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
526 } else if ((reg * 4) >= adev->rmmio_size) {
527 adev->pcie_wreg(adev, reg * 4, v);
529 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
536 * @adev: amdgpu_device pointer
541 u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
549 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
550 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
552 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
553 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
554 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
559 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
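The indirect accessors above use the classic PCIe index/data register pair exposed by the NBIO block. The lines that perform the actual index write and data read do not contain "adev" and therefore are not listed; a sketch of the complete read helper, with those two steps filled in from the standard pattern (the reg_addr parameter name is assumed), looks like:

u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, u32 reg_addr)
{
	unsigned long flags, pcie_index, pcie_data;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;
	u32 r;

	pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
	pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);	/* select the target register */
	readl(pcie_index_offset);		/* flush the posted index write */
	r = readl(pcie_data_offset);		/* read the selected register */
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}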
564 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
573 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
574 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
575 if (adev->nbio.funcs->get_pcie_index_hi_offset)
576 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
580 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
581 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
582 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
584 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
601 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
609 * @adev: amdgpu_device pointer
614 u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
622 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
623 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
625 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
626 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
627 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
637 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
645 * @adev: amdgpu_device pointer
650 void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
657 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
658 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
660 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
661 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
662 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
668 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
671 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
679 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
680 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
681 if (adev->nbio.funcs->get_pcie_index_hi_offset)
682 pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
686 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
687 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
688 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
690 pcie_index_hi_offset = (void __iomem *)adev->rmmio +
708 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
714 * @adev: amdgpu_device pointer
719 void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
726 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
727 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
729 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
730 pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
731 pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
743 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
749 * @adev: amdgpu_device pointer
753 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
755 return adev->nbio.funcs->get_rev_id(adev);
761 * @adev: amdgpu_device pointer
768 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
775 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
785 * @adev: amdgpu_device pointer
792 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
799 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
809 * @adev: amdgpu_device pointer
816 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
826 * @adev: amdgpu_device pointer
833 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
843 * @adev: amdgpu_device pointer
851 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
863 * @adev: amdgpu_device pointer
871 static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
883 * @adev: amdgpu_device pointer
887 static int amdgpu_device_asic_init(struct amdgpu_device *adev)
891 amdgpu_asic_pre_asic_init(adev);
893 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
894 adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
895 amdgpu_psp_wait_for_bootloader(adev);
896 ret = amdgpu_atomfirmware_asic_init(adev, true);
899 return amdgpu_atom_asic_init(adev->mode_info.atom_context);
908 * @adev: amdgpu_device pointer
913 static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
915 return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
918 &adev->mem_scratch.robj,
919 &adev->mem_scratch.gpu_addr,
920 (void **)&adev->mem_scratch.ptr);
926 * @adev: amdgpu_device pointer
930 static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
932 amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
938 * @adev: amdgpu_device pointer
945 void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
965 if (adev->family >= AMDGPU_FAMILY_AI)
977 * @adev: amdgpu_device pointer
982 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
984 pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
990 * @adev: amdgpu_device pointer
994 int amdgpu_device_pci_reset(struct amdgpu_device *adev)
999 return pci_reset_function(adev->pdev);
1012 * @adev: amdgpu_device pointer
1017 static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
1019 if (adev->wb.wb_obj) {
1020 amdgpu_bo_free_kernel(&adev->wb.wb_obj,
1021 &adev->wb.gpu_addr,
1022 (void **)&adev->wb.wb);
1023 adev->wb.wb_obj = NULL;
1030 * @adev: amdgpu_device pointer
1036 static int amdgpu_device_wb_init(struct amdgpu_device *adev)
1040 if (adev->wb.wb_obj == NULL) {
1042 r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
1044 &adev->wb.wb_obj, &adev->wb.gpu_addr,
1045 (void **)&adev->wb.wb);
1047 dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
1051 adev->wb.num_wb = AMDGPU_MAX_WB;
1052 memset(&adev->wb.used, 0, sizeof(adev->wb.used));
1055 memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
1064 * @adev: amdgpu_device pointer
1070 int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
1072 unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
1074 if (offset < adev->wb.num_wb) {
1075 __set_bit(offset, adev->wb.used);
1086 * @adev: amdgpu_device pointer
1091 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
1094 if (wb < adev->wb.num_wb)
1095 __clear_bit(wb, adev->wb.used);
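amdgpu_device_wb_get()/amdgpu_device_wb_free() above manage writeback slots with a plain bitmap. A minimal sketch of the pair, assuming the 8-dword slot granularity implied by the AMDGPU_MAX_WB * sizeof(uint32_t) * 8 allocation at line 1042 (the shift by 3 is not itself in this listing):

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3;	/* each slot spans 8 dwords */
		return 0;
	}
	return -EINVAL;
}

void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;			/* back to a slot index */
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}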
1101 * @adev: amdgpu_device pointer
1107 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
1110 int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
1121 if (amdgpu_sriov_vf(adev))
1125 if (adev->gmc.real_vram_size &&
1126 (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
1130 root = adev->pdev->bus;
1145 rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
1149 pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
1150 pci_write_config_word(adev->pdev, PCI_COMMAND,
1154 amdgpu_doorbell_fini(adev);
1155 if (adev->asic_type >= CHIP_BONAIRE)
1156 pci_release_resource(adev->pdev, 2);
1158 pci_release_resource(adev->pdev, 0);
1160 r = pci_resize_resource(adev->pdev, 0, rbar_size);
1166 pci_assign_unassigned_bus_resources(adev->pdev->bus);
1171 r = amdgpu_doorbell_init(adev);
1172 if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
1175 pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
1181 static bool amdgpu_device_read_bios(struct amdgpu_device *adev)
1183 if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
1195 * @adev: amdgpu_device pointer
1201 bool amdgpu_device_need_post(struct amdgpu_device *adev)
1205 if (amdgpu_sriov_vf(adev))
1208 if (!amdgpu_device_read_bios(adev))
1211 if (amdgpu_passthrough(adev)) {
1217 if (adev->asic_type == CHIP_FIJI) {
1221 err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
1226 fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
1227 release_firmware(adev->pm.fw);
1234 if (adev->gmc.xgmi.pending_reset)
1237 if (adev->has_hw_reset) {
1238 adev->has_hw_reset = false;
1243 if (adev->asic_type >= CHIP_BONAIRE)
1244 return amdgpu_atombios_scratch_need_asic_init(adev);
1247 reg = amdgpu_asic_get_config_memsize(adev);
1281 * @adev: amdgpu_device pointer
1288 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
1300 return pcie_aspm_enabled(adev->pdev);
1328 struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
1330 amdgpu_asic_set_vga_state(adev, state);
1342 * @adev: amdgpu_device pointer
1349 static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
1359 dev_warn(adev->dev, "VM page table size (%d) too small\n",
1368 * @adev: amdgpu_device pointer
1373 static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
1380 dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
1386 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
1422 adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
1429 adev->pm.smu_prv_buffer_size = 0;
1432 static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
1434 if (!(adev->flags & AMD_IS_APU) ||
1435 adev->asic_type < CHIP_RAVEN)
1438 switch (adev->asic_type) {
1440 if (adev->pdev->device == 0x15dd)
1441 adev->apu_flags |= AMD_APU_IS_RAVEN;
1442 if (adev->pdev->device == 0x15d8)
1443 adev->apu_flags |= AMD_APU_IS_PICASSO;
1446 if ((adev->pdev->device == 0x1636) ||
1447 (adev->pdev->device == 0x164c))
1448 adev->apu_flags |= AMD_APU_IS_RENOIR;
1450 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
1453 adev->apu_flags |= AMD_APU_IS_VANGOGH;
1458 if ((adev->pdev->device == 0x13FE) ||
1459 (adev->pdev->device == 0x143F))
1460 adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
1472 * @adev: amdgpu_device pointer
1477 static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
1480 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
1484 dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
1491 dev_warn(adev->dev, "gart size (%d) too small\n",
1498 dev_warn(adev->dev, "gtt size (%d) too small\n",
1506 dev_warn(adev->dev, "valid range is between 4 and 9\n");
1511 dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
1515 dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
1521 dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
1525 amdgpu_device_check_smu_prv_buffer_size(adev);
1527 amdgpu_device_check_vm_size(adev);
1529 amdgpu_device_check_block_size(adev);
1531 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
1626 struct amdgpu_device *adev = dev;
1629 for (i = 0; i < adev->num_ip_blocks; i++) {
1630 if (!adev->ip_blocks[i].status.valid)
1632 if (adev->ip_blocks[i].version->type != block_type)
1634 if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
1636 r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
1637 (void *)adev, state);
1640 adev->ip_blocks[i].version->funcs->name, r);
1660 struct amdgpu_device *adev = dev;
1663 for (i = 0; i < adev->num_ip_blocks; i++) {
1664 if (!adev->ip_blocks[i].status.valid)
1666 if (adev->ip_blocks[i].version->type != block_type)
1668 if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
1670 r = adev->ip_blocks[i].version->funcs->set_powergating_state(
1671 (void *)adev, state);
1674 adev->ip_blocks[i].version->funcs->name, r);
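Lines 1626-1674 show the recurring IP-block dispatch pattern: walk adev->ip_blocks[], skip blocks that are invalid, of the wrong type, or lack the hook, then call the per-block callback. Using the clockgating case as the example, a condensed sketch of that loop is below; the function name and signature come from the upstream driver rather than this listing, and the error message is abbreviated:

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}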
1682 * @adev: amdgpu_device pointer
1690 void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
1695 for (i = 0; i < adev->num_ip_blocks; i++) {
1696 if (!adev->ip_blocks[i].status.valid)
1698 if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
1699 adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
1706 * @adev: amdgpu_device pointer
1712 int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
1717 for (i = 0; i < adev->num_ip_blocks; i++) {
1718 if (!adev->ip_blocks[i].status.valid)
1720 if (adev->ip_blocks[i].version->type == block_type) {
1721 r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1734 * @adev: amdgpu_device pointer
1740 bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
1745 for (i = 0; i < adev->num_ip_blocks; i++) {
1746 if (!adev->ip_blocks[i].status.valid)
1748 if (adev->ip_blocks[i].version->type == block_type)
1749 return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
1758 * @adev: amdgpu_device pointer
1765 amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
1770 for (i = 0; i < adev->num_ip_blocks; i++)
1771 if (adev->ip_blocks[i].version->type == type)
1772 return &adev->ip_blocks[i];
1780 * @adev: amdgpu_device pointer
1788 int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
1792 struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
1805 * @adev: amdgpu_device pointer
1811 int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
1819 if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
1823 if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
1830 DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
1833 adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
1841 * @adev: amdgpu_device pointer
1850 static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
1852 adev->enable_virtual_display = false;
1856 const char *pci_address_name = pci_name(adev->pdev);
1868 adev->enable_virtual_display = true;
1879 adev->mode_info.num_crtc = num_crtc;
1881 adev->mode_info.num_crtc = 1;
1889 adev->enable_virtual_display, adev->mode_info.num_crtc);
1896 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
1898 if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
1899 adev->mode_info.num_crtc = 1;
1900 adev->enable_virtual_display = true;
1902 adev->enable_virtual_display, adev->mode_info.num_crtc);
1909 * @adev: amdgpu_device pointer
1916 static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
1923 adev->firmware.gpu_info_fw = NULL;
1925 if (adev->mman.discovery_bin)
1928 switch (adev->asic_type) {
1938 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1940 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1954 err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
1956 dev_err(adev->dev,
1962 hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
1969 (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
1975 if (adev->asic_type == CHIP_NAVI12)
1978 adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
1979 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
1980 adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
1981 adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
1982 adev->gfx.config.max_texture_channel_caches =
1984 adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
1985 adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
1986 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
1987 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
1988 adev->gfx.config.double_offchip_lds_buf =
1990 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
1991 adev->gfx.cu_info.max_waves_per_simd =
1993 adev->gfx.cu_info.max_scratch_slots_per_cu =
1995 adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
1998 (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
2000 adev->gfx.config.num_sc_per_sh =
2002 adev->gfx.config.num_packer_per_sc =
2013 (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
2015 adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
2020 dev_err(adev->dev,
2032 * @adev: amdgpu_device pointer
2039 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
2045 amdgpu_device_enable_virtual_display(adev);
2047 if (amdgpu_sriov_vf(adev)) {
2048 r = amdgpu_virt_request_full_gpu(adev, true);
2053 switch (adev->asic_type) {
2060 adev->family = AMDGPU_FAMILY_SI;
2061 r = si_set_ip_blocks(adev);
2072 if (adev->flags & AMD_IS_APU)
2073 adev->family = AMDGPU_FAMILY_KV;
2075 adev->family = AMDGPU_FAMILY_CI;
2077 r = cik_set_ip_blocks(adev);
2091 if (adev->flags & AMD_IS_APU)
2092 adev->family = AMDGPU_FAMILY_CZ;
2094 adev->family = AMDGPU_FAMILY_VI;
2096 r = vi_set_ip_blocks(adev);
2101 r = amdgpu_discovery_set_ip_blocks(adev);
2110 ((adev->flags & AMD_IS_APU) == 0) &&
2111 !dev_is_removable(&adev->pdev->dev))
2112 adev->flags |= AMD_IS_PX;
2114 if (!(adev->flags & AMD_IS_APU)) {
2116 parent = pcie_find_root_port(adev->pdev);
2117 adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
2119 adev->has_pr3 = false;
2124 adev->pm.pp_feature = amdgpu_pp_feature_mask;
2125 if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
2126 adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
2127 if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
2128 adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
2130 adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
2133 for (i = 0; i < adev->num_ip_blocks; i++) {
2136 i, adev->ip_blocks[i].version->funcs->name);
2137 adev->ip_blocks[i].status.valid = false;
2139 if (adev->ip_blocks[i].version->funcs->early_init) {
2140 r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
2142 adev->ip_blocks[i].status.valid = false;
2145 adev->ip_blocks[i].version->funcs->name, r);
2148 adev->ip_blocks[i].status.valid = true;
2151 adev->ip_blocks[i].status.valid = true;
2155 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2156 r = amdgpu_device_parse_gpu_info_fw(adev);
2161 if (amdgpu_device_read_bios(adev)) {
2162 if (!amdgpu_get_bios(adev))
2165 r = amdgpu_atombios_init(adev);
2167 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
2168 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
2174 if (amdgpu_sriov_vf(adev))
2175 amdgpu_virt_init_data_exchange(adev);
2182 amdgpu_amdkfd_device_probe(adev);
2183 adev->cg_flags &= amdgpu_cg_mask;
2184 adev->pg_flags &= amdgpu_pg_mask;
2189 static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
2193 for (i = 0; i < adev->num_ip_blocks; i++) {
2194 if (!adev->ip_blocks[i].status.sw)
2196 if (adev->ip_blocks[i].status.hw)
2198 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2199 (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
2200 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
2201 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2204 adev->ip_blocks[i].version->funcs->name, r);
2207 adev->ip_blocks[i].status.hw = true;
2214 static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
2218 for (i = 0; i < adev->num_ip_blocks; i++) {
2219 if (!adev->ip_blocks[i].status.sw)
2221 if (adev->ip_blocks[i].status.hw)
2223 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2226 adev->ip_blocks[i].version->funcs->name, r);
2229 adev->ip_blocks[i].status.hw = true;
2235 static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
2241 if (adev->asic_type >= CHIP_VEGA10) {
2242 for (i = 0; i < adev->num_ip_blocks; i++) {
2243 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
2246 if (!adev->ip_blocks[i].status.sw)
2250 if (adev->ip_blocks[i].status.hw == true)
2253 if (amdgpu_in_reset(adev) || adev->in_suspend) {
2254 r = adev->ip_blocks[i].version->funcs->resume(adev);
2257 adev->ip_blocks[i].version->funcs->name, r);
2261 r = adev->ip_blocks[i].version->funcs->hw_init(adev);
2264 adev->ip_blocks[i].version->funcs->name, r);
2269 adev->ip_blocks[i].status.hw = true;
2274 if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
2275 r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
2280 static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
2286 struct amdgpu_ring *ring = adev->rings[i];
2294 timeout = adev->gfx_timeout;
2297 timeout = adev->compute_timeout;
2300 timeout = adev->sdma_timeout;
2303 timeout = adev->video_timeout;
2309 timeout, adev->reset_domain->wq,
2311 adev->dev);
2319 amdgpu_xcp_update_partition_sched_list(adev);
2328 * @adev: amdgpu_device pointer
2336 static int amdgpu_device_ip_init(struct amdgpu_device *adev)
2340 r = amdgpu_ras_init(adev);
2344 for (i = 0; i < adev->num_ip_blocks; i++) {
2345 if (!adev->ip_blocks[i].status.valid)
2347 r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
2350 adev->ip_blocks[i].version->funcs->name, r);
2353 adev->ip_blocks[i].status.sw = true;
2355 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
2357 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2362 adev->ip_blocks[i].status.hw = true;
2363 } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2366 if (amdgpu_sriov_vf(adev))
2367 amdgpu_virt_exchange_data(adev);
2369 r = amdgpu_device_mem_scratch_init(adev);
2374 r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
2379 r = amdgpu_device_wb_init(adev);
2384 adev->ip_blocks[i].status.hw = true;
2387 if (adev->gfx.mcbp) {
2388 r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
2400 if (amdgpu_sriov_vf(adev))
2401 amdgpu_virt_init_data_exchange(adev);
2403 r = amdgpu_ib_pool_init(adev);
2405 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
2406 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
2410 r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
2414 r = amdgpu_device_ip_hw_init_phase1(adev);
2418 r = amdgpu_device_fw_loading(adev);
2422 r = amdgpu_device_ip_hw_init_phase2(adev);
2441 r = amdgpu_ras_recovery_init(adev);
2448 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2449 if (amdgpu_xgmi_add_device(adev) == 0) {
2450 if (!amdgpu_sriov_vf(adev)) {
2451 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
2466 amdgpu_reset_put_reset_domain(adev->reset_domain);
2467 adev->reset_domain = hive->reset_domain;
2473 r = amdgpu_device_init_schedulers(adev);
2478 if (!adev->gmc.xgmi.pending_reset) {
2479 kgd2kfd_init_zone_device(adev);
2480 amdgpu_amdkfd_device_init(adev);
2483 amdgpu_fru_get_product_info(adev);
2493 * @adev: amdgpu_device pointer
2499 static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
2501 memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
2507 * @adev: amdgpu_device pointer
2514 static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
2516 if (memcmp(adev->gart.ptr, adev->reset_magic,
2520 if (!amdgpu_in_reset(adev))
2527 switch (amdgpu_asic_reset_method(adev)) {
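The matches at lines 2499-2527 come from amdgpu_device_fill_reset_magic() and amdgpu_device_check_vram_lost(): a magic cookie is copied into the start of the GART table before a reset and compared afterwards to decide whether VRAM contents survived. A rough sketch of the check, assuming the usual rule that BACO and mode-1 resets are treated as always losing VRAM (the case labels are not part of this listing):

static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	if (memcmp(adev->gart.ptr, adev->reset_magic,
		   AMDGPU_RESET_MAGIC_NUM))
		return true;		/* magic gone: VRAM was lost */

	if (!amdgpu_in_reset(adev))
		return false;

	/* Reset methods that power the chip down are assumed to lose VRAM
	 * even if the magic happens to survive. */
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
	case AMD_RESET_METHOD_MODE1:
		return true;
	default:
		return false;
	}
}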
2539 * @adev: amdgpu_device pointer
2549 int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
2557 for (j = 0; j < adev->num_ip_blocks; j++) {
2558 i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2559 if (!adev->ip_blocks[i].status.late_initialized)
2562 if (adev->in_s0ix &&
2563 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2564 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2567 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2568 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2569 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2570 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2571 adev->ip_blocks[i].version->funcs->set_clockgating_state) {
2573 r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
2577 adev->ip_blocks[i].version->funcs->name, r);
2586 int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
2594 for (j = 0; j < adev->num_ip_blocks; j++) {
2595 i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
2596 if (!adev->ip_blocks[i].status.late_initialized)
2599 if (adev->in_s0ix &&
2600 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
2601 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
2604 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
2605 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
2606 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
2607 adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
2608 adev->ip_blocks[i].version->funcs->set_powergating_state) {
2610 r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
2614 adev->ip_blocks[i].version->funcs->name, r);
2625 struct amdgpu_device *adev;
2640 adev = gpu_ins->adev;
2641 if (!(adev->flags & AMD_IS_APU) &&
2643 ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
2660 * @adev: amdgpu_device pointer
2669 static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
2674 for (i = 0; i < adev->num_ip_blocks; i++) {
2675 if (!adev->ip_blocks[i].status.hw)
2677 if (adev->ip_blocks[i].version->funcs->late_init) {
2678 r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
2681 adev->ip_blocks[i].version->funcs->name, r);
2685 adev->ip_blocks[i].status.late_initialized = true;
2688 r = amdgpu_ras_late_init(adev);
2694 amdgpu_ras_set_error_query_ready(adev, true);
2696 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
2697 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
2699 amdgpu_device_fill_reset_magic(adev);
2706 if (amdgpu_passthrough(adev) &&
2707 ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
2708 adev->asic_type == CHIP_ALDEBARAN))
2709 amdgpu_dpm_handle_passthrough_sbr(adev, true);
2711 if (adev->gmc.xgmi.num_physical_nodes > 1) {
2727 if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
2730 if (gpu_instance->adev->flags & AMD_IS_APU)
2733 r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
2751 * @adev: amdgpu_device pointer
2755 static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
2759 if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
2762 for (i = 0; i < adev->num_ip_blocks; i++) {
2763 if (!adev->ip_blocks[i].status.hw)
2765 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
2766 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2770 adev->ip_blocks[i].version->funcs->name, r);
2772 adev->ip_blocks[i].status.hw = false;
2778 static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
2782 for (i = 0; i < adev->num_ip_blocks; i++) {
2783 if (!adev->ip_blocks[i].version->funcs->early_fini)
2786 r = adev->ip_blocks[i].version->funcs->early_fini((void *)adev);
2789 adev->ip_blocks[i].version->funcs->name, r);
2793 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2794 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2796 amdgpu_amdkfd_suspend(adev, false);
2799 amdgpu_device_smu_fini_early(adev);
2801 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2802 if (!adev->ip_blocks[i].status.hw)
2805 r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
2809 adev->ip_blocks[i].version->funcs->name, r);
2812 adev->ip_blocks[i].status.hw = false;
2815 if (amdgpu_sriov_vf(adev)) {
2816 if (amdgpu_virt_release_full_gpu(adev, false))
2826 * @adev: amdgpu_device pointer
2834 static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
2838 if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
2839 amdgpu_virt_release_ras_err_handler_data(adev);
2841 if (adev->gmc.xgmi.num_physical_nodes > 1)
2842 amdgpu_xgmi_remove_device(adev);
2844 amdgpu_amdkfd_device_fini_sw(adev);
2846 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2847 if (!adev->ip_blocks[i].status.sw)
2850 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
2851 amdgpu_ucode_free_bo(adev);
2852 amdgpu_free_static_csa(&adev->virt.csa_obj);
2853 amdgpu_device_wb_fini(adev);
2854 amdgpu_device_mem_scratch_fini(adev);
2855 amdgpu_ib_pool_fini(adev);
2858 r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
2862 adev->ip_blocks[i].version->funcs->name, r);
2864 adev->ip_blocks[i].status.sw = false;
2865 adev->ip_blocks[i].status.valid = false;
2868 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2869 if (!adev->ip_blocks[i].status.late_initialized)
2871 if (adev->ip_blocks[i].version->funcs->late_fini)
2872 adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
2873 adev->ip_blocks[i].status.late_initialized = false;
2876 amdgpu_ras_fini(adev);
2888 struct amdgpu_device *adev =
2892 r = amdgpu_ib_ring_tests(adev);
2899 struct amdgpu_device *adev =
2902 WARN_ON_ONCE(adev->gfx.gfx_off_state);
2903 WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
2905 if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
2906 adev->gfx.gfx_off_state = true;
2912 * @adev: amdgpu_device pointer
2920 static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
2924 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
2925 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
2932 if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
2933 dev_warn(adev->dev, "Failed to disallow df cstate");
2935 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2936 if (!adev->ip_blocks[i].status.valid)
2940 if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
2944 r = adev->ip_blocks[i].version->funcs->suspend(adev);
2948 adev->ip_blocks[i].version->funcs->name, r);
2952 adev->ip_blocks[i].status.hw = false;
2961 * @adev: amdgpu_device pointer
2969 static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
2973 if (adev->in_s0ix)
2974 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
2976 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
2977 if (!adev->ip_blocks[i].status.valid)
2980 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
2984 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
2985 adev->ip_blocks[i].status.hw = false;
2990 if (adev->gmc.xgmi.pending_reset &&
2991 !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
2992 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
2993 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
2994 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
2995 adev->ip_blocks[i].status.hw = false;
3004 if (adev->in_s0ix &&
3005 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
3006 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
3007 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
3011 if (adev->in_s0ix &&
3012 (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
3013 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
3023 if (amdgpu_in_reset(adev) &&
3024 (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
3025 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3029 r = adev->ip_blocks[i].version->funcs->suspend(adev);
3033 adev->ip_blocks[i].version->funcs->name, r);
3035 adev->ip_blocks[i].status.hw = false;
3037 if (!amdgpu_sriov_vf(adev)) {
3038 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
3039 r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
3042 adev->mp1_state, r);
3055 * @adev: amdgpu_device pointer
3063 int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
3067 if (amdgpu_sriov_vf(adev)) {
3068 amdgpu_virt_fini_data_exchange(adev);
3069 amdgpu_virt_request_full_gpu(adev, false);
3072 r = amdgpu_device_ip_suspend_phase1(adev);
3075 r = amdgpu_device_ip_suspend_phase2(adev);
3077 if (amdgpu_sriov_vf(adev))
3078 amdgpu_virt_release_full_gpu(adev, false);
3083 static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
3094 for (i = 0; i < adev->num_ip_blocks; i++) {
3098 block = &adev->ip_blocks[i];
3107 r = block->version->funcs->hw_init(adev);
3118 static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
3138 for (j = 0; j < adev->num_ip_blocks; j++) {
3139 block = &adev->ip_blocks[j];
3147 r = block->version->funcs->resume(adev);
3149 r = block->version->funcs->hw_init(adev);
3164 * @adev: amdgpu_device pointer
3173 static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
3177 for (i = 0; i < adev->num_ip_blocks; i++) {
3178 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3180 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3181 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3182 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3183 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
3185 r = adev->ip_blocks[i].version->funcs->resume(adev);
3188 adev->ip_blocks[i].version->funcs->name, r);
3191 adev->ip_blocks[i].status.hw = true;
3201 * @adev: amdgpu_device pointer
3211 static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
3215 for (i = 0; i < adev->num_ip_blocks; i++) {
3216 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3218 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3219 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3220 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3221 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
3222 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
3224 r = adev->ip_blocks[i].version->funcs->resume(adev);
3227 adev->ip_blocks[i].version->funcs->name, r);
3230 adev->ip_blocks[i].status.hw = true;
3239 * @adev: amdgpu_device pointer
3249 static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
3253 for (i = 0; i < adev->num_ip_blocks; i++) {
3254 if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
3256 if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
3257 r = adev->ip_blocks[i].version->funcs->resume(adev);
3260 adev->ip_blocks[i].version->funcs->name, r);
3263 adev->ip_blocks[i].status.hw = true;
3273 * @adev: amdgpu_device pointer
3282 static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
3286 r = amdgpu_device_ip_resume_phase1(adev);
3290 r = amdgpu_device_fw_loading(adev);
3294 r = amdgpu_device_ip_resume_phase2(adev);
3299 amdgpu_fence_driver_hw_init(adev);
3301 r = amdgpu_device_ip_resume_phase3(adev);
3309 * @adev: amdgpu_device pointer
3313 static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
3315 if (amdgpu_sriov_vf(adev)) {
3316 if (adev->is_atom_fw) {
3317 if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
3318 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3320 if (amdgpu_atombios_has_gpu_virtualization_table(adev))
3321 adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
3324 if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
3325 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
3389 * @adev: amdgpu_device pointer
3393 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
3395 if (adev->enable_virtual_display ||
3396 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
3399 return amdgpu_device_asic_has_dc_support(adev->asic_type);
3404 struct amdgpu_device *adev =
3406 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
3418 if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
3421 adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
3423 if (adev->asic_reset_res)
3427 adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
3429 if (adev->asic_reset_res)
3432 if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
3433 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
3434 adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
3438 adev->asic_reset_res = amdgpu_asic_reset(adev);
3442 if (adev->asic_reset_res)
3444 adev->asic_reset_res, adev_to_drm(adev)->unique);
3448 static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
3462 adev->gfx_timeout = msecs_to_jiffies(10000);
3463 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3464 if (amdgpu_sriov_vf(adev))
3465 adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
3468 adev->compute_timeout = msecs_to_jiffies(60000);
3483 dev_warn(adev->dev, "lockup timeout disabled");
3491 adev->gfx_timeout = timeout;
3494 adev->compute_timeout = timeout;
3497 adev->sdma_timeout = timeout;
3500 adev->video_timeout = timeout;
3511 adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
3512 if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
3513 adev->compute_timeout = adev->gfx_timeout;
3524 * @adev: amdgpu_device pointer
3528 static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
3533 domain = iommu_get_domain_for_dev(adev->dev);
3536 adev->ram_is_direct_mapped = true;
3544 static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
3547 adev->gfx.mcbp = true;
3549 adev->gfx.mcbp = false;
3551 if (amdgpu_sriov_vf(adev))
3552 adev->gfx.mcbp = true;
3554 if (adev->gfx.mcbp)
3561 * @adev: amdgpu_device pointer
3568 int amdgpu_device_init(struct amdgpu_device *adev,
3571 struct drm_device *ddev = adev_to_drm(adev);
3572 struct pci_dev *pdev = adev->pdev;
3578 adev->shutdown = false;
3579 adev->flags = flags;
3582 adev->asic_type = amdgpu_force_asic_type;
3584 adev->asic_type = flags & AMD_ASIC_MASK;
3586 adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
3588 adev->usec_timeout *= 10;
3589 adev->gmc.gart_size = 512 * 1024 * 1024;
3590 adev->accel_working = false;
3591 adev->num_rings = 0;
3592 RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
3593 adev->mman.buffer_funcs = NULL;
3594 adev->mman.buffer_funcs_ring = NULL;
3595 adev->vm_manager.vm_pte_funcs = NULL;
3596 adev->vm_manager.vm_pte_num_scheds = 0;
3597 adev->gmc.gmc_funcs = NULL;
3598 adev->harvest_ip_mask = 0x0;
3599 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
3600 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
3602 adev->smc_rreg = &amdgpu_invalid_rreg;
3603 adev->smc_wreg = &amdgpu_invalid_wreg;
3604 adev->pcie_rreg = &amdgpu_invalid_rreg;
3605 adev->pcie_wreg = &amdgpu_invalid_wreg;
3606 adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
3607 adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
3608 adev->pciep_rreg = &amdgpu_invalid_rreg;
3609 adev->pciep_wreg = &amdgpu_invalid_wreg;
3610 adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
3611 adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
3612 adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
3613 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
3614 adev->didt_rreg = &amdgpu_invalid_rreg;
3615 adev->didt_wreg = &amdgpu_invalid_wreg;
3616 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
3617 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
3618 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
3619 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
3622 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
3628 rw_init(&adev->firmware.mutex, "agfw");
3629 rw_init(&adev->pm.mutex, "agpm");
3630 rw_init(&adev->gfx.gpu_clock_mutex, "gfxclk");
3631 rw_init(&adev->srbm_mutex, "srbm");
3632 rw_init(&adev->gfx.pipe_reserve_mutex, "pipers");
3633 rw_init(&adev->gfx.gfx_off_mutex, "gfxoff");
3634 rw_init(&adev->gfx.partition_mutex, "gfxpar");
3635 rw_init(&adev->grbm_idx_mutex, "grbmidx");
3636 rw_init(&adev->mn_lock, "agpumn");
3637 rw_init(&adev->virt.vf_errors.lock, "vferr");
3638 rw_init(&adev->virt.rlcg_reg_lock, "vrlcg");
3639 hash_init(adev->mn_hash);
3640 rw_init(&adev->psp.mutex, "agpsp");
3641 rw_init(&adev->notifier_lock, "agnf");
3642 rw_init(&adev->pm.stable_pstate_ctx_lock, "agps");
3643 rw_init(&adev->benchmark_mutex, "agbm");
3645 amdgpu_device_init_apu_flags(adev);
3647 r = amdgpu_device_check_arguments(adev);
3651 mtx_init(&adev->mmio_idx_lock, IPL_TTY);
3652 mtx_init(&adev->smc_idx_lock, IPL_TTY);
3653 mtx_init(&adev->pcie_idx_lock, IPL_TTY);
3654 mtx_init(&adev->uvd_ctx_idx_lock, IPL_TTY);
3655 mtx_init(&adev->didt_idx_lock, IPL_TTY);
3656 mtx_init(&adev->gc_cac_idx_lock, IPL_TTY);
3657 mtx_init(&adev->se_cac_idx_lock, IPL_TTY);
3658 mtx_init(&adev->audio_endpt_idx_lock, IPL_TTY);
3659 mtx_init(&adev->mm_stats.lock, IPL_NONE);
3661 INIT_LIST_HEAD(&adev->shadow_list);
3662 rw_init(&adev->shadow_list_lock, "sdwlst");
3664 INIT_LIST_HEAD(&adev->reset_list);
3666 INIT_LIST_HEAD(&adev->ras_list);
3668 INIT_DELAYED_WORK(&adev->delayed_init_work,
3670 INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
3673 INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
3675 adev->gfx.gfx_off_req_count = 1;
3676 adev->gfx.gfx_off_residency = 0;
3677 adev->gfx.gfx_off_entrycount = 0;
3678 adev->pm.ac_power = power_supply_is_system_supplied() > 0;
3680 atomic_set(&adev->throttling_logging_enabled, 1);
3688 ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
3689 ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
3694 if (adev->asic_type >= CHIP_BONAIRE) {
3695 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
3696 adev->rmmio_size = pci_resource_len(adev->pdev, 5);
3698 adev->rmmio_base = pci_resource_start(adev->pdev, 2);
3699 adev->rmmio_size = pci_resource_len(adev->pdev, 2);
3704 atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
3707 adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
3708 if (!adev->rmmio)
3711 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
3712 DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
3719 adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
3720 if (!adev->reset_domain)
3724 amdgpu_detect_virtualization(adev);
3726 amdgpu_device_get_pcie_info(adev);
3728 r = amdgpu_device_get_job_timeout_settings(adev);
3730 dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
3735 r = amdgpu_device_ip_early_init(adev);
3739 amdgpu_device_set_mcbp(adev);
3742 r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
3747 amdgpu_gmc_tmz_set(adev);
3749 amdgpu_gmc_noretry_set(adev);
3751 if (adev->gmc.xgmi.supported) {
3752 r = adev->gfxhub.funcs->get_xgmi_info(adev);
3759 if (amdgpu_sriov_vf(adev)) {
3760 if (adev->virt.fw_reserve.p_pf2vf)
3761 adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
3762 adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
3767 } else if ((adev->flags & AMD_IS_APU) &&
3768 (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))) {
3769 adev->have_atomics_support = true;
3771 adev->have_atomics_support =
3772 !pci_enable_atomic_ops_to_root(adev->pdev,
3777 if (!adev->have_atomics_support)
3778 dev_info(adev->dev, "PCIE atomic ops is not supported\n");
3783 if ((adev->flags & AMD_IS_APU) &&
3784 (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0)))
3785 adev->have_atomics_support = true;
3787 adev->have_atomics_support = false;
3791 amdgpu_doorbell_init(adev);
3795 emu_soc_asic_init(adev);
3799 amdgpu_reset_init(adev);
3802 if (adev->bios)
3803 amdgpu_device_detect_sriov_bios(adev);
3808 if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
3809 if (adev->gmc.xgmi.num_physical_nodes) {
3810 dev_info(adev->dev, "Pending hive reset.\n");
3811 adev->gmc.xgmi.pending_reset = true;
3813 for (i = 0; i < adev->num_ip_blocks; i++) {
3814 if (!adev->ip_blocks[i].status.valid)
3816 if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
3817 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
3818 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
3819 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
3821 adev->ip_blocks[i].version->funcs->name);
3822 adev->ip_blocks[i].status.hw = true;
3831 r = amdgpu_asic_reset(adev);
3834 dev_err(adev->dev, "asic reset on init failed\n");
3841 if (amdgpu_device_need_post(adev)) {
3842 if (!adev->bios) {
3843 dev_err(adev->dev, "no vBIOS found\n");
3848 r = amdgpu_device_asic_init(adev);
3850 dev_err(adev->dev, "gpu post error!\n");
3855 if (adev->bios) {
3856 if (adev->is_atom_fw) {
3858 r = amdgpu_atomfirmware_get_clock_info(adev);
3860 dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
3861 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3866 r = amdgpu_atombios_get_clock_info(adev);
3868 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
3869 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
3873 if (!amdgpu_device_has_dc_support(adev))
3874 amdgpu_atombios_i2c_init(adev);
3880 r = amdgpu_fence_driver_sw_init(adev);
3882 dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
3883 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
3888 drm_mode_config_init(adev_to_drm(adev));
3890 r = amdgpu_device_ip_init(adev);
3892 dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
3893 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
3897 amdgpu_fence_driver_hw_init(adev);
3899 dev_info(adev->dev,
3901 adev->gfx.config.max_shader_engines,
3902 adev->gfx.config.max_sh_per_se,
3903 adev->gfx.config.max_cu_per_sh,
3904 adev->gfx.cu_info.number);
3909 uint32_t version = adev->ip_versions[GC_HWIP][0];
3912 switch (adev->asic_type) {
3914 if (adev->apu_flags & AMD_APU_IS_RAVEN2)
3916 else if (adev->apu_flags & AMD_APU_IS_PICASSO)
3922 if (adev->apu_flags & AMD_APU_IS_RENOIR)
3928 chip_name = amdgpu_asic_name[adev->asic_type];
3931 printf("%s: %s", adev->self.dv_xname, chip_name);
3939 printf(" %d CU rev 0x%02x\n", adev->gfx.cu_info.number, adev->rev_id);
3943 adev->accel_working = true;
3945 amdgpu_vm_check_compute_bug(adev);
3953 adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
3955 r = amdgpu_atombios_sysfs_init(adev);
3957 drm_err(&adev->ddev,
3960 r = amdgpu_pm_sysfs_init(adev);
3964 r = amdgpu_ucode_sysfs_init(adev);
3966 adev->ucode_sysfs_en = false;
3969 adev->ucode_sysfs_en = true;
3976 amdgpu_register_gpu_instance(adev);
3981 if (!adev->gmc.xgmi.pending_reset) {
3982 r = amdgpu_device_ip_late_init(adev);
3984 dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
3985 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
3989 amdgpu_ras_resume(adev);
3990 queue_delayed_work(system_wq, &adev->delayed_init_work,
3994 if (amdgpu_sriov_vf(adev)) {
3995 amdgpu_virt_release_full_gpu(adev, true);
3996 flush_delayed_work(&adev->delayed_init_work);
3999 r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
4001 dev_err(adev->dev, "Could not create amdgpu device attr\n");
4003 amdgpu_fru_sysfs_init(adev);
4006 r = amdgpu_pmu_init(adev);
4008 dev_err(adev->dev, "amdgpu_pmu_init failed\n");
4011 if (amdgpu_device_cache_pci_state(adev->pdev))
4019 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4020 vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
4025 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4027 vga_switcheroo_register_client(adev->pdev,
4031 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
4033 if (adev->gmc.xgmi.pending_reset)
4037 amdgpu_device_check_iommu_direct_map(adev);
4042 if (amdgpu_sriov_vf(adev))
4043 amdgpu_virt_release_full_gpu(adev, true);
4046 if (amdgpu_sriov_vf(adev) &&
4047 !amdgpu_sriov_runtime(adev) &&
4048 amdgpu_virt_mmio_blocked(adev) &&
4049 !amdgpu_virt_wait_reset(adev)) {
4050 dev_err(adev->dev, "VF exclusive mode timeout\n");
4052 adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
4053 adev->virt.ops = NULL;
4056 amdgpu_release_ras_context(adev);
4059 amdgpu_vf_error_trans_all(adev);
4064 static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
4070 unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
4074 amdgpu_doorbell_fini(adev);
4077 iounmap(adev->rmmio);
4078 adev->rmmio = NULL;
4079 if (adev->mman.aper_base_kaddr)
4080 iounmap(adev->mman.aper_base_kaddr);
4081 adev->mman.aper_base_kaddr = NULL;
4083 if (adev->rmmio_size > 0)
4084 bus_space_unmap(adev->rmmio_bst, adev->rmmio_bsh,
4085 adev->rmmio_size);
4086 adev->rmmio_size = 0;
4087 adev->rmmio = NULL;
4088 if (adev->mman.aper_base_kaddr)
4089 bus_space_unmap(adev->memt, adev->mman.aper_bsh,
4090 adev->gmc.visible_vram_size);
4091 adev->mman.aper_base_kaddr = NULL;
4095 if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
4097 arch_phys_wc_del(adev->gmc.vram_mtrr);
4098 arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
4100 drm_mtrr_del(0, adev->gmc.aper_base, adev->gmc.aper_size, DRM_MTRR_WC);
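amdgpu_device_unmap_mmio() above carries two MMIO teardown variants: an iounmap()-based one (lines 4077-4081) and a bus_space_unmap()-based one (lines 4083-4091). In this port they are presumably selected by an #ifdef __linux__ / #else split, roughly as sketched here (the preprocessor structure itself is an assumption, since only the matched lines appear in this listing):

#ifdef __linux__
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	if (adev->mman.aper_base_kaddr)
		iounmap(adev->mman.aper_base_kaddr);
	adev->mman.aper_base_kaddr = NULL;
#else
	if (adev->rmmio_size > 0)
		bus_space_unmap(adev->rmmio_bst, adev->rmmio_bsh,
		    adev->rmmio_size);
	adev->rmmio_size = 0;
	adev->rmmio = NULL;
	if (adev->mman.aper_base_kaddr)
		bus_space_unmap(adev->memt, adev->mman.aper_bsh,
		    adev->gmc.visible_vram_size);
	adev->mman.aper_base_kaddr = NULL;
#endif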
4108 * @adev: amdgpu_device pointer
4113 void amdgpu_device_fini_hw(struct amdgpu_device *adev)
4115 dev_info(adev->dev, "amdgpu: finishing device.\n");
4116 flush_delayed_work(&adev->delayed_init_work);
4117 adev->shutdown = true;
4122 if (amdgpu_sriov_vf(adev)) {
4123 amdgpu_virt_request_full_gpu(adev, false);
4124 amdgpu_virt_fini_data_exchange(adev);
4128 amdgpu_irq_disable_all(adev);
4129 if (adev->mode_info.mode_config_initialized) {
4130 if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
4131 drm_helper_force_disable_all(adev_to_drm(adev));
4133 drm_atomic_helper_shutdown(adev_to_drm(adev));
4135 amdgpu_fence_driver_hw_fini(adev);
4137 if (adev->mman.initialized)
4138 drain_workqueue(adev->mman.bdev.wq);
4140 if (adev->pm.sysfs_initialized)
4141 amdgpu_pm_sysfs_fini(adev);
4142 if (adev->ucode_sysfs_en)
4143 amdgpu_ucode_sysfs_fini(adev);
4144 sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
4145 amdgpu_fru_sysfs_fini(adev);
4148 amdgpu_ras_pre_fini(adev);
4150 amdgpu_device_ip_fini_early(adev);
4152 amdgpu_irq_fini_hw(adev);
4154 if (adev->mman.initialized)
4155 ttm_device_clear_dma_mappings(&adev->mman.bdev);
4157 amdgpu_gart_dummy_page_fini(adev);
4159 if (drm_dev_is_unplugged(adev_to_drm(adev)))
4160 amdgpu_device_unmap_mmio(adev);
4164 void amdgpu_device_fini_sw(struct amdgpu_device *adev)
4169 amdgpu_device_ip_fini(adev);
4170 amdgpu_fence_driver_sw_fini(adev);
4171 amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
4172 adev->accel_working = false;
4173 dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
4175 amdgpu_reset_fini(adev);
4178 if (!amdgpu_device_has_dc_support(adev))
4179 amdgpu_i2c_fini(adev);
4182 amdgpu_atombios_fini(adev);
4184 kfree(adev->bios);
4185 adev->bios = NULL;
4187 px = amdgpu_device_supports_px(adev_to_drm(adev));
4189 if (px || (!dev_is_removable(&adev->pdev->dev) &&
4191 vga_switcheroo_unregister_client(adev->pdev);
4194 vga_switcheroo_fini_domain_pm_ops(adev->dev);
4196 if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
4197 vga_client_unregister(adev->pdev);
4199 if (drm_dev_enter(adev_to_drm(adev), &idx)) {
4201 iounmap(adev->rmmio);
4202 adev->rmmio = NULL;
4204 if (adev->rmmio_size > 0)
4205 bus_space_unmap(adev->rmmio_bst, adev->rmmio_bsh,
4206 adev->rmmio_size);
4207 adev->rmmio_size = 0;
4208 adev->rmmio = NULL;
4210 amdgpu_doorbell_fini(adev);
4215 amdgpu_pmu_fini(adev);
4216 if (adev->mman.discovery_bin)
4217 amdgpu_discovery_fini(adev);
4219 amdgpu_reset_put_reset_domain(adev->reset_domain);
4220 adev->reset_domain = NULL;
4222 kfree(adev->pci_state);
4228 * @adev: amdgpu device object
4235 static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
4240 if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
4243 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
4263 struct amdgpu_device *adev = drm_to_adev(dev);
4266 amdgpu_choose_low_power_state(adev);
4272 r = amdgpu_device_evict_resources(adev);
4276 flush_delayed_work(&adev->gfx.gfx_off_delay_work);
4278 for (i = 0; i < adev->num_ip_blocks; i++) {
4279 if (!adev->ip_blocks[i].status.valid)
4281 if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
4283 r = adev->ip_blocks[i].version->funcs->prepare_suspend((void *)adev);
4291 adev->in_s0ix = adev->in_s3 = false;
4308 struct amdgpu_device *adev = drm_to_adev(dev);
4311 if (adev->shutdown)
4319 adev->in_suspend = true;
4321 if (amdgpu_sriov_vf(adev)) {
4322 amdgpu_virt_fini_data_exchange(adev);
4323 r = amdgpu_virt_request_full_gpu(adev, false);
4332 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
4334 cancel_delayed_work_sync(&adev->delayed_init_work);
4336 amdgpu_ras_suspend(adev);
4338 amdgpu_device_ip_suspend_phase1(adev);
4340 if (!adev->in_s0ix)
4341 amdgpu_amdkfd_suspend(adev, adev->in_runpm);
4343 r = amdgpu_device_evict_resources(adev);
4347 amdgpu_fence_driver_hw_fini(adev);
4349 amdgpu_device_ip_suspend_phase2(adev);
4351 if (amdgpu_sriov_vf(adev))
4352 amdgpu_virt_release_full_gpu(adev, false);
4369 struct amdgpu_device *adev = drm_to_adev(dev);
4372 if (amdgpu_sriov_vf(adev)) {
4373 r = amdgpu_virt_request_full_gpu(adev, true);
4383 if (adev->in_s0ix)
4384 amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
4387 if (amdgpu_device_need_post(adev)) {
4388 r = amdgpu_device_asic_init(adev);
4390 dev_err(adev->dev, "amdgpu asic init failed\n");
4393 r = amdgpu_device_ip_resume(adev);
4396 dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
4400 r = amdgpu_device_ip_late_init(adev);
4404 queue_delayed_work(system_wq, &adev->delayed_init_work,
4407 if (!adev->in_s0ix) {
4408 r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
4414 if (amdgpu_sriov_vf(adev)) {
4415 amdgpu_virt_init_data_exchange(adev);
4416 amdgpu_virt_release_full_gpu(adev, true);
4423 flush_delayed_work(&adev->delayed_init_work);
4426 drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
4428 amdgpu_ras_resume(adev);
4430 if (adev->mode_info.num_crtc) {
4443 if (!adev->dc_enabled)
4451 adev->in_suspend = false;
4453 if (adev->enable_mes)
4454 amdgpu_mes_self_test(adev);
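The resume path defers late initialization through delayed_init_work: queue_delayed_work() kicks it off after IP late init, flush_delayed_work() drains it before the framebuffer is unblanked, and cancel_delayed_work_sync() stops it on suspend. A self-contained kernel-style sketch of that delayed-work pattern; my_ctx, my_late_init() and the 2-second delay are made-up, only the workqueue API calls are real.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_ctx {
	struct delayed_work delayed_init_work;
};

static void my_late_init(struct work_struct *work)
{
	struct my_ctx *ctx =
		container_of(work, struct my_ctx, delayed_init_work.work);
	/* ... heavier init that is allowed to run a bit after resume ... */
	(void)ctx;
}

static void my_setup(struct my_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->delayed_init_work, my_late_init);
}

static void my_resume(struct my_ctx *ctx)
{
	/* Kick the deferred init ~2s later on the shared system workqueue. */
	queue_delayed_work(system_wq, &ctx->delayed_init_work,
			   msecs_to_jiffies(2000));
}

static void my_suspend(struct my_ctx *ctx)
{
	/* Make sure the deferred work is neither pending nor running. */
	cancel_delayed_work_sync(&ctx->delayed_init_work);
}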
4465 * @adev: amdgpu_device pointer
4472 static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
4477 if (amdgpu_sriov_vf(adev))
4480 if (amdgpu_asic_need_full_reset(adev))
4483 for (i = 0; i < adev->num_ip_blocks; i++) {
4484 if (!adev->ip_blocks[i].status.valid)
4486 if (adev->ip_blocks[i].version->funcs->check_soft_reset)
4487 adev->ip_blocks[i].status.hang =
4488 adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
4489 if (adev->ip_blocks[i].status.hang) {
4490 dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
4500 * @adev: amdgpu_device pointer
4508 static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
4512 for (i = 0; i < adev->num_ip_blocks; i++) {
4513 if (!adev->ip_blocks[i].status.valid)
4515 if (adev->ip_blocks[i].status.hang &&
4516 adev->ip_blocks[i].version->funcs->pre_soft_reset) {
4517 r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
4529 * @adev: amdgpu_device pointer
4535 static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
4539 if (amdgpu_asic_need_full_reset(adev))
4542 for (i = 0; i < adev->num_ip_blocks; i++) {
4543 if (!adev->ip_blocks[i].status.valid)
4545 if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
4546 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
4547 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
4548 (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
4549 adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
4550 if (adev->ip_blocks[i].status.hang) {
4551 dev_info(adev->dev, "Some block needs full reset!\n");
4562 * @adev: amdgpu_device pointer
4570 static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
4574 for (i = 0; i < adev->num_ip_blocks; i++) {
4575 if (!adev->ip_blocks[i].status.valid)
4577 if (adev->ip_blocks[i].status.hang &&
4578 adev->ip_blocks[i].version->funcs->soft_reset) {
4579 r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
4591 * @adev: amdgpu_device pointer
4599 static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
4603 for (i = 0; i < adev->num_ip_blocks; i++) {
4604 if (!adev->ip_blocks[i].status.valid)
4606 if (adev->ip_blocks[i].status.hang &&
4607 adev->ip_blocks[i].version->funcs->post_soft_reset)
4608 r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
4619 * @adev: amdgpu_device pointer
4628 static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
4635 if (amdgpu_sriov_runtime(adev))
4640 dev_info(adev->dev, "recover vram bo from shadow start\n");
4641 mutex_lock(&adev->shadow_list_lock);
4642 list_for_each_entry(vmbo, &adev->shadow_list, shadow_list) {
4643 /* If vm is compute context or adev is APU, shadow will be NULL */
4674 mutex_unlock(&adev->shadow_list_lock);
4681 dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
4685 dev_info(adev->dev, "recover vram bo from shadow done\n");
4693 * @adev: amdgpu_device pointer
4699 static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
4707 amdgpu_amdkfd_pre_reset(adev);
4710 r = amdgpu_virt_request_full_gpu(adev, true);
4712 r = amdgpu_virt_reset_gpu(adev);
4715 amdgpu_irq_gpu_reset_resume_helper(adev);
4718 amdgpu_virt_post_reset(adev);
4721 r = amdgpu_device_ip_reinit_early_sriov(adev);
4725 amdgpu_virt_init_data_exchange(adev);
4727 r = amdgpu_device_fw_loading(adev);
4732 r = amdgpu_device_ip_reinit_late_sriov(adev);
4736 hive = amdgpu_get_xgmi_hive(adev);
4738 if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
4739 r = amdgpu_xgmi_update_topology(hive, adev);
4745 r = amdgpu_ib_ring_tests(adev);
4747 amdgpu_amdkfd_post_reset(adev);
4751 if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
4752 amdgpu_inc_vram_lost(adev);
4753 r = amdgpu_device_recover_vram(adev);
4755 amdgpu_virt_release_full_gpu(adev, true);
4771 * @adev: amdgpu_device pointer
4775 bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
4781 struct amdgpu_ring *ring = adev->rings[i];
4799 * @adev: amdgpu_device pointer
4804 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
4811 if (!amdgpu_ras_is_poison_mode_supported(adev))
4814 if (amdgpu_sriov_vf(adev))
4818 switch (adev->asic_type) {
4843 dev_info(adev->dev, "GPU recovery disabled.\n");
4847 int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
4852 amdgpu_atombios_scratch_regs_engine_hung(adev, true);
4854 dev_info(adev->dev, "GPU mode1 reset\n");
4859 amdgpu_device_cache_pci_state(adev->pdev);
4862 pci_clear_master(adev->pdev);
4864 if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
4865 dev_info(adev->dev, "GPU smu mode1 reset\n");
4866 ret = amdgpu_dpm_mode1_reset(adev);
4868 dev_info(adev->dev, "GPU psp mode1 reset\n");
4869 ret = psp_gpu_reset(adev);
4875 amdgpu_device_load_pci_state(adev->pdev);
4876 ret = amdgpu_psp_wait_for_bootloader(adev);
4881 for (i = 0; i < adev->usec_timeout; i++) {
4882 u32 memsize = adev->nbio.funcs->get_memsize(adev);
4889 if (i >= adev->usec_timeout) {
4894 amdgpu_atombios_scratch_regs_engine_hung(adev, false);
4899 dev_err(adev->dev, "GPU mode1 reset failed\n");
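After the mode1 reset is asserted, the code polls get_memsize() for up to adev->usec_timeout iterations, waiting for the device to stop reading back as all-ones (typically what a hung device returns). A runnable userspace sketch of that bounded-poll idiom; read_memsize(), the fake recovery after five reads and USEC_TIMEOUT are placeholders.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define USEC_TIMEOUT 100000          /* placeholder poll budget, in microseconds */

/* Placeholder hardware read; returns 0xffffffff while the device is unresponsive. */
static uint32_t read_memsize(void)
{
	static int calls;
	return (++calls < 5) ? 0xffffffffu : 0x4000u;   /* pretend it recovers on the 5th read */
}

int main(void)
{
	uint32_t memsize = 0xffffffffu;
	int i;

	for (i = 0; i < USEC_TIMEOUT; i++) {
		memsize = read_memsize();
		if (memsize != 0xffffffffu)
			break;                /* device answered with something sane */
		usleep(1);                    /* udelay(1) in the kernel version */
	}

	if (i >= USEC_TIMEOUT) {
		fprintf(stderr, "device never came back\n");
		return 1;
	}
	printf("memsize=0x%x after %d polls\n", memsize, i + 1);
	return 0;
}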
4903 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
4911 if (reset_context->reset_req_dev == adev)
4914 if (amdgpu_sriov_vf(adev)) {
4916 amdgpu_virt_fini_data_exchange(adev);
4919 amdgpu_fence_driver_isr_toggle(adev, true);
4923 struct amdgpu_ring *ring = adev->rings[i];
4937 amdgpu_fence_driver_isr_toggle(adev, false);
4942 r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
4950 if (!amdgpu_sriov_vf(adev)) {
4953 need_full_reset = amdgpu_device_ip_need_full_reset(adev);
4956 amdgpu_device_ip_check_soft_reset(adev)) {
4957 amdgpu_device_ip_pre_soft_reset(adev);
4958 r = amdgpu_device_ip_soft_reset(adev);
4959 amdgpu_device_ip_post_soft_reset(adev);
4960 if (r || amdgpu_device_ip_check_soft_reset(adev)) {
4961 dev_info(adev->dev, "soft reset failed, will fall back to full reset!\n");
4967 r = amdgpu_device_ip_suspend(adev);
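The bare-metal branch above tries the cheap soft reset first and only escalates to a full ASIC reset when an IP block still reports hung afterwards. A condensed sketch of that ladder, reconstructed from the helpers listed earlier; the real function also consults the reset_context flags and handles the SR-IOV case separately.

/* Soft-reset-first ladder, condensed from the fragments above. */
int r = 0;
bool need_full_reset = amdgpu_device_ip_need_full_reset(adev);

if (!need_full_reset && amdgpu_device_ip_check_soft_reset(adev)) {
	amdgpu_device_ip_pre_soft_reset(adev);      /* quiesce the hung blocks */
	r = amdgpu_device_ip_soft_reset(adev);      /* per-IP soft reset */
	amdgpu_device_ip_post_soft_reset(adev);     /* undo the quiesce */

	if (r || amdgpu_device_ip_check_soft_reset(adev)) {
		/* Still hung: give up on the cheap path. */
		dev_info(adev->dev,
			 "soft reset failed, will fall back to full reset!\n");
		need_full_reset = true;
	}
}

if (need_full_reset)
	r = amdgpu_device_ip_suspend(adev);         /* prepare for the full ASIC reset */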
4978 static int amdgpu_reset_reg_dumps(struct amdgpu_device *adev)
4982 lockdep_assert_held(&adev->reset_domain->sem);
4984 for (i = 0; i < adev->num_regs; i++) {
4985 adev->reset_dump_reg_value[i] = RREG32(adev->reset_dump_reg_list[i]);
4986 trace_amdgpu_reset_reg_dumps(adev->reset_dump_reg_list[i],
4987 adev->reset_dump_reg_value[i]);
4998 struct amdgpu_device *adev = data;
5012 drm_printf(&p, "time: %lld.%09ld\n", adev->reset_time.tv_sec, adev->reset_time.tv_nsec);
5013 if (adev->reset_task_info.pid)
5015 adev->reset_task_info.process_name,
5016 adev->reset_task_info.pid);
5018 if (adev->reset_vram_lost)
5020 if (adev->num_regs) {
5023 for (i = 0; i < adev->num_regs; i++)
5025 adev->reset_dump_reg_list[i],
5026 adev->reset_dump_reg_value[i]);
5036 static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
5038 struct drm_device *dev = adev_to_drm(adev);
5040 ktime_get_ts64(&adev->reset_time);
5041 dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT,
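The coredump capture above hands the adev pointer to dev_coredumpm() together with a read callback that formats the dump when userspace reads it. A minimal sketch of that wiring with hypothetical my_dump, my_coredump_read() and my_coredump_free() helpers; only dev_coredumpm() and its callback signatures are the real API, and the driver formats its dump through a drm_printer rather than a preformatted buffer.

#include <linux/devcoredump.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_dump {
	char text[256];                /* preformatted dump contents */
	size_t len;
};

/* devcoredump calls this when userspace reads the dump file. */
static ssize_t my_coredump_read(char *buffer, loff_t offset, size_t count,
				void *data, size_t datalen)
{
	struct my_dump *dump = data;

	if (offset < 0 || offset >= dump->len)
		return 0;
	if (count > dump->len - offset)
		count = dump->len - offset;
	memcpy(buffer, dump->text + offset, count);
	return count;
}

static void my_coredump_free(void *data)
{
	kfree(data);
}

static void my_capture(struct device *dev)
{
	struct my_dump *dump = kzalloc(sizeof(*dump), GFP_NOWAIT);

	if (!dump)
		return;
	dump->len = scnprintf(dump->text, sizeof(dump->text),
			      "**** my device coredump ****\n");
	/* Ownership of @dump passes to devcoredump; it calls my_coredump_free(). */
	dev_coredumpm(dev, THIS_MODULE, dump, 0, GFP_NOWAIT,
		      my_coredump_read, my_coredump_free);
}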
5243 static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
5246 switch (amdgpu_asic_reset_method(adev)) {
5248 adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
5251 adev->mp1_state = PP_MP1_STATE_RESET;
5254 adev->mp1_state = PP_MP1_STATE_NONE;
5261 static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
5263 amdgpu_vf_error_trans_all(adev);
5264 adev->mp1_state = PP_MP1_STATE_NONE;
5267 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
5273 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5274 adev->pdev->bus->number, 1);
5282 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
5292 reset_method = amdgpu_asic_reset_method(adev);
5301 p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
5302 adev->pdev->bus->number, 1);
5321 dev_warn(adev->dev, "failed to suspend display audio\n");
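Both the suspend and resume audio helpers above look up the HDMI/DP audio controller that shares the GPU's domain and bus, i.e. function 1 next to the GPU at function 0. A small sketch of that lookup; find_companion_audio() is a made-up name, and the assumption that the GPU sits at function 0 mirrors the hard-coded devfn of 1 in the listing.

#include <linux/pci.h>

/* Sketch: find the audio function sitting next to a GPU on the same bus. */
static struct pci_dev *find_companion_audio(struct pci_dev *gpu_pdev)
{
	struct pci_dev *audio;

	audio = pci_get_domain_bus_and_slot(pci_domain_nr(gpu_pdev->bus),
					    gpu_pdev->bus->number, 1);
	if (!audio)
		return NULL;    /* no audio function on this device */

	/* Caller owns the reference and must call pci_dev_put(audio) when done. */
	return audio;
}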
5335 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
5337 struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
5340 if (!amdgpu_sriov_vf(adev))
5341 cancel_work(&adev->reset_work);
5344 if (adev->kfd.dev)
5345 cancel_work(&adev->kfd.reset_work);
5347 if (amdgpu_sriov_vf(adev))
5348 cancel_work(&adev->virt.flr_work);
5350 if (con && adev->ras_enabled)
5358 * @adev: amdgpu_device pointer
5367 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
5387 need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
5393 if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
5394 amdgpu_ras_get_context(adev)->reboot) {
5405 dev_info(adev->dev, "GPU %s begin!\n",
5408 if (!amdgpu_sriov_vf(adev))
5409 hive = amdgpu_get_xgmi_hive(adev);
5418 * to put adev in the 1st position.
5421 if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
5424 if (gpu_reset_for_dev_remove && adev->shutdown)
5427 if (!list_is_first(&adev->reset_list, &device_list))
5428 list_rotate_to_front(&adev->reset_list, &device_list);
5431 list_add_tail(&adev->reset_list, &device_list);
5503 dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
5530 if (amdgpu_sriov_vf(adev)) {
5531 r = amdgpu_device_reset_sriov(adev, job ? false : true);
5533 adev->asic_reset_res = r;
5536 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
5537 adev->ip_versions[GC_HWIP][0] == IP_VERSION(11, 0, 3))
5538 amdgpu_ras_resume(adev);
5562 if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
5593 if (!adev->kfd.init_complete)
5594 amdgpu_amdkfd_device_init(adev);
5615 dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
5617 atomic_set(&adev->reset_domain->reset_res, r);
5624 * @adev: amdgpu_device pointer
5630 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
5637 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
5640 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
5643 if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
5644 if (adev->pm.pcie_gen_mask == 0)
5645 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
5646 if (adev->pm.pcie_mlw_mask == 0)
5647 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
5651 if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
5654 pcie_bandwidth_available(adev->pdev, NULL,
5657 if (adev->pm.pcie_gen_mask == 0) {
5659 pdev = adev->pdev;
5662 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5667 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5673 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5678 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5682 adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5685 adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
5689 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5693 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5699 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5704 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5708 adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
5711 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
5715 if (adev->pm.pcie_mlw_mask == 0) {
5717 adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
5721 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
5730 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
5738 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
5745 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
5751 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
5756 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
5760 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
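The gen-mask branches above are cumulative: a link that trains at, say, gen4 speed also supports gen1 through gen3, so each case ORs in every lower generation as well. A runnable userspace sketch of that cumulative-mask idea; the SPEED_ and GEN bit names are made up, whereas the real code uses enum pci_bus_speed values and the CAIL_* capability defines.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for enum pci_bus_speed and the CAIL_* capability bits. */
enum link_speed { SPEED_2_5GT, SPEED_5_0GT, SPEED_8_0GT, SPEED_16_0GT };

#define GEN1 0x1
#define GEN2 0x2
#define GEN3 0x4
#define GEN4 0x8

static uint32_t gen_mask_for(enum link_speed speed)
{
	switch (speed) {
	case SPEED_16_0GT: return GEN1 | GEN2 | GEN3 | GEN4;
	case SPEED_8_0GT:  return GEN1 | GEN2 | GEN3;
	case SPEED_5_0GT:  return GEN1 | GEN2;
	default:           return GEN1;   /* anything slower still supports gen1 */
	}
}

int main(void)
{
	printf("gen4 link -> mask 0x%x\n", gen_mask_for(SPEED_16_0GT));
	printf("gen2 link -> mask 0x%x\n", gen_mask_for(SPEED_5_0GT));
	return 0;
}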
5772 * @adev: amdgpu_device pointer
5773 * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
5775 * Return true if @peer_adev can access (DMA) @adev through the PCIe
5776 * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
5779 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
5786 adev->gmc.aper_base + adev->gmc.aper_size - 1;
5788 !adev->gmc.xgmi.connected_to_cpu &&
5789 !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
5791 return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
5792 adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
5793 !(adev->gmc.aper_base & address_mask ||
5802 struct amdgpu_device *adev = drm_to_adev(dev);
5803 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5808 if (ras && adev->ras_enabled &&
5809 adev->nbio.funcs->enable_doorbell_interrupt)
5810 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
5812 return amdgpu_dpm_baco_enter(adev);
5817 struct amdgpu_device *adev = drm_to_adev(dev);
5818 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
5824 ret = amdgpu_dpm_baco_exit(adev);
5828 if (ras && adev->ras_enabled &&
5829 adev->nbio.funcs->enable_doorbell_interrupt)
5830 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
5832 if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
5833 adev->nbio.funcs->clear_doorbell_interrupt)
5834 adev->nbio.funcs->clear_doorbell_interrupt(adev);
5854 struct amdgpu_device *adev = drm_to_adev(dev);
5859 if (adev->gmc.xgmi.num_physical_nodes > 1) {
5864 adev->pci_channel_state = state;
5872 * Locking adev->reset_domain->sem will prevent any external access
5875 amdgpu_device_lock_reset_domain(adev->reset_domain);
5876 amdgpu_device_set_mp1_state(adev);
5883 struct amdgpu_ring *ring = adev->rings[i];
5890 atomic_inc(&adev->gpu_reset_counter);
5934 struct amdgpu_device *adev = drm_to_adev(dev);
5945 list_add_tail(&adev->reset_list, &device_list);
5954 for (i = 0; i < adev->usec_timeout; i++) {
5955 memsize = amdgpu_asic_get_config_memsize(adev);
5967 reset_context.reset_req_dev = adev;
5971 adev->no_hw_access = true;
5972 r = amdgpu_device_pre_asic_reset(adev, &reset_context);
5973 adev->no_hw_access = false;
5981 if (amdgpu_device_cache_pci_state(adev->pdev))
5982 pci_restore_state(adev->pdev);
5987 amdgpu_device_unset_mp1_state(adev);
5988 amdgpu_device_unlock_reset_domain(adev->reset_domain);
6007 struct amdgpu_device *adev = drm_to_adev(dev);
6014 if (adev->pci_channel_state != pci_channel_io_frozen)
6018 struct amdgpu_ring *ring = adev->rings[i];
6026 amdgpu_device_unset_mp1_state(adev);
6027 amdgpu_device_unlock_reset_domain(adev->reset_domain);
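The three handlers in the fragments above (error_detected, slot_reset, resume) are the callbacks the PCI AER core invokes around a recoverable bus error. A minimal sketch of how such callbacks are hooked into a pci_driver; the mydrv_* names are placeholders and the probe/remove/id_table plumbing is omitted.

#include <linux/pci.h>

static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	/* Stop touching the device; ask for a slot reset if the link froze. */
	return (state == pci_channel_io_frozen) ?
	       PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_CAN_RECOVER;
}

static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
{
	/* Re-initialize the device now that the link is back. */
	return PCI_ERS_RESULT_RECOVERED;
}

static void mydrv_resume(struct pci_dev *pdev)
{
	/* Normal operation may restart from here. */
}

static const struct pci_error_handlers mydrv_err_handlers = {
	.error_detected = mydrv_error_detected,
	.slot_reset     = mydrv_slot_reset,
	.resume         = mydrv_resume,
};

static struct pci_driver mydrv_driver = {
	.name        = "mydrv",
	.err_handler = &mydrv_err_handlers,
	/* .id_table, .probe and .remove omitted in this sketch */
};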
6036 struct amdgpu_device *adev = drm_to_adev(dev);
6039 if (amdgpu_sriov_vf(adev))
6044 kfree(adev->pci_state);
6046 adev->pci_state = pci_store_saved_state(pdev);
6048 if (!adev->pci_state) {
6067 struct amdgpu_device *adev = drm_to_adev(dev);
6070 if (!adev->pci_state)
6073 r = pci_load_saved_state(pdev, adev->pci_state);
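amdgpu_device_cache_pci_state() and amdgpu_device_load_pci_state() above keep a private snapshot of the config space so it can be re-applied after a reset or slot reset. A small sketch of the underlying PCI core calls; my_cache()/my_restore() and the single static pointer are placeholders, and error handling is trimmed.

#include <linux/pci.h>
#include <linux/slab.h>

static struct pci_saved_state *my_saved;

static int my_cache(struct pci_dev *pdev)
{
	pci_save_state(pdev);                    /* snapshot the config space */
	kfree(my_saved);                         /* drop any previous copy */
	my_saved = pci_store_saved_state(pdev);  /* keep a private copy */
	return my_saved ? 0 : -ENOMEM;
}

static int my_restore(struct pci_dev *pdev)
{
	int r;

	if (!my_saved)
		return -EINVAL;
	r = pci_load_saved_state(pdev, my_saved);  /* stage the private copy */
	if (r)
		return r;
	pci_restore_state(pdev);                   /* write it back to the device */
	return 0;
}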
6086 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
6090 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6093 if (adev->gmc.xgmi.connected_to_cpu)
6099 amdgpu_asic_flush_hdp(adev, ring);
6102 void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
6106 if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
6109 if (adev->gmc.xgmi.connected_to_cpu)
6112 amdgpu_asic_invalidate_hdp(adev, ring);
6115 int amdgpu_in_reset(struct amdgpu_device *adev)
6117 return atomic_read(&adev->reset_domain->in_gpu_reset);
6123 * @adev: amdgpu_device pointer
6135 * 4. set adev->no_hw_access to avoid potential crashes after step 5
6140 void amdgpu_device_halt(struct amdgpu_device *adev)
6142 struct pci_dev *pdev = adev->pdev;
6143 struct drm_device *ddev = adev_to_drm(adev);
6145 amdgpu_xcp_dev_unplug(adev);
6148 amdgpu_irq_disable_all(adev);
6150 amdgpu_fence_driver_hw_fini(adev);
6152 adev->no_hw_access = true;
6154 amdgpu_device_unmap_mmio(adev);
6160 u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
6166 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6167 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6169 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6173 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
6177 void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
6182 address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
6183 data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
6185 spin_lock_irqsave(&adev->pcie_idx_lock, flags);
6190 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
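The PCIe port read/write helpers above use a classic index/data register pair: the target offset is written to an index register, the value is then accessed through a data register, and a spinlock keeps the two-step sequence atomic against other users. A generic sketch of that idiom; the idxdata_regs struct, its lock and the read-back-to-post detail are placeholders for whatever the NBIO callbacks actually map.

#include <linux/io.h>
#include <linux/spinlock.h>

struct idxdata_regs {
	void __iomem *index;   /* write the target register offset here */
	void __iomem *data;    /* then read/write the value here */
	spinlock_t lock;       /* keeps the two accesses paired */
};

static u32 idxdata_read(struct idxdata_regs *r, u32 reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&r->lock, flags);
	writel(reg, r->index);
	(void)readl(r->index);      /* post the index write before reading data */
	val = readl(r->data);
	spin_unlock_irqrestore(&r->lock, flags);
	return val;
}

static void idxdata_write(struct idxdata_regs *r, u32 reg, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	writel(reg, r->index);
	(void)readl(r->index);
	writel(val, r->data);
	(void)readl(r->data);       /* flush the posted write */
	spin_unlock_irqrestore(&r->lock, flags);
}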
6195 * @adev: amdgpu_device pointer
6202 struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
6210 old = dma_fence_get_rcu_safe(&adev->gang_submit);
6219 } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
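amdgpu_device_switch_gang() above installs the new gang-submit fence with a cmpxchg retry loop so concurrent updaters cannot lose each other's fence, and returns the previously installed fence to the caller. A runnable userspace sketch of the same lock-free pointer-swap pattern using C11 atomics; the "fence" here is just a tagged integer standing in for a dma_fence, and the signaled-fence check the driver performs is only noted as a comment.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the shared pointer (adev->gang_submit in the driver). */
static _Atomic(long) current_gang = 0;

/* Publish @new_gang and report what was installed before. */
static long switch_gang(long new_gang)
{
	long old = atomic_load(&current_gang);

	do {
		/* The driver additionally bails out here if the previously
		 * installed fence has not signaled yet; omitted in this sketch. */
	} while (!atomic_compare_exchange_weak(&current_gang, &old, new_gang));

	return old;   /* caller can wait on / release the old value */
}

int main(void)
{
	printf("old=%ld\n", switch_gang(1));
	printf("old=%ld\n", switch_gang(2));
	printf("now=%ld\n", atomic_load(&current_gang));
	return 0;
}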
6226 bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
6228 switch (adev->asic_type) {
6260 if (!adev->ip_versions[DCE_HWIP][0] ||
6261 (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
6267 uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
6274 uint32_t loop = adev->usec_timeout;
6278 loop = adev->usec_timeout;