/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gfx_v9_4_3.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "df_v4_3.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "nbio_v7_9.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "sdma_v4_4_2.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "ih_v6_1.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "vcn_v4_0_3.h"
#include "jpeg_v4_0_3.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_3.h"
#include "smuio_v13_0_6.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMP0_SMN_C2PMSG_33	0x16061
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID] = "MP1",
	[MP2_HWID] = "MP2",
	[THM_HWID] = "THM",
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[PWR_HWID] = "PWR",
	[GC_HWID] = "GC",
	[UVD_HWID] = "UVD",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[ACP_HWID] = "ACP",
	[DCI_HWID] = "DCI",
	[DMU_HWID] = "DMU",
	[DCO_HWID] = "DCO",
	[DIO_HWID] = "DIO",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[DAZ_HWID] = "DAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[NTB_HWID] = "NTB",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[VCE_HWID] = "VCE",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DFX_HWID] = "DFX",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[HDP_HWID] = "HDP",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[LSDMA_HWID] = "LSDMA",
	[ISP_HWID] = "ISP",
	[DBGU_IO_HWID] = "DBGU_IO",
	[DF_HWID] = "DF",
	[CLKB_HWID] = "CLKB",
	[FCH_HWID] = "FCH",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[PCS_HWID] = "PCS",
	[DDCL_HWID] = "DDCL",
	[SST_HWID] = "SST",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[UMC_HWID] = "UMC",
	[SATA_HWID] = "SATA",
	[USB_HWID] = "USB",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
	[MP0_HWID] = "MP0",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP] = GC_HWID,
	[HDP_HWIP] = HDP_HWID,
	[SDMA0_HWIP] = SDMA0_HWID,
	[SDMA1_HWIP] = SDMA1_HWID,
	[SDMA2_HWIP] = SDMA2_HWID,
	[SDMA3_HWIP] = SDMA3_HWID,
	[LSDMA_HWIP] = LSDMA_HWID,
	[MMHUB_HWIP] = MMHUB_HWID,
	[ATHUB_HWIP] = ATHUB_HWID,
	[NBIO_HWIP] = NBIF_HWID,
	[MP0_HWIP] = MP0_HWID,
	[MP1_HWIP] = MP1_HWID,
	[UVD_HWIP] = UVD_HWID,
	[VCE_HWIP] = VCE_HWID,
	[DF_HWIP] = DF_HWID,
	[DCE_HWIP] = DMU_HWID,
	[OSSSYS_HWIP] = OSSSYS_HWID,
	[SMUIO_HWIP] = SMUIO_HWID,
	[PWR_HWIP] = PWR_HWID,
	[NBIF_HWIP] = NBIF_HWID,
	[THM_HWIP] = THM_HWID,
	[CLK_HWIP] = CLKA_HWID,
	[UMC_HWIP] = UMC_HWID,
	[XGMI_HWIP] = XGMI_HWID,
	[DCI_HWIP] = DCI_HWID,
	[PCIE_HWIP] = PCIE_HWID,
};

static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
{
	u64 tmr_offset, tmr_size, pos;
	void *discv_regn;
	int ret;

	ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
	if (ret)
		return ret;

	pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;

	/* This region is read-only and reserved from system use */
	STUB();
	return -ENOSYS;
#ifdef notyet
	discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
	if (discv_regn) {
		memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
		memunmap(discv_regn);
		return 0;
	}

	return -ENOENT;
#endif
}

static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
						 uint8_t *binary)
{
	uint64_t vram_size;
	u32 msg;
	int i, ret = 0;

	/* It can take up to a second for IFWI init to complete on some dGPUs,
	 * but generally it should be in the 60-100ms range. Normally this starts
	 * as soon as the device gets power so by the time the OS loads this has long
	 * completed. However, when a card is hotplugged via e.g., USB4, we need to
	 * wait for this to complete. Once the C2PMSG is updated, we can
	 * continue.
	 */
	if (dev_is_removable(&adev->pdev->dev)) {
		for (i = 0; i < 1000; i++) {
			msg = RREG32(mmMP0_SMN_C2PMSG_33);
			if (msg & 0x80000000)
				break;
			drm_msleep(1);
		}
	}
	vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;

	if (vram_size) {
		uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
		amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
					  adev->mman.discovery_tmr_size, false);
	} else {
		ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
	}

	return ret;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
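	 * On the affected SKUs the table does not report the missing VCN
	 * instance, so the code below marks VCN1 as harvested by hand.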
	 */
	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	/* Read from file if it is the preferred option */
	if (amdgpu_discovery == 2) {
		dev_info(adev->dev, "use ip discovery information from file");
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);

		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}

	} else {
		r = amdgpu_discovery_read_binary_from_mem(
			adev, adev->mman.discovery_bin);
		if (r)
			goto out;
	}

	/* check the ip discovery binary signature */
	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
		dev_err(adev->dev,
			"get invalid ip discovery binary signature\n");
		r = -EINVAL;
		goto out;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (0 && offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;

	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip_v4 *ip)
{
	if (ip->instance_number >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected instance_number (%d) from ip discovery blob\n",
			  ip->instance_number);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}

static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->variant) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->instance_number == 0) {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN0;
					} else {
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
						adev->vcn.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
						adev->jpeg.inst_mask &=
							~AMDGPU_VCN_HARVEST_VCN1;
					}
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;
	uint32_t umc_harvest_config = 0;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			adev->vcn.harvest_config |=
				(1 << harvest_info->list[i].number_instance);
			adev->jpeg.harvest_config |=
				(1 << harvest_info->list[i].number_instance);

			adev->vcn.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			adev->jpeg.inst_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			umc_harvest_config |=
				1 << (le16_to_cpu(harvest_info->list[i].number_instance));
			(*umc_harvest_count)++;
			break;
		case GC_HWID:
			adev->gfx.xcc_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		case SDMA0_HWID:
			adev->sdma.sdma_mask &=
				~(1U << harvest_info->list[i].number_instance);
			break;
		default:
			break;
		}
	}

	adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
				~umc_harvest_config;
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[];
};

#ifdef notyet

struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static const struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static const struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry);	/* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static const struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static const struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static const struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
						 uint16_t hw_id, uint8_t inst)
{
	uint8_t harvest = 0;

	/* Until a uniform way is figured, get mask based on hwid */
	switch (hw_id) {
	case VCN_HWID:
		harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
		break;
	case DMU_HWID:
		if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
			harvest = 0x1;
		break;
	case UMC_HWID:
		/* TODO: It needs another parsing; for now, ignore.*/
		break;
	case GC_HWID:
		harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
		break;
	case SDMA0_HWID:
		harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
		break;
	default:
		break;
	}

	return harvest;
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips,
				      bool reg_base_64)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip_v4 *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
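			 * Each discovered instance becomes its own kobject under
			 * the hw_id kset registered above.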
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->instance_number;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest =
				amdgpu_discovery_get_harvest_info(
					adev, ip_hw_instance->hw_id,
					ip_hw_instance->num_instance);
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
				if (reg_base_64)
					ip_hw_instance->base_addr[kk] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
				else
					ip_hw_instance->base_addr[kk] = ip->base_address[kk];
			}

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			if (reg_base_64)
				ip_offset += struct_size(ip, base_address_64,
							 ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address,
							 ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
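		 * It is therefore safe to name the die entry after dhdr->die_id
		 * below.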
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
	}

	return 0;
}

#endif

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	return 0;
#ifdef notyet
	struct kset *die_kset;
	int res, ii;

	if (!adev->mman.discovery_bin)
		return -EINVAL;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
#endif
}

#ifdef notyet

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

#endif /* notyet */

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
#ifdef notyet
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
#endif
}

/* ================================================== */

static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip_v4 *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	uint8_t num_base_address;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	adev->gfx.xcc_mask = 0;
	adev->sdma.sdma_mask = 0;
	adev->vcn.inst_mask = 0;
	adev->jpeg.inst_mask = 0;
	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->instance_number,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				if (adev->vcn.num_vcn_inst <
				    AMDGPU_MAX_VCN_INSTANCES) {
					adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
						ip->revision & 0xc0;
					adev->vcn.num_vcn_inst++;
					adev->vcn.inst_mask |=
						(1U << ip->instance_number);
					adev->jpeg.inst_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
				}
				ip->revision &= ~0xc0;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances <
				    AMDGPU_MAX_SDMA_INSTANCES) {
					adev->sdma.num_instances++;
					adev->sdma.sdma_mask |=
						(1U << ip->instance_number);
				} else {
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
				}
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
				adev->gmc.num_umc++;
				adev->umc.node_inst_num++;
			}

			if (le16_to_cpu(ip->hw_id) == GC_HWID)
				adev->gfx.xcc_mask |=
					(1U << ip->instance_number);

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				if (ihdr->base_addr_64_bit)
					/* Truncate the 64bit base address from ip discovery
					 * and only store lower 32bit ip base in reg_offset[].
					 * Bits > 32 follows ASIC specific format, thus just
					 * discard them and handle it within specific ASIC.
					 * By this way reg_offset[] and related helpers can
					 * stay unchanged.
					 * The base address is in dwords, thus clear the
					 * highest 2 bits to store.
					 */
					ip->base_address[k] =
						lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
				else
					ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
				    hw_id_map[hw_ip] != 0) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->instance_number] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */
					adev->ip_versions[hw_ip][ip->instance_number] =
						IP_VERSION(ip->major, ip->minor, ip->revision);
				}
			}

next_ip:
			if (ihdr->base_addr_64_bit)
				ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
			else
				ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * Harvest table does not fit Navi1x and legacy GPUs,
	 * so read harvest bit per IP data structure to set
	 * harvest configuration.
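	 * (The per-IP path below is additionally limited to specific device
	 * and revision IDs known to require it.)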
	 */
	if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0) &&
	    adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3)) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
	struct gc_info_v2_1 v2_1;
};

static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (gc_info->v1.header.version_minor >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (gc_info->v1.header.version_minor >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
			le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		if (gc_info->v2.header.version_minor == 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
			adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
			adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
			adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
	struct mall_info_v2_0 v2;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		adev->gmc.m_half_use = half_use;
		break;
	case 2:
		mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
		adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.vcn_codec_disable_mask[v] =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
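	/* The GC IP major version below selects which SOC-family common block
	 * gets registered (vega10 for GC 9.x, nv for GC 10.x, soc21 for GC 11.x).
	 */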
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[OSSSYS_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			adev->ip_versions[OSSSYS_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
	case IP_VERSION(14, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			adev->ip_versions[MP0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

#if defined(CONFIG_DRM_AMD_DC)
static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
{
	amdgpu_device_set_sriov_virtual_display(adev);
	amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
}
#endif

/* Select the display block: DM when DC is supported, virtual KMS otherwise/for SR-IOV. */
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			if (amdgpu_sriov_vf(adev))
				amdgpu_discovery_set_sriov_display(adev);
			else
				amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
	}
#endif
	return 0;
}

/* Select the GFX (graphics/compute) block based on the GC IP version. */
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(9, 4, 3):
		if (!amdgpu_exp_hw_support)
			return -EINVAL;
		amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

/* Select the SDMA (system DMA) block based on the SDMA0 IP version. */
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(4, 4, 2):
		amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
	case IP_VERSION(6, 1, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

/* Select multimedia blocks: legacy UVD/VCE when a VCE IP is present, otherwise VCN/JPEG. */
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		case IP_VERSION(4, 0, 3):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}

/* Enable the MES (MicroEngine Scheduler) block where the GC IP version supports it. */
static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}

static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 4, 3):
		aqua_vanjaram_init_soc_config(adev);
		break;
	default:
		break;
	}
}

/*
 * Fill in the IP versions (from the discovery table, or hard-coded for
 * legacy ASICs) and register every IP block for this device.
 */
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	amdgpu_discovery_init_soc_config(adev);
	amdgpu_discovery_sysfs_init(adev);

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
	case IP_VERSION(9, 4, 3):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		adev->apu_flags |= AMD_APU_IS_VANGOGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	default:
		return -EINVAL;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 9, 0):
		adev->nbio.funcs = &nbio_v7_9_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
	case IP_VERSION(4, 4, 2):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 1, 0):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	case IP_VERSION(4, 3, 0):
		adev->df.funcs = &df_v4_3_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 3):
		adev->smuio.funcs = &smuio_v13_0_3_funcs;
		if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
			adev->flags |= AMD_IS_APU;
		}
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(14, 0, 0):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	    !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}