/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"

#include "soc15.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "hdp_v4_0.h"
#include "vega10_ih.h"
#include "vega20_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "gmc_v10_0.h"
#include "gmc_v11_0.h"
#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "nbio_v2_3.h"
#include "nbio_v4_3.h"
#include "nbio_v7_2.h"
#include "nbio_v7_7.h"
#include "hdp_v5_0.h"
#include "hdp_v5_2.h"
#include "hdp_v6_0.h"
#include "nv.h"
#include "soc21.h"
#include "navi10_ih.h"
#include "ih_v6_0.h"
#include "gfx_v10_0.h"
#include "gfx_v11_0.h"
#include "sdma_v5_0.h"
#include "sdma_v5_2.h"
#include "sdma_v6_0.h"
#include "lsdma_v6_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
#include "jpeg_v3_0.h"
#include "vcn_v4_0.h"
#include "jpeg_v4_0.h"
#include "amdgpu_vkms.h"
#include "mes_v10_1.h"
#include "mes_v11_0.h"
#include "smuio_v11_0.h"
#include "smuio_v11_0_6.h"
#include "smuio_v13_0.h"
#include "smuio_v13_0_6.h"

#define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);

#define mmRCC_CONFIG_MEMSIZE	0xde3
#define mmMM_INDEX		0x0
#define mmMM_INDEX_HI		0x6
#define mmMM_DATA		0x1

static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID] = "MP1",
	[MP2_HWID] = "MP2",
	[THM_HWID] = "THM",
	[SMUIO_HWID] = "SMUIO",
	[FUSE_HWID] = "FUSE",
	[CLKA_HWID] = "CLKA",
	[PWR_HWID] = "PWR",
	[GC_HWID] = "GC",
	[UVD_HWID] = "UVD",
	[AUDIO_AZ_HWID] = "AUDIO_AZ",
	[ACP_HWID] = "ACP",
	[DCI_HWID] = "DCI",
	[DMU_HWID] = "DMU",
	[DCO_HWID] = "DCO",
	[DIO_HWID] = "DIO",
	[XDMA_HWID] = "XDMA",
	[DCEAZ_HWID] = "DCEAZ",
	[DAZ_HWID] = "DAZ",
	[SDPMUX_HWID] = "SDPMUX",
	[NTB_HWID] = "NTB",
	[IOHC_HWID] = "IOHC",
	[L2IMU_HWID] = "L2IMU",
	[VCE_HWID] = "VCE",
	[MMHUB_HWID] = "MMHUB",
	[ATHUB_HWID] = "ATHUB",
	[DBGU_NBIO_HWID] = "DBGU_NBIO",
	[DFX_HWID] = "DFX",
	[DBGU0_HWID] = "DBGU0",
	[DBGU1_HWID] = "DBGU1",
	[OSSSYS_HWID] = "OSSSYS",
	[HDP_HWID] = "HDP",
	[SDMA0_HWID] = "SDMA0",
	[SDMA1_HWID] = "SDMA1",
	[SDMA2_HWID] = "SDMA2",
	[SDMA3_HWID] = "SDMA3",
	[LSDMA_HWID] = "LSDMA",
	[ISP_HWID] = "ISP",
	[DBGU_IO_HWID] = "DBGU_IO",
	[DF_HWID] = "DF",
	[CLKB_HWID] = "CLKB",
	[FCH_HWID] = "FCH",
	[DFX_DAP_HWID] = "DFX_DAP",
	[L1IMU_PCIE_HWID] = "L1IMU_PCIE",
	[L1IMU_NBIF_HWID] = "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
	[L1IMU3_HWID] = "L1IMU3",
	[L1IMU4_HWID] = "L1IMU4",
	[L1IMU5_HWID] = "L1IMU5",
	[L1IMU6_HWID] = "L1IMU6",
	[L1IMU7_HWID] = "L1IMU7",
	[L1IMU8_HWID] = "L1IMU8",
	[L1IMU9_HWID] = "L1IMU9",
	[L1IMU10_HWID] = "L1IMU10",
	[L1IMU11_HWID] = "L1IMU11",
	[L1IMU12_HWID] = "L1IMU12",
	[L1IMU13_HWID] = "L1IMU13",
	[L1IMU14_HWID] = "L1IMU14",
	[L1IMU15_HWID] = "L1IMU15",
	[WAFLC_HWID] = "WAFLC",
	[FCH_USB_PD_HWID] = "FCH_USB_PD",
	[PCIE_HWID] = "PCIE",
	[PCS_HWID] = "PCS",
	[DDCL_HWID] = "DDCL",
	[SST_HWID] = "SST",
	[IOAGR_HWID] = "IOAGR",
	[NBIF_HWID] = "NBIF",
	[IOAPIC_HWID] = "IOAPIC",
	[SYSTEMHUB_HWID] = "SYSTEMHUB",
	[NTBCCP_HWID] = "NTBCCP",
	[UMC_HWID] = "UMC",
	[SATA_HWID] = "SATA",
	[USB_HWID] = "USB",
	[CCXSEC_HWID] = "CCXSEC",
	[XGMI_HWID] = "XGMI",
	[XGBE_HWID] = "XGBE",
	[MP0_HWID] = "MP0",
};

static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP] = GC_HWID,
	[HDP_HWIP] = HDP_HWID,
	[SDMA0_HWIP] = SDMA0_HWID,
	[SDMA1_HWIP] = SDMA1_HWID,
	[SDMA2_HWIP] = SDMA2_HWID,
	[SDMA3_HWIP] = SDMA3_HWID,
	[LSDMA_HWIP] = LSDMA_HWID,
	[MMHUB_HWIP] = MMHUB_HWID,
	[ATHUB_HWIP] = ATHUB_HWID,
	[NBIO_HWIP] = NBIF_HWID,
	[MP0_HWIP] = MP0_HWID,
	[MP1_HWIP] = MP1_HWID,
	[UVD_HWIP] = UVD_HWID,
	[VCE_HWIP] = VCE_HWID,
	[DF_HWIP] = DF_HWID,
	[DCE_HWIP] = DMU_HWID,
	[OSSSYS_HWIP] = OSSSYS_HWID,
	[SMUIO_HWIP] = SMUIO_HWID,
	[PWR_HWIP] = PWR_HWID,
	[NBIF_HWIP] = NBIF_HWID,
	[THM_HWIP] = THM_HWID,
	[CLK_HWIP] = CLKA_HWID,
	[UMC_HWIP] = UMC_HWID,
	[XGMI_HWIP] = XGMI_HWID,
	[DCI_HWIP] = DCI_HWID,
	[PCIE_HWIP] = PCIE_HWID,
};

static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
{
	uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
	uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;

	amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
				  adev->mman.discovery_tmr_size, false);
	return 0;
}

static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
{
	const struct firmware *fw;
	const char *fw_name;
	int r;

	switch (amdgpu_discovery) {
	case 2:
		fw_name = FIRMWARE_IP_DISCOVERY;
		break;
	default:
		dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
		return -EINVAL;
	}

	r = request_firmware(&fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
	release_firmware(fw);

	return 0;
}

static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t checksum = 0;
	int i;

	for (i = 0; i < size; i++)
		checksum += data[i];

	return checksum;
}

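/* Returns true when the byte-wise sum of @data matches the checksum taken
 * from the discovery header.
 */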
static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}

static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
{
	struct binary_header *bhdr;
	bhdr = (struct binary_header *)binary;

	return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}

static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
{
	/*
	 * So far, apply this quirk only on those Navy Flounder boards which
	 * have a bad harvest table of VCN config.
	 */
	if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
	    (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
		switch (adev->pdev->revision) {
		case 0xC1:
		case 0xC2:
		case 0xC3:
		case 0xC5:
		case 0xC7:
		case 0xCF:
		case 0xDF:
			adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		default:
			break;
		}
	}
}

static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
	struct table_info *info;
	struct binary_header *bhdr;
	uint16_t offset;
	uint16_t size;
	uint16_t checksum;
	int r;

	adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
	adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
	if (!adev->mman.discovery_bin)
		return -ENOMEM;

	r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
	if (r) {
		dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
		r = -EINVAL;
		goto out;
	}

	if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
		dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
		/* retry read ip discovery binary from file */
		r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
		if (r) {
			dev_err(adev->dev, "failed to read ip discovery binary from file\n");
			r = -EINVAL;
			goto out;
		}
		/* check the ip discovery binary signature */
		if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
			dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n");
			r = -EINVAL;
			goto out;
		}
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;

	offset = offsetof(struct binary_header, binary_checksum) +
		sizeof(bhdr->binary_checksum);
	size = le16_to_cpu(bhdr->binary_size) - offset;
	checksum = le16_to_cpu(bhdr->binary_checksum);

	if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
					      size, checksum)) {
		dev_err(adev->dev, "invalid ip discovery binary checksum\n");
		r = -EINVAL;
		goto out;
	}

	info = &bhdr->table_list[IP_DISCOVERY];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct ip_discovery_header *ihdr =
			(struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
		if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery data table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le16_to_cpu(ihdr->size), checksum)) {
			dev_err(adev->dev, "invalid ip discovery data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[GC];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct gpu_info_header *ghdr =
			(struct gpu_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery gc table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(ghdr->size), checksum)) {
			dev_err(adev->dev, "invalid gc data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[HARVEST_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct harvest_info_header *hhdr =
			(struct harvest_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
			dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      sizeof(struct harvest_table), checksum)) {
			dev_err(adev->dev, "invalid harvest data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[VCN_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (offset) {
		struct vcn_info_header *vhdr =
			(struct vcn_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery vcn table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(vhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid vcn data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	info = &bhdr->table_list[MALL_INFO];
	offset = le16_to_cpu(info->offset);
	checksum = le16_to_cpu(info->checksum);

	if (0 && offset) {
		struct mall_info_header *mhdr =
			(struct mall_info_header *)(adev->mman.discovery_bin + offset);

		if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
			dev_err(adev->dev, "invalid ip discovery mall table id\n");
			r = -EINVAL;
			goto out;
		}

		if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
						      le32_to_cpu(mhdr->size_bytes), checksum)) {
			dev_err(adev->dev, "invalid mall data table checksum\n");
			r = -EINVAL;
			goto out;
		}
	}

	return 0;

out:
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;

	return r;
}

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);

void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}

static int amdgpu_discovery_validate_ip(const struct ip *ip)
{
	if (ip->number_instance >= HWIP_MAX_INSTANCE) {
		DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
			  ip->number_instance);
		return -EINVAL;
	}
	if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
		DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
			  le16_to_cpu(ip->hw_id));
		return -EINVAL;
	}

	return 0;
}

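/* Walk every IP instance in the discovery table and derive the VCN and DMU
 * harvest configuration from the per-IP harvest bit.
 */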
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->harvest) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->number_instance == 0)
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
					else
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}

static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
						     uint32_t *vcn_harvest_count,
						     uint32_t *umc_harvest_count)
{
	struct binary_header *bhdr;
	struct harvest_table *harvest_info;
	u16 offset;
	int i;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);

	if (!offset) {
		dev_err(adev->dev, "invalid harvest table offset\n");
		return;
	}

	harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);

	for (i = 0; i < 32; i++) {
		if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
			break;

		switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
		case VCN_HWID:
			(*vcn_harvest_count)++;
			if (harvest_info->list[i].number_instance == 0)
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
			else
				adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
			break;
		case DMU_HWID:
			adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
			break;
		case UMC_HWID:
			(*umc_harvest_count)++;
			break;
		default:
			break;
		}
	}
}

/* ================================================== */

struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8 num_instance;
	u8 major, minor, revision;
	u8 harvest;

	int num_base_addresses;
	u32 base_addr[];
};

#ifdef notyet

struct ip_hw_id {
	struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

struct ip_die_entry {
	struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
	u16 num_ips;
};

/* -------------------------------------------------- */

struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};

static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}

static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	ssize_t res, at;
	int ii;

	for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
		/* Here we satisfy the condition that, at + size <= PAGE_SIZE.
		 */
		if (at + 12 > PAGE_SIZE)
			break;
		res = sysfs_emit_at(buf, at, "0x%08X\n",
				    ip_hw_instance->base_addr[ii]);
		if (res <= 0)
			break;
		at += res;
	}

	return res < 0 ? res : at;
}

static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)

static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
					struct attribute *attr,
					char *buf)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
	struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);

	if (!ip_hw_attr->show)
		return -EIO;

	return ip_hw_attr->show(ip_hw_instance, buf);
}

static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};

/* -------------------------------------------------- */

#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* -------------------------------------------------- */

static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)

static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)

static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
				      struct attribute *attr,
				      char *buf)
{
	struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!ip_die_entry_attr->show)
		return -EIO;

	return ip_die_entry_attr->show(ip_die_entry, buf);
}

static void ip_die_entry_release(struct kobject *kobj)
{
	struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);

	if (!list_empty(&ip_die_entry->ip_kset.list))
		DRM_ERROR("ip_die_entry->ip_kset is not empty");
	kfree(ip_die_entry);
}

static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

static struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

struct ip_discovery_top {
	struct kobject kobj; /* ip_discovery/ */
	struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};

static void die_kobj_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
						       struct ip_discovery_top,
						       die_kset);
	if (!list_empty(&ip_top->die_kset.list))
		DRM_ERROR("ip_top->die_kset is not empty");
}

static void ip_disc_release(struct kobject *kobj)
{
	struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
						       kobj);
	struct amdgpu_device *adev = ip_top->adev;

	adev->ip_top = NULL;
	kfree(ip_top);
}

static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
				      struct ip_die_entry *ip_die_entry,
				      const size_t _ip_offset, const int num_ips)
{
	int ii, jj, kk, res;

	DRM_DEBUG("num_ips:%d", num_ips);

	/* Find all IPs of a given HW ID, and add their instance to
	 * #die/#hw_id/#instance/<attributes>
	 */
	for (ii = 0; ii < HW_ID_MAX; ii++) {
		struct ip_hw_id *ip_hw_id = NULL;
		size_t ip_offset = _ip_offset;

		for (jj = 0; jj < num_ips; jj++) {
			struct ip *ip;
			struct ip_hw_instance *ip_hw_instance;

			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
			if (amdgpu_discovery_validate_ip(ip) ||
			    le16_to_cpu(ip->hw_id) != ii)
				goto next_ip;

			DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);

			/* We have a hw_id match; register the hw
			 * block if not yet registered.
			 */
			if (!ip_hw_id) {
				ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
				if (!ip_hw_id)
					return -ENOMEM;
				ip_hw_id->hw_id = ii;

				kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
				ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
				ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
				res = kset_register(&ip_hw_id->hw_id_kset);
				if (res) {
					DRM_ERROR("Couldn't register ip_hw_id kset");
					kfree(ip_hw_id);
					return res;
				}
				if (hw_id_names[ii]) {
					res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
								&ip_hw_id->hw_id_kset.kobj,
								hw_id_names[ii]);
					if (res) {
						DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
							  hw_id_names[ii],
							  kobject_name(&ip_die_entry->ip_kset.kobj));
					}
				}
			}

			/* Now register its instance.
			 */
			ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
							     base_addr,
							     ip->num_base_address),
						 GFP_KERNEL);
			if (!ip_hw_instance) {
				DRM_ERROR("no memory for ip_hw_instance");
				return -ENOMEM;
			}
			ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
			ip_hw_instance->num_instance = ip->number_instance;
			ip_hw_instance->major = ip->major;
			ip_hw_instance->minor = ip->minor;
			ip_hw_instance->revision = ip->revision;
			ip_hw_instance->harvest = ip->harvest;
			ip_hw_instance->num_base_addresses = ip->num_base_address;

			for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
				ip_hw_instance->base_addr[kk] = ip->base_address[kk];

			kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
			ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
			res = kobject_add(&ip_hw_instance->kobj, NULL,
					  "%d", ip_hw_instance->num_instance);
next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return 0;
}

static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct kset *die_kset = &adev->ip_top->die_kset;
	u16 num_dies, die_offset, num_ips;
	size_t ip_offset;
	int ii, res;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (ii = 0; ii < num_dies; ii++) {
		struct ip_die_entry *ip_die_entry;

		die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Add the die to the kset.
		 *
		 * dhdr->die_id == ii, which was checked in
		 * amdgpu_discovery_reg_base_init().
		 */

		ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
		if (!ip_die_entry)
			return -ENOMEM;

		ip_die_entry->num_ips = num_ips;

		kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
		ip_die_entry->ip_kset.kobj.kset = die_kset;
		ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
		res = kset_register(&ip_die_entry->ip_kset);
		if (res) {
			DRM_ERROR("Couldn't register ip_die_entry kset");
			kfree(ip_die_entry);
			return res;
		}

		amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
	}

	return 0;
}

#endif

static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	return 0;
#ifdef notyet
	struct kset *die_kset;
	int res, ii;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
#endif
}

#ifdef notyet

/* -------------------------------------------------- */

#define list_to_kobj(el) container_of(el, struct kobject, entry)

static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}

static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}

#endif /* notyet */

static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
#ifdef notyet
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
#endif
}

/* ================================================== */

static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	uint8_t num_base_address;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
				  le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
			  le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->number_instance,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
					ip->revision & 0xc0;
				ip->revision &= ~0xc0;
				if (adev->vcn.num_vcn_inst < AMDGPU_MAX_VCN_INSTANCES)
					adev->vcn.num_vcn_inst++;
				else
					dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
						adev->vcn.num_vcn_inst + 1,
						AMDGPU_MAX_VCN_INSTANCES);
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
				if (adev->sdma.num_instances < AMDGPU_MAX_SDMA_INSTANCES)
					adev->sdma.num_instances++;
				else
					dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
						adev->sdma.num_instances + 1,
						AMDGPU_MAX_SDMA_INSTANCES);
			}

			if (le16_to_cpu(ip->hw_id) == UMC_HWID)
				adev->gmc.num_umc++;

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
					DRM_DEBUG("set register base offset for %s\n",
						  hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->number_instance] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example. Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
					 * example. On most chips there are multiple instances
					 * with the same HWID.
					 */
					adev->ip_versions[hw_ip][ip->number_instance] =
						IP_VERSION(ip->major, ip->minor, ip->revision);
				}
			}

next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	amdgpu_discovery_sysfs_init(adev);

	return 0;
}

int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
				    int *major, int *minor, int *revision)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	int i, j;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
					      le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
				if (major)
					*major = ip->major;
				if (minor)
					*minor = ip->minor;
				if (revision)
					*revision = ip->revision;
				return 0;
			}
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	return -EINVAL;
}

static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
	int vcn_harvest_count = 0;
	int umc_harvest_count = 0;

	/*
	 * The harvest table is not applicable to Navi1x and legacy GPUs,
	 * so read the harvest bit from each IP data structure to set the
	 * harvest configuration.
	 */
	if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
		if ((adev->pdev->device == 0x731E &&
		     (adev->pdev->revision == 0xC6 ||
		      adev->pdev->revision == 0xC7)) ||
		    (adev->pdev->device == 0x7340 &&
		     adev->pdev->revision == 0xC9) ||
		    (adev->pdev->device == 0x7360 &&
		     adev->pdev->revision == 0xC7))
			amdgpu_discovery_read_harvest_bit_per_ip(adev,
								 &vcn_harvest_count);
	} else {
		amdgpu_discovery_read_from_harvest_table(adev,
							 &vcn_harvest_count,
							 &umc_harvest_count);
	}

	amdgpu_discovery_harvest_config_quirk(adev);

	if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
		adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
		adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
	}

	if (umc_harvest_count < adev->gmc.num_umc) {
		adev->gmc.num_umc -= umc_harvest_count;
	}
}

union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
};

static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union gc_info *gc_info;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[GC].offset);

	if (!offset)
		return 0;

	gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(gc_info->v1.header.version_major)) {
	case 1:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
		adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
						      le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
		if (gc_info->v1.header.version_minor >= 1) {
			adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
			adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
			adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
		}
		if (gc_info->v1.header.version_minor >= 2) {
			adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
			adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
			adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
			adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
			adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
			adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
			adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
			adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
		}
		break;
	case 2:
		adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
		adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
						 le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
		adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
		break;
	default:
		dev_err(adev->dev,
			"Unhandled GC info table %d.%d\n",
			le16_to_cpu(gc_info->v1.header.version_major),
			le16_to_cpu(gc_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union mall_info {
	struct mall_info_v1_0 v1;
};

static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union mall_info *mall_info;
	u32 u, mall_size_per_umc, m_s_present, half_use;
	u64 mall_size;
	u16 offset;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);

	if (!offset)
		return 0;

	mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(mall_info->v1.header.version_major)) {
	case 1:
		mall_size = 0;
		mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
		m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
		half_use = le32_to_cpu(mall_info->v1.m_half_use);
		for (u = 0; u < adev->gmc.num_umc; u++) {
			if (m_s_present & (1 << u))
				mall_size += mall_size_per_umc * 2;
			else if (half_use & (1 << u))
				mall_size += mall_size_per_umc / 2;
			else
				mall_size += mall_size_per_umc;
		}
		adev->gmc.mall_size = mall_size;
		break;
	default:
		dev_err(adev->dev,
			"Unhandled MALL info table %d.%d\n",
			le16_to_cpu(mall_info->v1.header.version_major),
			le16_to_cpu(mall_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

union vcn_info {
	struct vcn_info_v1_0 v1;
};

static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	union vcn_info *vcn_info;
	u16 offset;
	int v;

	if (!adev->mman.discovery_bin) {
		DRM_ERROR("ip discovery uninitialized\n");
		return -EINVAL;
	}

	/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
	 * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES
	 * but that may change in the future with new GPUs so keep this
	 * check for defensive purposes.
	 */
	if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
		dev_err(adev->dev, "invalid vcn instances\n");
		return -EINVAL;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);

	if (!offset)
		return 0;

	vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);

	switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
	case 1:
		/* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
		 * so this won't overflow.
		 */
		for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
			adev->vcn.vcn_codec_disable_mask[v] =
				le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
		}
		break;
	default:
		dev_err(adev->dev,
			"Unhandled VCN info table %d.%d\n",
			le16_to_cpu(vcn_info->v1.header.version_major),
			le16_to_cpu(vcn_info->v1.header.version_minor));
		return -EINVAL;
	}
	return 0;
}

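/* The helpers below map discovered IP versions onto the driver's software
 * IP blocks (common, GMC, IH, PSP, SMU, display, GFX, SDMA, multimedia, MES).
 */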
static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
{
	/* what IP to use for this? */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add common ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
{
	/* use GC or MMHUB IP version */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gmc ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[OSSSYS_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			adev->ip_versions[OSSSYS_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	case IP_VERSION(13, 0, 4):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			adev->ip_versions[MP0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 7):
	case IP_VERSION(13, 0, 8):
	case IP_VERSION(13, 0, 10):
	case IP_VERSION(13, 0, 11):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
		case IP_VERSION(3, 2, 1):
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
	}
#endif
	return 0;
}

static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}

static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 192):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 0, 2):
		case IP_VERSION(4, 0, 4):
			amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}

static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
{
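	/*
	 * The MES (micro engine scheduler) block is optional on GC 10.x and
	 * only added when the amdgpu_mes module parameter is set; on GC 11.x
	 * it is always enabled.
	 */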
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
		if (amdgpu_mes) {
			amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
			adev->enable_mes = true;
			if (amdgpu_mes_kiq)
				adev->enable_mes_kiq = true;
		}
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
		adev->enable_mes = true;
		adev->enable_mes_kiq = true;
		break;
	default:
		break;
	}
	return 0;
}

int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
{
	int r;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
		break;
	case CHIP_VEGA12:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
		break;
	case CHIP_RAVEN:
		vega10_reg_base_init(adev);
		adev->sdma.num_instances = 1;
		adev->vcn.num_vcn_inst = 1;
		adev->gmc.num_umc = 2;
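		/* Raven2 reports different IP revisions than the original Raven */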
		if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
		} else {
			adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
			adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
			adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
			adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
			adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
			adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
			adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
		}
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		adev->sdma.num_instances = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
		adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
		adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		adev->sdma.num_instances = 8;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 8;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
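		/* the remaining seven of the eight SDMA instances are tracked under SDMA1_HWIP */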
		adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
		adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
		break;
	case CHIP_ALDEBARAN:
		aldebaran_reg_base_init(adev);
		adev->sdma.num_instances = 5;
		adev->vcn.num_vcn_inst = 2;
		adev->gmc.num_umc = 4;
		adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
		adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
		adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
		adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
		adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
		adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
		adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
		adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
		adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
		adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
		break;
	default:
		r = amdgpu_discovery_reg_base_init(adev);
		if (r)
			return -EINVAL;

		amdgpu_discovery_harvest_ip(adev);
		amdgpu_discovery_get_gfx_info(adev);
		amdgpu_discovery_get_mall_info(adev);
		amdgpu_discovery_get_vcn_info(adev);
		break;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		adev->family = AMDGPU_FAMILY_AI;
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
		adev->family = AMDGPU_FAMILY_RV;
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
		adev->family = AMDGPU_FAMILY_NV;
		break;
	case IP_VERSION(10, 3, 1):
		adev->family = AMDGPU_FAMILY_VGH;
		break;
	case IP_VERSION(10, 3, 3):
		adev->family = AMDGPU_FAMILY_YC;
		break;
	case IP_VERSION(10, 3, 6):
		adev->family = AMDGPU_FAMILY_GC_10_3_6;
		break;
	case IP_VERSION(10, 3, 7):
		adev->family = AMDGPU_FAMILY_GC_10_3_7;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		adev->family = AMDGPU_FAMILY_GC_11_0_0;
		break;
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->family = AMDGPU_FAMILY_GC_11_0_1;
		break;
	default:
		return -EINVAL;
	}

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 7):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		adev->flags |= AMD_IS_APU;
		break;
	default:
		break;
	}

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
		adev->gmc.xgmi.supported = true;

	/* set NBIO version */
	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(6, 1, 0):
	case IP_VERSION(6, 2, 0):
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
		break;
	case IP_VERSION(7, 0, 0):
	case IP_VERSION(7, 0, 1):
	case IP_VERSION(2, 5, 0):
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
	case IP_VERSION(7, 3, 0):
	case IP_VERSION(7, 5, 0):
	case IP_VERSION(7, 5, 1):
		adev->nbio.funcs = &nbio_v7_2_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
		break;
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		if (amdgpu_sriov_vf(adev))
			adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
		else
			adev->nbio.funcs = &nbio_v4_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
		break;
	case IP_VERSION(7, 7, 0):
	case IP_VERSION(7, 7, 1):
		adev->nbio.funcs = &nbio_v7_7_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[HDP_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		adev->hdp.funcs = &hdp_v4_0_funcs;
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 0, 4):
	case IP_VERSION(5, 2, 0):
		adev->hdp.funcs = &hdp_v5_0_funcs;
		break;
	case IP_VERSION(5, 2, 1):
		adev->hdp.funcs = &hdp_v5_2_funcs;
		break;
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
		adev->hdp.funcs = &hdp_v6_0_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[DF_HWIP][0]) {
	case IP_VERSION(3, 6, 0):
	case IP_VERSION(3, 6, 1):
	case IP_VERSION(3, 6, 2):
		adev->df.funcs = &df_v3_6_funcs;
		break;
	case IP_VERSION(2, 1, 0):
	case IP_VERSION(2, 1, 1):
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(3, 5, 1):
	case IP_VERSION(3, 5, 2):
		adev->df.funcs = &df_v1_7_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[SMUIO_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(10, 0, 2):
		adev->smuio.funcs = &smuio_v9_0_funcs;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
		adev->smuio.funcs = &smuio_v11_0_funcs;
		break;
	case IP_VERSION(11, 0, 6):
	case IP_VERSION(11, 0, 10):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 9):
	case IP_VERSION(13, 0, 10):
		adev->smuio.funcs = &smuio_v11_0_6_funcs;
		break;
	case IP_VERSION(13, 0, 2):
		adev->smuio.funcs = &smuio_v13_0_funcs;
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 8):
		adev->smuio.funcs = &smuio_v13_0_6_funcs;
		break;
	default:
		break;
	}

	switch (adev->ip_versions[LSDMA_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		adev->lsdma.funcs = &lsdma_v6_0_funcs;
		break;
	default:
		break;
	}

	r = amdgpu_discovery_set_common_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gmc_ip_blocks(adev);
	if (r)
		return r;

	/* For SR-IOV, PSP needs to be initialized before IH */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_discovery_set_psp_ip_blocks(adev);
		if (r)
			return r;
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;
	} else {
		r = amdgpu_discovery_set_ih_ip_blocks(adev);
		if (r)
			return r;

		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
			r = amdgpu_discovery_set_psp_ip_blocks(adev);
			if (r)
				return r;
		}
	}

	if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_display_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_gc_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_sdma_ip_blocks(adev);
	if (r)
		return r;

	if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
	     !amdgpu_sriov_vf(adev)) ||
	    (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
		r = amdgpu_discovery_set_smu_ip_blocks(adev);
		if (r)
			return r;
	}

	r = amdgpu_discovery_set_mm_ip_blocks(adev);
	if (r)
		return r;

	r = amdgpu_discovery_set_mes_ip_blocks(adev);
	if (r)
		return r;

	return 0;
}