/*	$NetBSD: amdgpu_vi.c,v 1.4 2023/09/30 10:46:45 mrg Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vi.c,v 1.4 2023/09/30 10:46:45 mrg Exp $");

#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

#include <linux/nbsd-namespace.h>

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
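/*
 * SMC indirect register accessors.  Discrete VI parts expose the SMC
 * address space through the SMC_IND_INDEX_11/SMC_IND_DATA_11 pair;
 * smc_idx_lock serializes the index/data access sequence.
 */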
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}
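/*
 * Per-ASIC "golden" register settings applied from vi_init_golden_registers().
 * Each entry is a { register, mask, value } triple consumed by
 * amdgpu_device_program_register_sequence().
 */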
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}
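/*
 * vi_read_disabled_bios - read the vbios with the display hardware quiesced
 *
 * Temporarily enables the expansion ROM and disables the D1/D2 VGA paths so
 * the BIOS image can be fetched, then restores the saved register state.
 */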
static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}
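/*
 * Detect whether we are running under SR-IOV (as PF or VF) or in plain
 * passthrough, and record the result in adev->virt.caps.
 */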
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};
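/*
 * vi_get_register_value - helper for the read_register asic callback
 *
 * Indexed (GRBM-banked) registers are answered from the cached gfx config
 * where possible; otherwise the SE/SH selector is programmed under
 * grbm_idx_mutex before the raw read.
 */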
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}
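/*
 * vi_gpu_pci_config_reset - reset the asic via the PCI config space
 *
 * Bus mastering is disabled around the reset, and the asic is considered
 * back up once CONFIG_MEMSIZE reads something other than 0xffffffff.
 */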
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C
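/*
 * UVD clocks come from different sources on APUs (GNB DFS registers) and
 * dGPUs (CG_VCLK/CG_DCLK), so pick the control/status register pair
 * accordingly.
 */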
static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void vi_program_aspm(struct amdgpu_device *adev)
{
	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}
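/* Invalidate the HDP read cache, via a ring write when one is available. */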
static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}
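/* Dispatch table wiring the VI routines above into the generic asic callbacks. */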
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
};

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
			/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}
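/*
 * Clockgating helpers: each routine read-modify-writes one gating control
 * register and only writes back when the value actually changes.
 */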
static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
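/*
 * On SMU-managed parts (Tonga, Polaris, VEGAM) clockgating is requested by
 * sending PP_CG_MSG_ID messages to the SMU rather than by writing the
 * gating registers directly.
 */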
static int vi_common_set_clockgating_state_by_smu(void *handle,
						  enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}


	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
						       state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
							state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
					  state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
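/* Report the gating features currently enabled in hardware as CG flags. */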
static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};
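/*
 * vi_set_ip_blocks - register the per-ASIC IP blocks
 *
 * Display is picked in priority order: virtual display, then DC when built
 * in and supported, then the classic DCE block.  On Fiji/Tonga, UVD and VCE
 * are not added under SR-IOV.
 */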
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}
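/* Pre-Vega doorbell layout: fixed assignments from the legacy doorbell enum. */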
void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}