/*	$NetBSD: amdgpu_vi.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_vi.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/pci.h>
#include <linux/slab.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amd_pcie.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "smu/smu_7_1_1_d.h"
#include "smu/smu_7_1_1_sh_mask.h"

#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"

#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"

#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
#include "gmc_v8_0.h"
#include "gmc_v7_0.h"
#include "gfx_v8_0.h"
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
#include "uvd_v5_0.h"
#include "uvd_v6_0.h"
#include "vce_v3_0.h"
#if defined(CONFIG_DRM_AMD_ACP)
#include "amdgpu_acp.h"
#endif
#include "dce_virtual.h"
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

#include <linux/nbsd-namespace.h>

/*
 * Indirect registers accessor
 */
static u32 vi_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	r = RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void vi_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32_NO_KIQ(mmPCIE_INDEX, reg);
	(void)RREG32_NO_KIQ(mmPCIE_INDEX);
	WREG32_NO_KIQ(mmPCIE_DATA, v);
	(void)RREG32_NO_KIQ(mmPCIE_DATA);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

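/*
 * SMC indirect register accessors.  The SMC exposes its register file
 * behind an index/data pair; the _11 pair appears to be the one set
 * aside for driver use on VI dGPUs.  smc_idx_lock keeps the
 * index-then-data sequence atomic against concurrent users.
 */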
static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void vi_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
	WREG32_NO_KIQ(mmSMC_IND_DATA_11, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

/* smu_8_0_d.h */
#define mmMP0PUB_IND_INDEX	0x180
#define mmMP0PUB_IND_DATA	0x181

static u32 cz_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	r = RREG32(mmMP0PUB_IND_DATA);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}

static void cz_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32(mmMP0PUB_IND_INDEX, (reg));
	WREG32(mmMP0PUB_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
}

static u32 vi_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	r = RREG32(mmUVD_CTX_DATA);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void vi_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(mmUVD_CTX_INDEX, ((reg) & 0x1ff));
	WREG32(mmUVD_CTX_DATA, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 vi_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	r = RREG32(mmDIDT_IND_DATA);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(mmDIDT_IND_INDEX, (reg));
	WREG32(mmDIDT_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	r = RREG32(mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32(mmGC_CAC_IND_INDEX, (reg));
	WREG32(mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

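/*
 * Golden register tables.  Each entry is a {register, AND mask, OR value}
 * triple consumed by amdgpu_device_program_register_sequence(), which
 * applies new = (old & ~mask) | (value & mask) to each listed register.
 */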
static const u32 tonga_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 iceland_mgcg_cgcg_init[] =
{
	mmPCIE_INDEX, 0xffffffff, ixPCIE_CNTL2,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, ixCGTT_ROM_CLK_CTRL0,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
	mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};

static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
	mutex_lock(&adev->grbm_idx_mutex);

	if (amdgpu_sriov_vf(adev)) {
		xgpu_vi_init_golden_registers(adev);
		mutex_unlock(&adev->grbm_idx_mutex);
		return;
	}

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		break;
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	default:
		break;
	}
	mutex_unlock(&adev->grbm_idx_mutex);
}

/**
 * vi_get_xclk - get the xclk
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the reference clock used by the gfx engine
 * (VI).
 */
static u32 vi_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 tmp;

	if (adev->flags & AMD_IS_APU)
		return reference_clock;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
		return 1000;

	tmp = RREG32_SMC(ixCG_CLKPIN_CNTL);
	if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL, XTALIN_DIVIDE))
		return reference_clock / 4;

	return reference_clock;
}

/**
 * vi_srbm_select - select specific register instances
 *
 * @adev: amdgpu_device pointer
 * @me: selected ME (micro engine)
 * @pipe: pipe
 * @queue: queue
 * @vmid: VMID
 *
 * Switches the currently active register instances.  Some
 * registers are instanced per VMID, others are instanced per
 * me/pipe/queue combination.
 */
void vi_srbm_select(struct amdgpu_device *adev,
		    u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 srbm_gfx_cntl = 0;
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, PIPEID, pipe);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, MEID, me);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, VMID, vmid);
	srbm_gfx_cntl = REG_SET_FIELD(srbm_gfx_cntl, SRBM_GFX_CNTL, QUEUEID, queue);
	WREG32(mmSRBM_GFX_CNTL, srbm_gfx_cntl);
}

static void vi_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool vi_read_disabled_bios(struct amdgpu_device *adev)
{
	u32 bus_cntl;
	u32 d1vga_control = 0;
	u32 d2vga_control = 0;
	u32 vga_render_control = 0;
	u32 rom_cntl;
	bool r;

	bus_cntl = RREG32(mmBUS_CNTL);
	if (adev->mode_info.num_crtc) {
		d1vga_control = RREG32(mmD1VGA_CONTROL);
		d2vga_control = RREG32(mmD2VGA_CONTROL);
		vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
	}
	rom_cntl = RREG32_SMC(ixROM_CNTL);

	/* enable the rom */
	WREG32(mmBUS_CNTL, (bus_cntl & ~BUS_CNTL__BIOS_ROM_DIS_MASK));
	if (adev->mode_info.num_crtc) {
		/* Disable VGA mode */
		WREG32(mmD1VGA_CONTROL,
		       (d1vga_control & ~(D1VGA_CONTROL__D1VGA_MODE_ENABLE_MASK |
					  D1VGA_CONTROL__D1VGA_TIMING_SELECT_MASK)));
		WREG32(mmD2VGA_CONTROL,
		       (d2vga_control & ~(D2VGA_CONTROL__D2VGA_MODE_ENABLE_MASK |
					  D2VGA_CONTROL__D2VGA_TIMING_SELECT_MASK)));
		WREG32(mmVGA_RENDER_CONTROL,
		       (vga_render_control & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK));
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl | ROM_CNTL__SCK_OVERWRITE_MASK);

	r = amdgpu_read_bios(adev);

	/* restore regs */
	WREG32(mmBUS_CNTL, bus_cntl);
	if (adev->mode_info.num_crtc) {
		WREG32(mmD1VGA_CONTROL, d1vga_control);
		WREG32(mmD2VGA_CONTROL, d2vga_control);
		WREG32(mmVGA_RENDER_CONTROL, vga_render_control);
	}
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}

static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
				  u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	unsigned long flags;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;
	/* take the smc lock since we are using the smc index */
	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	/* set rom index to 0 */
	WREG32(mmSMC_IND_INDEX_11, ixROM_INDEX);
	WREG32(mmSMC_IND_DATA_11, 0);
	/* set index to data for continuous read */
	WREG32(mmSMC_IND_INDEX_11, ixROM_DATA);
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);

	return true;
}

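/*
 * Detect whether we are running under hardware virtualization.  Tonga and
 * Fiji expose a BIF IOV function identifier; it reads back as 0 on bare
 * metal, in which case a generic hypervisor check decides whether we are
 * in passthrough mode instead.
 */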
static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg = 0;

	if (adev->asic_type == CHIP_TONGA ||
	    adev->asic_type == CHIP_FIJI) {
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		/* bit0: 0 means pf and 1 means vf */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
		/* bit31: 0 means disable IOV and 1 means enable */
		if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
			adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	}

	if (reg == 0) {
		if (is_virtual_machine()) /* passthrough mode excludes sr-iov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}

static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
	{mmGRBM_STATUS},
	{mmGRBM_STATUS2},
	{mmGRBM_STATUS_SE0},
	{mmGRBM_STATUS_SE1},
	{mmGRBM_STATUS_SE2},
	{mmGRBM_STATUS_SE3},
	{mmSRBM_STATUS},
	{mmSRBM_STATUS2},
	{mmSRBM_STATUS3},
	{mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
	{mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
	{mmCP_STAT},
	{mmCP_STALLED_STAT1},
	{mmCP_STALLED_STAT2},
	{mmCP_STALLED_STAT3},
	{mmCP_CPF_BUSY_STAT},
	{mmCP_CPF_STALLED_STAT1},
	{mmCP_CPF_STATUS},
	{mmCP_CPC_BUSY_STAT},
	{mmCP_CPC_STALLED_STAT1},
	{mmCP_CPC_STATUS},
	{mmGB_ADDR_CONFIG},
	{mmMC_ARB_RAMCFG},
	{mmGB_TILE_MODE0},
	{mmGB_TILE_MODE1},
	{mmGB_TILE_MODE2},
	{mmGB_TILE_MODE3},
	{mmGB_TILE_MODE4},
	{mmGB_TILE_MODE5},
	{mmGB_TILE_MODE6},
	{mmGB_TILE_MODE7},
	{mmGB_TILE_MODE8},
	{mmGB_TILE_MODE9},
	{mmGB_TILE_MODE10},
	{mmGB_TILE_MODE11},
	{mmGB_TILE_MODE12},
	{mmGB_TILE_MODE13},
	{mmGB_TILE_MODE14},
	{mmGB_TILE_MODE15},
	{mmGB_TILE_MODE16},
	{mmGB_TILE_MODE17},
	{mmGB_TILE_MODE18},
	{mmGB_TILE_MODE19},
	{mmGB_TILE_MODE20},
	{mmGB_TILE_MODE21},
	{mmGB_TILE_MODE22},
	{mmGB_TILE_MODE23},
	{mmGB_TILE_MODE24},
	{mmGB_TILE_MODE25},
	{mmGB_TILE_MODE26},
	{mmGB_TILE_MODE27},
	{mmGB_TILE_MODE28},
	{mmGB_TILE_MODE29},
	{mmGB_TILE_MODE30},
	{mmGB_TILE_MODE31},
	{mmGB_MACROTILE_MODE0},
	{mmGB_MACROTILE_MODE1},
	{mmGB_MACROTILE_MODE2},
	{mmGB_MACROTILE_MODE3},
	{mmGB_MACROTILE_MODE4},
	{mmGB_MACROTILE_MODE5},
	{mmGB_MACROTILE_MODE6},
	{mmGB_MACROTILE_MODE7},
	{mmGB_MACROTILE_MODE8},
	{mmGB_MACROTILE_MODE9},
	{mmGB_MACROTILE_MODE10},
	{mmGB_MACROTILE_MODE11},
	{mmGB_MACROTILE_MODE12},
	{mmGB_MACROTILE_MODE13},
	{mmGB_MACROTILE_MODE14},
	{mmGB_MACROTILE_MODE15},
	{mmCC_RB_BACKEND_DISABLE, true},
	{mmGC_USER_RB_BACKEND_DISABLE, true},
	{mmGB_BACKEND_MAP, false},
	{mmPA_SC_RASTER_CONFIG, true},
	{mmPA_SC_RASTER_CONFIG_1, true},
};

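/*
 * Return the value of a whitelisted register.  Indexed registers are
 * instanced per shader engine/array and are read under grbm_idx_mutex
 * with GRBM_GFX_INDEX selecting the requested instance; the common gfx
 * config registers are served from values cached at init time instead
 * of touching the hardware.
 */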
static uint32_t vi_get_register_value(struct amdgpu_device *adev,
				      bool indexed, u32 se_num,
				      u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		uint32_t val;
		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg_offset) {
		case mmCC_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
		case mmGC_USER_RB_BACKEND_DISABLE:
			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
		case mmPA_SC_RASTER_CONFIG_1:
			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
		}

		mutex_lock(&adev->grbm_idx_mutex);
		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

		val = RREG32(reg_offset);

		if (se_num != 0xffffffff || sh_num != 0xffffffff)
			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		mutex_unlock(&adev->grbm_idx_mutex);
		return val;
	} else {
		unsigned idx;

		switch (reg_offset) {
		case mmGB_ADDR_CONFIG:
			return adev->gfx.config.gb_addr_config;
		case mmMC_ARB_RAMCFG:
			return adev->gfx.config.mc_arb_ramcfg;
		case mmGB_TILE_MODE0:
		case mmGB_TILE_MODE1:
		case mmGB_TILE_MODE2:
		case mmGB_TILE_MODE3:
		case mmGB_TILE_MODE4:
		case mmGB_TILE_MODE5:
		case mmGB_TILE_MODE6:
		case mmGB_TILE_MODE7:
		case mmGB_TILE_MODE8:
		case mmGB_TILE_MODE9:
		case mmGB_TILE_MODE10:
		case mmGB_TILE_MODE11:
		case mmGB_TILE_MODE12:
		case mmGB_TILE_MODE13:
		case mmGB_TILE_MODE14:
		case mmGB_TILE_MODE15:
		case mmGB_TILE_MODE16:
		case mmGB_TILE_MODE17:
		case mmGB_TILE_MODE18:
		case mmGB_TILE_MODE19:
		case mmGB_TILE_MODE20:
		case mmGB_TILE_MODE21:
		case mmGB_TILE_MODE22:
		case mmGB_TILE_MODE23:
		case mmGB_TILE_MODE24:
		case mmGB_TILE_MODE25:
		case mmGB_TILE_MODE26:
		case mmGB_TILE_MODE27:
		case mmGB_TILE_MODE28:
		case mmGB_TILE_MODE29:
		case mmGB_TILE_MODE30:
		case mmGB_TILE_MODE31:
			idx = (reg_offset - mmGB_TILE_MODE0);
			return adev->gfx.config.tile_mode_array[idx];
		case mmGB_MACROTILE_MODE0:
		case mmGB_MACROTILE_MODE1:
		case mmGB_MACROTILE_MODE2:
		case mmGB_MACROTILE_MODE3:
		case mmGB_MACROTILE_MODE4:
		case mmGB_MACROTILE_MODE5:
		case mmGB_MACROTILE_MODE6:
		case mmGB_MACROTILE_MODE7:
		case mmGB_MACROTILE_MODE8:
		case mmGB_MACROTILE_MODE9:
		case mmGB_MACROTILE_MODE10:
		case mmGB_MACROTILE_MODE11:
		case mmGB_MACROTILE_MODE12:
		case mmGB_MACROTILE_MODE13:
		case mmGB_MACROTILE_MODE14:
		case mmGB_MACROTILE_MODE15:
			idx = (reg_offset - mmGB_MACROTILE_MODE0);
			return adev->gfx.config.macrotile_mode_array[idx];
		default:
			return RREG32(reg_offset);
		}
	}
}

static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
			    u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(vi_allowed_read_registers); i++) {
		bool indexed = vi_allowed_read_registers[i].grbm_indexed;

		if (reg_offset != vi_allowed_read_registers[i].reg_offset)
			continue;

		*value = vi_get_register_value(adev, indexed, se_num, sh_num,
					       reg_offset);
		return 0;
	}
	return -EINVAL;
}

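/*
 * Reset the GPU through the PCI config reset method, then poll
 * mmCONFIG_MEMSIZE until it reads something other than 0xffffffff,
 * which indicates the ASIC has come back out of reset.
 */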
static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
{
	u32 i;

	dev_info(adev->dev, "GPU pci config reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);
	/* reset */
	amdgpu_device_pci_config_reset(adev);

	udelay(100);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
			/* enable BM */
			pci_set_master(adev->pdev);
			adev->has_hw_reset = true;
			return 0;
		}
		udelay(1);
	}
	return -EINVAL;
}

/**
 * vi_asic_pci_config_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Use PCI Config method to reset the GPU.
 *
 * Returns 0 for success.
 */
static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
{
	int r;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	r = vi_gpu_pci_config_reset(adev);

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}

static bool vi_asic_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		return amdgpu_dpm_is_baco_supported(adev);
	default:
		return false;
	}
}

static enum amd_reset_method
vi_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	default:
		baco_reset = false;
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_LEGACY;
}

/**
 * vi_asic_reset - soft reset GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Look up which blocks are hung and attempt
 * to reset them.
 * Returns 0 for success.
 */
static int vi_asic_reset(struct amdgpu_device *adev)
{
	int r;

	if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		r = amdgpu_dpm_baco_reset(adev);
	} else {
		r = vi_asic_pci_config_reset(adev);
	}

	return r;
}

static u32 vi_get_config_memsize(struct amdgpu_device *adev)
{
	return RREG32(mmCONFIG_MEMSIZE);
}

static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			    u32 cntl_reg, u32 status_reg)
{
	int r, i;
	struct atom_clock_dividers dividers;
	uint32_t tmp;

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       clock, false, &dividers);
	if (r)
		return r;

	tmp = RREG32_SMC(cntl_reg);

	if (adev->flags & AMD_IS_APU)
		tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
	else
		tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
				CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
	tmp |= dividers.post_divider;
	WREG32_SMC(cntl_reg, tmp);

	for (i = 0; i < 100; i++) {
		tmp = RREG32_SMC(status_reg);
		if (adev->flags & AMD_IS_APU) {
			if (tmp & 0x10000)
				break;
		} else {
			if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
				break;
		}
		mdelay(10);
	}
	if (i == 100)
		return -ETIMEDOUT;
	return 0;
}

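/*
 * GNB clock registers on APUs, reached through the SMC indirect space
 * (defined locally since they are not in smu_8_0_d.h).  Judging from the
 * usage below, CLK1/CLK2 drive the UVD DCLK/VCLK and CLK3 drives the
 * VCE ECLK on Carrizo/Stoney.
 */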
#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
#define ixGNB_CLK1_STATUS   0xD822010C
#define ixGNB_CLK2_DFS_CNTL 0xD8220110
#define ixGNB_CLK2_STATUS   0xD822012C
#define ixGNB_CLK3_DFS_CNTL 0xD8220130
#define ixGNB_CLK3_STATUS   0xD822014C

static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	int r;

	if (adev->flags & AMD_IS_APU) {
		r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
		if (r)
			return r;
	} else {
		r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
		if (r)
			return r;

		r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
		if (r)
			return r;
	}

	return 0;
}

static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	int r, i;
	struct atom_clock_dividers dividers;
	u32 tmp;
	u32 reg_ctrl;
	u32 reg_status;
	u32 status_mask;
	u32 reg_mask;

	if (adev->flags & AMD_IS_APU) {
		reg_ctrl = ixGNB_CLK3_DFS_CNTL;
		reg_status = ixGNB_CLK3_STATUS;
		status_mask = 0x00010000;
		reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	} else {
		reg_ctrl = ixCG_ECLK_CNTL;
		reg_status = ixCG_ECLK_STATUS;
		status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
		reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
	}

	r = amdgpu_atombios_get_clock_dividers(adev,
					       COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
					       ecclk, false, &dividers);
	if (r)
		return r;

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	tmp = RREG32_SMC(reg_ctrl);
	tmp &= ~reg_mask;
	tmp |= dividers.post_divider;
	WREG32_SMC(reg_ctrl, tmp);

	for (i = 0; i < 100; i++) {
		if (RREG32_SMC(reg_status) & status_mask)
			break;
		mdelay(10);
	}

	if (i == 100)
		return -ETIMEDOUT;

	return 0;
}

static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
#ifndef __NetBSD__		/* XXX amdgpu pcie */
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
#endif
}

static void vi_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void vi_enable_doorbell_aperture(struct amdgpu_device *adev,
					bool enable)
{
	u32 tmp;

	/* not necessary on CZ */
	if (adev->flags & AMD_IS_APU)
		return;

	tmp = RREG32(mmBIF_DOORBELL_APER_EN);
	if (enable)
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, BIF_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, 0);

	WREG32(mmBIF_DOORBELL_APER_EN, tmp);
}

#define ATI_REV_ID_FUSE_MACRO__ADDRESS	0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT	9
#define ATI_REV_ID_FUSE_MACRO__MASK	0x00001E00

static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->flags & AMD_IS_APU)
		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
}

static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
	}
}

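/*
 * Invalidate the HDP read cache.  When no ring is available to emit the
 * register write, a CPU write to mmHDP_DEBUG0 serves as the invalidation
 * trigger on these parts, mirroring vi_flush_hdp() above which uses
 * mmHDP_MEM_COHERENCY_FLUSH_CNTL.
 */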
static void vi_invalidate_hdp(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg) {
		WREG32(mmHDP_DEBUG0, 1);
		RREG32(mmHDP_DEBUG0);
	} else {
		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
	}
}

static bool vi_need_full_reset(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* CZ has hang issues with full reset at the moment */
		return false;
	case CHIP_FIJI:
	case CHIP_TONGA:
		/* XXX: soft reset should work on fiji and tonga */
		return true;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_TOPAZ:
	default:
		/* change this when we support soft reset */
		return true;
	}
}

static void vi_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
			      uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static uint64_t vi_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static bool vi_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 clock_cntl, pc;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* check if the SMC is already running */
	clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
	pc = RREG32_SMC(ixSMC_PC_C);
	if ((0 == REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable)) &&
	    (0x20100 <= pc))
		return true;

	return false;
}

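/* ASIC-level callbacks handed to the amdgpu core for all VI-family parts */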
static const struct amdgpu_asic_funcs vi_asic_funcs =
{
	.read_disabled_bios = &vi_read_disabled_bios,
	.read_bios_from_rom = &vi_read_bios_from_rom,
	.read_register = &vi_read_register,
	.reset = &vi_asic_reset,
	.reset_method = &vi_asic_reset_method,
	.set_vga_state = &vi_vga_set_state,
	.get_xclk = &vi_get_xclk,
	.set_uvd_clocks = &vi_set_uvd_clocks,
	.set_vce_clocks = &vi_set_vce_clocks,
	.get_config_memsize = &vi_get_config_memsize,
	.flush_hdp = &vi_flush_hdp,
	.invalidate_hdp = &vi_invalidate_hdp,
	.need_full_reset = &vi_need_full_reset,
	.init_doorbell_index = &legacy_doorbell_index_init,
	.get_pcie_usage = &vi_get_pcie_usage,
	.need_reset_on_init = &vi_need_reset_on_init,
	.get_pcie_replay_count = &vi_get_pcie_replay_count,
	.supports_baco = &vi_asic_supports_baco,
};

#define CZ_REV_BRISTOL(rev)	 \
	((rev >= 0xC8 && rev <= 0xCE) || (rev >= 0xE1 && rev <= 0xE6))

static int vi_common_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->smc_rreg = &cz_smc_rreg;
		adev->smc_wreg = &cz_smc_wreg;
	} else {
		adev->smc_rreg = &vi_smc_rreg;
		adev->smc_wreg = &vi_smc_wreg;
	}
	adev->pcie_rreg = &vi_pcie_rreg;
	adev->pcie_wreg = &vi_pcie_wreg;
	adev->uvd_ctx_rreg = &vi_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
	adev->didt_rreg = &vi_didt_rreg;
	adev->didt_wreg = &vi_didt_wreg;
	adev->gc_cac_rreg = &vi_gc_cac_rreg;
	adev->gc_cac_wreg = &vi_gc_cac_wreg;

	adev->asic_funcs = &vi_asic_funcs;

	adev->rev_id = vi_get_rev_id(adev);
	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		adev->cg_flags = 0;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_FIJI:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x3c;
		break;
	case CHIP_TONGA:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_POLARIS11:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x5A;
		break;
	case CHIP_POLARIS10:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x50;
		break;
	case CHIP_POLARIS12:
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x64;
		break;
	case CHIP_VEGAM:
		adev->cg_flags = 0;
		/*AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG;*/
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x6E;
		break;
	case CHIP_CARRIZO:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		/* rev0 hardware requires workarounds to support PG */
		adev->pg_flags = 0;
		if (adev->rev_id != 0x00 || CZ_REV_BRISTOL(adev->pdev->revision)) {
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_SMG |
				AMD_PG_SUPPORT_GFX_PIPELINE |
				AMD_PG_SUPPORT_CP |
				AMD_PG_SUPPORT_UVD |
				AMD_PG_SUPPORT_VCE;
		}
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
	case CHIP_STONEY:
		adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_CGTS |
			AMD_CG_SUPPORT_GFX_CGTS_LS |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_VCE_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_GFX_SMG |
			AMD_PG_SUPPORT_GFX_PIPELINE |
			AMD_PG_SUPPORT_CP |
			AMD_PG_SUPPORT_UVD |
			AMD_PG_SUPPORT_VCE;
		adev->external_rev_id = adev->rev_id + 0x61;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_vi_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int vi_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_get_irq(adev);

	return 0;
}

static int vi_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_add_irq_id(adev);

	return 0;
}

static int vi_common_sw_fini(void *handle)
{
	return 0;
}

static int vi_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* move the golden regs per IP block */
	vi_init_golden_registers(adev);
	/* enable pcie gen2/3 link */
	vi_pcie_gen3_enable(adev);
	/* enable aspm */
	vi_program_aspm(adev);
	/* enable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, true);

	return 0;
}

static int vi_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	vi_enable_doorbell_aperture(adev, false);

	if (amdgpu_sriov_vf(adev))
		xgpu_vi_mailbox_put_irq(adev);

	return 0;
}

static int vi_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_fini(adev);
}

static int vi_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return vi_common_hw_init(adev);
}

static bool vi_common_is_idle(void *handle)
{
	return true;
}

static int vi_common_wait_for_idle(void *handle)
{
	return 0;
}

static int vi_common_soft_reset(void *handle)
{
	return 0;
}

static void vi_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
						   bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS))
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
	else
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);

	if (temp != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void vi_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
	else
		data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;

	if (temp != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void vi_update_hdp_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
	else
		data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

	if (temp != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static void vi_update_drm_light_sleep(struct amdgpu_device *adev,
				      bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32(0x157a);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (temp != data)
		WREG32(0x157a, data);
}

static void vi_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						    bool enable)
{
	uint32_t temp, data;

	temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (temp != data)
		WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}

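/*
 * On Tonga and the Polaris parts, system-level clockgating is owned by
 * the SMU: for each block we encode the supported gating modes and the
 * requested state into a PP_CG_MSG_ID and hand it to powerplay via
 * amdgpu_dpm_set_clockgating_by_smu().
 */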
static int vi_common_set_clockgating_state_by_smu(void *handle,
					   enum amd_clockgating_state state)
{
	uint32_t msg_id, pp_state = 0;
	uint32_t pp_support_state = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->cg_flags & (AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_MC_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_MC,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_SDMA_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_SDMA,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			pp_support_state = PP_STATE_SUPPORT_LS;
			pp_state = PP_STATE_LS;
		}
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG) {
			pp_support_state |= PP_STATE_SUPPORT_CG;
			pp_state |= PP_STATE_CG;
		}
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_HDP,
				      pp_support_state,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}


	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	if (adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG) {
		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_BIF,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_LS;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_DRM,
				      PP_STATE_SUPPORT_LS,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}

	if (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG) {

		if (state == AMD_CG_STATE_UNGATE)
			pp_state = 0;
		else
			pp_state = PP_STATE_CG;

		msg_id = PP_CG_MSG_ID(PP_GROUP_SYS,
				      PP_BLOCK_SYS_ROM,
				      PP_STATE_SUPPORT_CG,
				      pp_state);
		if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
			amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
	}
	return 0;
}

static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		vi_update_bif_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		vi_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_TONGA:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		vi_common_set_clockgating_state_by_smu(adev, state);
	default:
		break;
	}
	return 0;
}

static int vi_common_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void vi_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(ixPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(mmHDP_MEM_POWER_LS);
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	data = RREG32(mmHDP_HOST_PATH_CNTL);
	if (!(data & HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
}

static const struct amd_ip_funcs vi_common_ip_funcs = {
	.name = "vi_common",
	.early_init = vi_common_early_init,
	.late_init = vi_common_late_init,
	.sw_init = vi_common_sw_init,
	.sw_fini = vi_common_sw_fini,
	.hw_init = vi_common_hw_init,
	.hw_fini = vi_common_hw_fini,
	.suspend = vi_common_suspend,
	.resume = vi_common_resume,
	.is_idle = vi_common_is_idle,
	.wait_for_idle = vi_common_wait_for_idle,
	.soft_reset = vi_common_soft_reset,
	.set_clockgating_state = vi_common_set_clockgating_state,
	.set_powergating_state = vi_common_set_powergating_state,
	.get_clockgating_state = vi_common_get_clockgating_state,
};

static const struct amdgpu_ip_block_version vi_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &vi_common_ip_funcs,
};

int vi_set_ip_blocks(struct amdgpu_device *adev)
{
	/* in early init stage, vbios code won't work */
	vi_detect_hw_virtualization(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_vi_virt_ops;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		/* topaz has no DCE, UVD, VCE */
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block);
		amdgpu_device_ip_block_add(adev, &iceland_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		break;
	case CHIP_FIJI:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_TONGA:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block);
		if (!amdgpu_sriov_vf(adev)) {
			amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block);
		}
		break;
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
		break;
	case CHIP_CARRIZO:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	case CHIP_STONEY:
		amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block);
		amdgpu_device_ip_block_add(adev, &cz_ih_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display)
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		else
			amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
		amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
		amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
		amdgpu_device_ip_block_add(adev, &acp_ip_block);
#endif
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	return 0;
}

void legacy_doorbell_index_init(struct amdgpu_device *adev)
{
	adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
	adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
	adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
	adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
	adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
	adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
	adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
	adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
	adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
	adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
	adev->doorbell_index.sdma_engine[0] = AMDGPU_DOORBELL_sDMA_ENGINE0;
	adev->doorbell_index.sdma_engine[1] = AMDGPU_DOORBELL_sDMA_ENGINE1;
	adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
	adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
}