/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v13_0.h"
#include "smu13_driver_if_v13_0_7.h"
#include "soc15_common.h"
#include "atom.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v13_0_7_pptable.h"
#include "smu_v13_0_7_ppsmc.h"
#include "nbio/nbio_4_3_0_offset.h"
#include "nbio/nbio_4_3_0_sh_mask.h"
#include "mp/mp_13_0_0_offset.h"
#include "mp/mp_13_0_0_sh_mask.h"

#include "asic_reg/mp/mp_13_0_0_sh_mask.h"
#include "smu_cmn.h"
#include "amdgpu_ras.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/* Recover the owning amdgpu_device from an i2c adapter embedded at pm.smu_i2c */
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

#define FEATURE_MASK(feature) (1ULL << feature)

/* Any of these PMFW features being enabled means DPM is considered running */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
	FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))

/* SMN address of the MP1 firmware flags register (read in check_fw_status) */
#define smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 0x3b10028

/* Buffer size reserved for the MP0/MP1 combo pptable */
#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE	0x4000

/* Overdrive setting ids consumed by smu_v13_0_7_get_od_setting_limits() */
#define PP_OD_FEATURE_GFXCLK_FMIN	0
#define PP_OD_FEATURE_GFXCLK_FMAX	1
#define PP_OD_FEATURE_UCLK_FMIN		2
#define PP_OD_FEATURE_UCLK_FMAX		3
#define PP_OD_FEATURE_GFX_VF_CURVE	4

#define LINK_SPEED_MAX	3

/*
 * Driver message id -> PMFW message id mapping.
 * NOTE(review): the trailing 0/1 flag's semantics come from MSG_MAP/smu_cmn
 * (presumably "valid in VF mode") — confirm against smu_cmn.h.
 */
static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,		1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,	0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,	0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,		0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,	0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,		1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,	1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,	1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,	1),
	MSG_MAP(GetEnabledSmuFeaturesLow,	PPSMC_MSG_GetRunningSmuFeaturesLow,	1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,	1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,		1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,			0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,		0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,		0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,	0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,		0),
	MSG_MAP(RunDcBtc,			PPSMC_MSG_RunDcBtc,			0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,			0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,			0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,		1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,		1),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,		1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,		0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,		1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,		1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,		1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,			0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,			0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,			0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,		0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,		1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,	0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,	0),
	MSG_MAP(AllowIHHostInterrupt,		PPSMC_MSG_AllowIHHostInterrupt,		0),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,	0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,	0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,		0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,			0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,		0),
	MSG_MAP(Mode1Reset,			PPSMC_MSG_Mode1Reset,			0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,		0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,	0),
	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,			0),
	MSG_MAP(AllowGpo,			PPSMC_MSG_SetGpoAllow,			0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,			0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,		0),
};

/*
 * Generic SMU clock id -> PMFW PPCLK id.  SCLK aliases GFXCLK and MCLK
 * aliases UCLK on this ASIC.
 */
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK,		PPCLK_GFXCLK),
	CLK_MAP(SCLK,		PPCLK_GFXCLK),
	CLK_MAP(SOCCLK,		PPCLK_SOCCLK),
	CLK_MAP(FCLK,		PPCLK_FCLK),
	CLK_MAP(UCLK,		PPCLK_UCLK),
	CLK_MAP(MCLK,		PPCLK_UCLK),
	CLK_MAP(VCLK,		PPCLK_VCLK_0),
	CLK_MAP(VCLK1,		PPCLK_VCLK_1),
	CLK_MAP(DCLK,		PPCLK_DCLK_0),
	CLK_MAP(DCLK1,		PPCLK_DCLK_1),
};

/* Generic SMU feature bit -> PMFW feature bit */
static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_MP0CLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	/* VCLK/DCLK DPM and PPT have no dedicated PMFW bits; they piggyback on these */
	[SMU_FEATURE_DPM_VCLK_BIT]	= {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT]	= {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_PPT_BIT]		= {1, FEATURE_THROTTLERS_BIT},
};

/* Generic SMU table id -> PMFW table id */
static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(OVERDRIVE),
};

/* Power source (AC/DC) mapping */
static struct cmn2asic_mapping smu_v13_0_7_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};

/* Power-profile mode -> PPLIB workload bit */
static struct cmn2asic_mapping smu_v13_0_7_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,		WORKLOAD_PPLIB_WINDOW_3D_BIT),
};

/* PMFW throttler status bit -> generic SMU throttler bit */
static const uint8_t smu_v13_0_7_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
};

/*
 * Build the "allowed features" mask handed to the PMFW.
 *
 * @feature_mask: output array of @num 32-bit words; the first two words are
 *                written through a uint64_t alias, so @num must be <= 2
 *
 * Always-required features are set unconditionally; optional ones follow the
 * adev->pm.pp_feature option mask and the power-gating flags.
 *
 * Returns 0 on success, -EINVAL when @num exceeds 2.
 */
static int
smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
				  uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;

	if (num > 2)
		return -EINVAL;

	memset(feature_mask, 0, sizeof(uint32_t) * num);

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DATA_READ_BIT);

	/* gfxclk dpm pulls in the GFX IMU and the power optimizer */
	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
	}

	if (adev->pm.pp_feature & PP_GFXOFF_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);

	/* mclk dpm pulls in fclk dpm and memory voltage scaling */
	if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
	}

	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	if (adev->pm.pp_feature & PP_PCIE_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT);

	if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (adev->pm.pp_feature & PP_ULV_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);

	/* features enabled regardless of the pp_feature option mask */
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_VCN_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DF_CSTATE_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_THROTTLERS_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FAN_CONTROL_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MEM_TEMP_READ_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_DSTATE_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_MPCLK_DS_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
	*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);

	if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);

	/* ATHUB/MMHUB powergating needs both PG flags set */
	if ((adev->pg_flags & AMD_PG_SUPPORT_ATHUB) &&
	    (adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);

	return 0;
}

static int
smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
{
	/*
	 * Validate/interpret the platform caps of the stored powerplay table:
	 * latch DC-by-gpio, BACO/MACO support and the thermal controller type.
	 * Overdrive support is compiled out (see #if 0) and forced off.
	 */
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_7_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	BoardTable_t *BoardTable = &smc_pptable->BoardTable;
#if 0
	const OverDriveLimits_t * const overdrive_upperlimits =
		&smc_pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const overdrive_lowerlimits =
		&smc_pptable->SkuTable.OverDriveLimitsMin;
#endif

	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
		smu_baco->platform_support = true;

		/* MACO additionally needs HSR or VDDQ-off support on the board */
		if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
		    && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
			smu_baco->maco_support = true;
	}

#if 0
	if (!overdrive_lowerlimits->FeatureCtrlMask ||
	    !overdrive_upperlimits->FeatureCtrlMask)
		smu->od_enabled = false;

	/*
	 * Instead of having its own buffer space and get overdrive_table copied,
	 * smu->od_settings just points to the actual overdrive_table
	 */
	smu->od_settings = &powerplay_table->overdrive_table;
#else
	smu->od_enabled = false;
#endif

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	return 0;
}

/* Copy the pptable payload of the powerplay table into driver_pptable. */
static int smu_v13_0_7_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_7_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct amdgpu_device *adev = smu->adev;

	/*
	 * NOTE(review): device-id 0x51 specific DebugOverrides quirk; the
	 * meaning of bit 0x00000080 is defined by PMFW, not visible here.
	 */
	if (adev->pdev->device == 0x51)
		powerplay_table->smc_pptable.SkuTable.DebugOverrides |= 0x00000080;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

/*
 * Check that the MP1 firmware is up by testing the INTERRUPTS_ENABLED flag
 * in its status register.  Returns 0 when ready, -EIO otherwise.
 */
static int smu_v13_0_7_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS_SMU_13_0_7 & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

/*
 * Layout of the smc_dpm_info atombios data table for this ASIC.
 * NOTE(review): guarded with #ifndef so a header-provided definition (if the
 * macro of the same name is defined) takes precedence.
 */
#ifndef atom_smc_dpm_info_table_13_0_7
struct atom_smc_dpm_info_table_13_0_7 {
	struct atom_common_table_header table_header;
	BoardTable_t BoardTable;
};
#endif

/*
 * Overwrite the BoardTable portion of driver_pptable with board-specific
 * data read from the atombios smc_dpm_info data table.
 */
static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	PPTable_t *smc_pptable = table_context->driver_pptable;

	struct atom_smc_dpm_info_table_13_0_7 *smc_dpm_table;

	BoardTable_t *BoardTable = &smc_pptable->BoardTable;

	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));

	return 0;
}

/*
 * Ask the PMFW for the combo pptable and expose it (plus its size) to the
 * caller.  The data lands in smu_table->combo_pptable, which @table then
 * points at — no copy is made.
 */
static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	int ret = 0;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = combo_pptable;
	*size = sizeof(struct smu_13_0_7_powerplay_table);

	return 0;
}

/*
 * Fetch, store, (optionally) append board data to, and validate the
 * powerplay table.  This is the top-level pptable setup entry point.
 */
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/*
	 * With SCPM enabled, the pptable used will be signed. It cannot
	 * be used directly by driver. To get the raw pptable, we need to
	 * rely on the combo pptable(and its relevant SMU message).
	 */
	ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
						&smu_table->power_play_table,
						&smu_table->power_play_table_size);
	if (ret)
		return ret;

	ret = smu_v13_0_7_store_powerplay_table(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, the operation below will be handled
	 * by PSP. Driver involvement is unnecessary and useless.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_v13_0_7_append_powerplay_table(smu);
		if (ret)
			return ret;
	}

	ret = smu_v13_0_7_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}

/*
 * Declare the VRAM-backed SMU tables and allocate the host-side caches
 * (metrics, gpu_metrics, watermarks).  On allocation failure the already
 * allocated buffers are released via the goto-cleanup chain.
 */
static int smu_v13_0_7_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTableExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;	/* force a refresh on first metrics read */

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	return 0;

err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

/* Allocate the smu_13_0 dpm context attached to smu->smu_dpm. */
static int smu_v13_0_7_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);

	return 0;
}

/* Init ASIC-specific tables/dpm context, then the common v13.0 SMC tables. */
static int smu_v13_0_7_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v13_0_7_tables_init(smu);
	if (ret)
		return ret;

	ret = smu_v13_0_7_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v13_0_init_smc_tables(smu);
}

/*
 * Populate the driver dpm tables (socclk/gfxclk/uclk/fclk/vclk/dclk plus the
 * pcie link table) from the PMFW.  For any clock whose DPM feature is
 * disabled, fall back to a single level at the boot frequency.
 * NOTE(review): boot_values.*clk are divided by 100 — presumably a 10 kHz ->
 * MHz conversion; confirm against smu_v13_0 boot value parsing.
 */
static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	PPTable_t *driver_ppt = smu->smu_table.driver_pptable;
	SkuTable_t *skutable = &driver_ppt->SkuTable;
	struct smu_13_0_dpm_table *dpm_table;
	struct smu_13_0_pcie_table *pcie_table;
	uint32_t link_level;
	int ret = 0;

	/* socclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.soc_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret =
smu_v13_0_set_single_dpm_table(smu, 582 SMU_SOCCLK, 583 dpm_table); 584 if (ret) 585 return ret; 586 } else { 587 dpm_table->count = 1; 588 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; 589 dpm_table->dpm_levels[0].enabled = true; 590 dpm_table->min = dpm_table->dpm_levels[0].value; 591 dpm_table->max = dpm_table->dpm_levels[0].value; 592 } 593 594 /* gfxclk dpm table setup */ 595 dpm_table = &dpm_context->dpm_tables.gfx_table; 596 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { 597 ret = smu_v13_0_set_single_dpm_table(smu, 598 SMU_GFXCLK, 599 dpm_table); 600 if (ret) 601 return ret; 602 603 if (skutable->DriverReportedClocks.GameClockAc && 604 (dpm_table->dpm_levels[dpm_table->count - 1].value > 605 skutable->DriverReportedClocks.GameClockAc)) { 606 dpm_table->dpm_levels[dpm_table->count - 1].value = 607 skutable->DriverReportedClocks.GameClockAc; 608 dpm_table->max = skutable->DriverReportedClocks.GameClockAc; 609 } 610 } else { 611 dpm_table->count = 1; 612 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; 613 dpm_table->dpm_levels[0].enabled = true; 614 dpm_table->min = dpm_table->dpm_levels[0].value; 615 dpm_table->max = dpm_table->dpm_levels[0].value; 616 } 617 618 /* uclk dpm table setup */ 619 dpm_table = &dpm_context->dpm_tables.uclk_table; 620 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { 621 ret = smu_v13_0_set_single_dpm_table(smu, 622 SMU_UCLK, 623 dpm_table); 624 if (ret) 625 return ret; 626 } else { 627 dpm_table->count = 1; 628 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; 629 dpm_table->dpm_levels[0].enabled = true; 630 dpm_table->min = dpm_table->dpm_levels[0].value; 631 dpm_table->max = dpm_table->dpm_levels[0].value; 632 } 633 634 /* fclk dpm table setup */ 635 dpm_table = &dpm_context->dpm_tables.fclk_table; 636 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { 637 ret = smu_v13_0_set_single_dpm_table(smu, 638 
SMU_FCLK, 639 dpm_table); 640 if (ret) 641 return ret; 642 } else { 643 dpm_table->count = 1; 644 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; 645 dpm_table->dpm_levels[0].enabled = true; 646 dpm_table->min = dpm_table->dpm_levels[0].value; 647 dpm_table->max = dpm_table->dpm_levels[0].value; 648 } 649 650 /* vclk dpm table setup */ 651 dpm_table = &dpm_context->dpm_tables.vclk_table; 652 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { 653 ret = smu_v13_0_set_single_dpm_table(smu, 654 SMU_VCLK, 655 dpm_table); 656 if (ret) 657 return ret; 658 } else { 659 dpm_table->count = 1; 660 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; 661 dpm_table->dpm_levels[0].enabled = true; 662 dpm_table->min = dpm_table->dpm_levels[0].value; 663 dpm_table->max = dpm_table->dpm_levels[0].value; 664 } 665 666 /* dclk dpm table setup */ 667 dpm_table = &dpm_context->dpm_tables.dclk_table; 668 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { 669 ret = smu_v13_0_set_single_dpm_table(smu, 670 SMU_DCLK, 671 dpm_table); 672 if (ret) 673 return ret; 674 } else { 675 dpm_table->count = 1; 676 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; 677 dpm_table->dpm_levels[0].enabled = true; 678 dpm_table->min = dpm_table->dpm_levels[0].value; 679 dpm_table->max = dpm_table->dpm_levels[0].value; 680 } 681 682 /* lclk dpm table setup */ 683 pcie_table = &dpm_context->dpm_tables.pcie_table; 684 pcie_table->num_of_link_levels = 0; 685 for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { 686 if (!skutable->PcieGenSpeed[link_level] && 687 !skutable->PcieLaneCount[link_level] && 688 !skutable->LclkFreq[link_level]) 689 continue; 690 691 pcie_table->pcie_gen[pcie_table->num_of_link_levels] = 692 skutable->PcieGenSpeed[link_level]; 693 pcie_table->pcie_lane[pcie_table->num_of_link_levels] = 694 skutable->PcieLaneCount[link_level]; 695 pcie_table->clk_freq[pcie_table->num_of_link_levels] = 
696 skutable->LclkFreq[link_level]; 697 pcie_table->num_of_link_levels++; 698 } 699 700 return 0; 701 } 702 703 static bool smu_v13_0_7_is_dpm_running(struct smu_context *smu) 704 { 705 int ret = 0; 706 uint64_t feature_enabled; 707 708 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); 709 if (ret) 710 return false; 711 712 return !!(feature_enabled & SMC_DPM_FEATURE); 713 } 714 715 static void smu_v13_0_7_dump_pptable(struct smu_context *smu) 716 { 717 struct smu_table_context *table_context = &smu->smu_table; 718 PPTable_t *pptable = table_context->driver_pptable; 719 SkuTable_t *skutable = &pptable->SkuTable; 720 721 dev_info(smu->adev->dev, "Dumped PPTable:\n"); 722 723 dev_info(smu->adev->dev, "Version = 0x%08x\n", skutable->Version); 724 dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", skutable->FeaturesToRun[0]); 725 dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", skutable->FeaturesToRun[1]); 726 } 727 728 static uint32_t smu_v13_0_7_get_throttler_status(SmuMetrics_t *metrics) 729 { 730 uint32_t throttler_status = 0; 731 int i; 732 733 for (i = 0; i < THROTTLER_COUNT; i++) 734 throttler_status |= 735 (metrics->ThrottlingPercentage[i] ? 
1U << i : 0);

	return throttler_status;
}

/* UCLK activity (%) at or below this is treated as "idle" for freq reporting */
#define SMU_13_0_7_BUSY_THRESHOLD	15

/*
 * Fetch one @member out of the (cached) PMFW metrics table into @value.
 * smu_cmn_get_metrics_table() refreshes smu_table->metrics_table as needed.
 * Unknown members yield UINT_MAX with a 0 return.
 */
static int smu_v13_0_7_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_VCLK1:
		*value = metrics->CurrClock[PPCLK_VCLK_1];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_DCLK1:
		*value = metrics->CurrClock[PPCLK_DCLK_1];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		/* report post-deep-sleep frequency while the memory path is idle */
		if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* scaled by 256 — presumably 24.8 fixed point for the consumer; TODO confirm */
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v13_0_7_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

/*
 * Report the [min, max] range of @clk_type from the corresponding driver dpm
 * table.  Either @min or @max may be NULL.  Returns -EINVAL for clocks that
 * have no dpm table here.
 */
static int smu_v13_0_7_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	struct smu_13_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_13_0_dpm_table *dpm_table;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		/*
uclk dpm table */
		dpm_table = &dpm_context->dpm_tables.uclk_table;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk dpm table */
		dpm_table = &dpm_context->dpm_tables.gfx_table;
		break;
	case SMU_SOCCLK:
		/* socclk dpm table */
		dpm_table = &dpm_context->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		/* fclk dpm table */
		dpm_table = &dpm_context->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		/* vclk dpm table */
		dpm_table = &dpm_context->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		/* dclk dpm table */
		dpm_table = &dpm_context->dpm_tables.dclk_table;
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported clock type!\n");
		return -EINVAL;
	}

	if (min)
		*min = dpm_table->min;
	if (max)
		*max = dpm_table->max;

	return 0;
}

/*
 * amd_pp sensor read entry point.  Most sensors are serviced from the PMFW
 * metrics table; MAX_FAN_RPM comes straight from the pptable.  Clock sensors
 * are converted to 10 kHz units (* 100).  Unknown sensors (and
 * GPU_INPUT_POWER, deliberately) return -EOPNOTSUPP.
 */
static int smu_v13_0_7_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		/* NOTE(review): writes 2 bytes but reports *size = 4 — confirm consumers */
		*(uint16_t *)data = smc_pptable->SkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_CURR_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_7_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/*
 * Translate @clk_type to its PPCLK id, pick the matching metrics member
 * (average for gfxclk, current reading for everything else), and return the
 * frequency via @value.
 */
static int smu_v13_0_7_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		member_type = METRICS_AVERAGE_GFXCLK;
		break;
	case PPCLK_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case PPCLK_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	case PPCLK_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case PPCLK_VCLK_0:
		member_type = METRICS_CURR_VCLK;
		break;
	case PPCLK_DCLK_0:
		member_type = METRICS_CURR_DCLK;
		break;
	case PPCLK_VCLK_1:
		member_type = METRICS_CURR_VCLK1;
		break;
	case PPCLK_DCLK_1:
		member_type = METRICS_CURR_DCLK1;
		break;
	default:
		return -EINVAL;
	}

	return smu_v13_0_7_get_smu_metrics_data(smu,
						member_type,
						value);
}

/*
 * An overdrive feature is supported when the pptable's upper-limit
 * FeatureCtrlMask has the corresponding bit set.
 */
static bool smu_v13_0_7_is_od_feature_supported(struct smu_context *smu,
						int od_feature_bit)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMax;

	return overdrive_upperlimits->FeatureCtrlMask & (1U << od_feature_bit);
}

/*
 * Look up the [min, max] bounds for one PP_OD_FEATURE_* setting from the
 * pptable overdrive limit structures.  Unknown settings report INT_MAX for
 * both bounds.  Either @min or @max may be NULL.
 */
static void smu_v13_0_7_get_od_setting_limits(struct smu_context *smu,
					      int od_feature_bit,
					      int32_t *min,
					      int32_t *max)
{
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
		&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const overdrive_lowerlimits =
		&pptable->SkuTable.OverDriveLimitsMin;
	int32_t od_min_setting, od_max_setting;

	switch (od_feature_bit) {
	case PP_OD_FEATURE_GFXCLK_FMIN:
		od_min_setting = overdrive_lowerlimits->GfxclkFmin;
		od_max_setting = overdrive_upperlimits->GfxclkFmin;
		break;
	case PP_OD_FEATURE_GFXCLK_FMAX:
		od_min_setting = overdrive_lowerlimits->GfxclkFmax;
		od_max_setting = overdrive_upperlimits->GfxclkFmax;
		break;
	case PP_OD_FEATURE_UCLK_FMIN:
		od_min_setting = overdrive_lowerlimits->UclkFmin;
		od_max_setting = overdrive_upperlimits->UclkFmin;
		break;
	case PP_OD_FEATURE_UCLK_FMAX:
		od_min_setting = overdrive_lowerlimits->UclkFmax;
		od_max_setting = overdrive_upperlimits->UclkFmax;
		break;
	case PP_OD_FEATURE_GFX_VF_CURVE:
		od_min_setting = overdrive_lowerlimits->VoltageOffsetPerZoneBoundary;
		od_max_setting = overdrive_upperlimits->VoltageOffsetPerZoneBoundary;
		break;
	default:
		od_min_setting = od_max_setting = INT_MAX;
		break;
	}

	if (min)
		*min = od_min_setting;
	if (max)
		*max = od_max_setting;
}

static void
smu_v13_0_7_dump_od_table(struct smu_context *smu, 1095 OverDriveTableExternal_t *od_table) 1096 { 1097 struct amdgpu_device *adev = smu->adev; 1098 1099 dev_dbg(adev->dev, "OD: Gfxclk: (%d, %d)\n", od_table->OverDriveTable.GfxclkFmin, 1100 od_table->OverDriveTable.GfxclkFmax); 1101 dev_dbg(adev->dev, "OD: Uclk: (%d, %d)\n", od_table->OverDriveTable.UclkFmin, 1102 od_table->OverDriveTable.UclkFmax); 1103 } 1104 1105 static int smu_v13_0_7_get_overdrive_table(struct smu_context *smu, 1106 OverDriveTableExternal_t *od_table) 1107 { 1108 int ret = 0; 1109 1110 ret = smu_cmn_update_table(smu, 1111 SMU_TABLE_OVERDRIVE, 1112 0, 1113 (void *)od_table, 1114 false); 1115 if (ret) 1116 dev_err(smu->adev->dev, "Failed to get overdrive table!\n"); 1117 1118 return ret; 1119 } 1120 1121 static int smu_v13_0_7_upload_overdrive_table(struct smu_context *smu, 1122 OverDriveTableExternal_t *od_table) 1123 { 1124 int ret = 0; 1125 1126 ret = smu_cmn_update_table(smu, 1127 SMU_TABLE_OVERDRIVE, 1128 0, 1129 (void *)od_table, 1130 true); 1131 if (ret) 1132 dev_err(smu->adev->dev, "Failed to upload overdrive table!\n"); 1133 1134 return ret; 1135 } 1136 1137 static int smu_v13_0_7_print_clk_levels(struct smu_context *smu, 1138 enum smu_clk_type clk_type, 1139 char *buf) 1140 { 1141 struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 1142 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; 1143 OverDriveTableExternal_t *od_table = 1144 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; 1145 struct smu_13_0_dpm_table *single_dpm_table; 1146 struct smu_13_0_pcie_table *pcie_table; 1147 uint32_t gen_speed, lane_width; 1148 int i, curr_freq, size = 0; 1149 int32_t min_value, max_value; 1150 int ret = 0; 1151 1152 smu_cmn_get_sysfs_buf(&buf, &size); 1153 1154 if (amdgpu_ras_intr_triggered()) { 1155 size += sysfs_emit_at(buf, size, "unavailable\n"); 1156 return size; 1157 } 1158 1159 switch (clk_type) { 1160 case SMU_SCLK: 1161 single_dpm_table = 
&(dpm_context->dpm_tables.gfx_table); 1162 break; 1163 case SMU_MCLK: 1164 single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1165 break; 1166 case SMU_SOCCLK: 1167 single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1168 break; 1169 case SMU_FCLK: 1170 single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1171 break; 1172 case SMU_VCLK: 1173 case SMU_VCLK1: 1174 single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1175 break; 1176 case SMU_DCLK: 1177 case SMU_DCLK1: 1178 single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1179 break; 1180 default: 1181 break; 1182 } 1183 1184 switch (clk_type) { 1185 case SMU_SCLK: 1186 case SMU_MCLK: 1187 case SMU_SOCCLK: 1188 case SMU_FCLK: 1189 case SMU_VCLK: 1190 case SMU_VCLK1: 1191 case SMU_DCLK: 1192 case SMU_DCLK1: 1193 ret = smu_v13_0_7_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); 1194 if (ret) { 1195 dev_err(smu->adev->dev, "Failed to get current clock freq!"); 1196 return ret; 1197 } 1198 1199 if (single_dpm_table->is_fine_grained) { 1200 /* 1201 * For fine grained dpms, there are only two dpm levels: 1202 * - level 0 -> min clock freq 1203 * - level 1 -> max clock freq 1204 * And the current clock frequency can be any value between them. 
1205 * So, if the current clock frequency is not at level 0 or level 1, 1206 * we will fake it as three dpm levels: 1207 * - level 0 -> min clock freq 1208 * - level 1 -> current actual clock freq 1209 * - level 2 -> max clock freq 1210 */ 1211 if ((single_dpm_table->dpm_levels[0].value != curr_freq) && 1212 (single_dpm_table->dpm_levels[1].value != curr_freq)) { 1213 size += sysfs_emit_at(buf, size, "0: %uMhz\n", 1214 single_dpm_table->dpm_levels[0].value); 1215 size += sysfs_emit_at(buf, size, "1: %uMhz *\n", 1216 curr_freq); 1217 size += sysfs_emit_at(buf, size, "2: %uMhz\n", 1218 single_dpm_table->dpm_levels[1].value); 1219 } else { 1220 size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", 1221 single_dpm_table->dpm_levels[0].value, 1222 single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : ""); 1223 size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", 1224 single_dpm_table->dpm_levels[1].value, 1225 single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : ""); 1226 } 1227 } else { 1228 for (i = 0; i < single_dpm_table->count; i++) 1229 size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", 1230 i, single_dpm_table->dpm_levels[i].value, 1231 single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : ""); 1232 } 1233 break; 1234 case SMU_PCIE: 1235 ret = smu_v13_0_7_get_smu_metrics_data(smu, 1236 METRICS_PCIE_RATE, 1237 &gen_speed); 1238 if (ret) 1239 return ret; 1240 1241 ret = smu_v13_0_7_get_smu_metrics_data(smu, 1242 METRICS_PCIE_WIDTH, 1243 &lane_width); 1244 if (ret) 1245 return ret; 1246 1247 pcie_table = &(dpm_context->dpm_tables.pcie_table); 1248 for (i = 0; i < pcie_table->num_of_link_levels; i++) 1249 size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, 1250 (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," : 1251 (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," : 1252 (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," : 1253 (pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," : "", 1254 (pcie_table->pcie_lane[i] == 1) ? "x1" : 1255 (pcie_table->pcie_lane[i] == 2) ? 
"x2" : 1256 (pcie_table->pcie_lane[i] == 3) ? "x4" : 1257 (pcie_table->pcie_lane[i] == 4) ? "x8" : 1258 (pcie_table->pcie_lane[i] == 5) ? "x12" : 1259 (pcie_table->pcie_lane[i] == 6) ? "x16" : "", 1260 pcie_table->clk_freq[i], 1261 (gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) && 1262 (lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ? 1263 "*" : ""); 1264 break; 1265 1266 case SMU_OD_SCLK: 1267 if (!smu_v13_0_7_is_od_feature_supported(smu, 1268 PP_OD_FEATURE_GFXCLK_BIT)) 1269 break; 1270 1271 size += sysfs_emit_at(buf, size, "OD_SCLK:\n"); 1272 size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", 1273 od_table->OverDriveTable.GfxclkFmin, 1274 od_table->OverDriveTable.GfxclkFmax); 1275 break; 1276 1277 case SMU_OD_MCLK: 1278 if (!smu_v13_0_7_is_od_feature_supported(smu, 1279 PP_OD_FEATURE_UCLK_BIT)) 1280 break; 1281 1282 size += sysfs_emit_at(buf, size, "OD_MCLK:\n"); 1283 size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMHz\n", 1284 od_table->OverDriveTable.UclkFmin, 1285 od_table->OverDriveTable.UclkFmax); 1286 break; 1287 1288 case SMU_OD_VDDC_CURVE: 1289 if (!smu_v13_0_7_is_od_feature_supported(smu, 1290 PP_OD_FEATURE_GFX_VF_CURVE_BIT)) 1291 break; 1292 1293 size += sysfs_emit_at(buf, size, "OD_VDDC_CURVE:\n"); 1294 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) 1295 size += sysfs_emit_at(buf, size, "%d: %dmv\n", 1296 i, 1297 od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i]); 1298 break; 1299 1300 case SMU_OD_RANGE: 1301 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT) && 1302 !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT) && 1303 !smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) 1304 break; 1305 1306 size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); 1307 1308 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { 1309 smu_v13_0_7_get_od_setting_limits(smu, 1310 PP_OD_FEATURE_GFXCLK_FMIN, 1311 &min_value, 1312 NULL); 1313 
smu_v13_0_7_get_od_setting_limits(smu, 1314 PP_OD_FEATURE_GFXCLK_FMAX, 1315 NULL, 1316 &max_value); 1317 size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", 1318 min_value, max_value); 1319 } 1320 1321 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { 1322 smu_v13_0_7_get_od_setting_limits(smu, 1323 PP_OD_FEATURE_UCLK_FMIN, 1324 &min_value, 1325 NULL); 1326 smu_v13_0_7_get_od_setting_limits(smu, 1327 PP_OD_FEATURE_UCLK_FMAX, 1328 NULL, 1329 &max_value); 1330 size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", 1331 min_value, max_value); 1332 } 1333 1334 if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { 1335 smu_v13_0_7_get_od_setting_limits(smu, 1336 PP_OD_FEATURE_GFX_VF_CURVE, 1337 &min_value, 1338 &max_value); 1339 size += sysfs_emit_at(buf, size, "VDDC_CURVE: %7dmv %10dmv\n", 1340 min_value, max_value); 1341 } 1342 break; 1343 1344 default: 1345 break; 1346 } 1347 1348 return size; 1349 } 1350 1351 static int smu_v13_0_7_od_edit_dpm_table(struct smu_context *smu, 1352 enum PP_OD_DPM_TABLE_COMMAND type, 1353 long input[], 1354 uint32_t size) 1355 { 1356 struct smu_table_context *table_context = &smu->smu_table; 1357 OverDriveTableExternal_t *od_table = 1358 (OverDriveTableExternal_t *)table_context->overdrive_table; 1359 struct amdgpu_device *adev = smu->adev; 1360 uint32_t offset_of_voltageoffset; 1361 int32_t minimum, maximum; 1362 uint32_t feature_ctrlmask; 1363 int i, ret = 0; 1364 1365 switch (type) { 1366 case PP_OD_EDIT_SCLK_VDDC_TABLE: 1367 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFXCLK_BIT)) { 1368 dev_warn(adev->dev, "GFXCLK_LIMITS setting not supported!\n"); 1369 return -ENOTSUPP; 1370 } 1371 1372 for (i = 0; i < size; i += 2) { 1373 if (i + 2 > size) { 1374 dev_info(adev->dev, "invalid number of input parameters %d\n", size); 1375 return -EINVAL; 1376 } 1377 1378 switch (input[i]) { 1379 case 0: 1380 smu_v13_0_7_get_od_setting_limits(smu, 1381 
PP_OD_FEATURE_GFXCLK_FMIN, 1382 &minimum, 1383 &maximum); 1384 if (input[i + 1] < minimum || 1385 input[i + 1] > maximum) { 1386 dev_info(adev->dev, "GfxclkFmin (%ld) must be within [%u, %u]!\n", 1387 input[i + 1], minimum, maximum); 1388 return -EINVAL; 1389 } 1390 1391 od_table->OverDriveTable.GfxclkFmin = input[i + 1]; 1392 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; 1393 break; 1394 1395 case 1: 1396 smu_v13_0_7_get_od_setting_limits(smu, 1397 PP_OD_FEATURE_GFXCLK_FMAX, 1398 &minimum, 1399 &maximum); 1400 if (input[i + 1] < minimum || 1401 input[i + 1] > maximum) { 1402 dev_info(adev->dev, "GfxclkFmax (%ld) must be within [%u, %u]!\n", 1403 input[i + 1], minimum, maximum); 1404 return -EINVAL; 1405 } 1406 1407 od_table->OverDriveTable.GfxclkFmax = input[i + 1]; 1408 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFXCLK_BIT; 1409 break; 1410 1411 default: 1412 dev_info(adev->dev, "Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); 1413 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); 1414 return -EINVAL; 1415 } 1416 } 1417 1418 if (od_table->OverDriveTable.GfxclkFmin > od_table->OverDriveTable.GfxclkFmax) { 1419 dev_err(adev->dev, 1420 "Invalid setting: GfxclkFmin(%u) is bigger than GfxclkFmax(%u)\n", 1421 (uint32_t)od_table->OverDriveTable.GfxclkFmin, 1422 (uint32_t)od_table->OverDriveTable.GfxclkFmax); 1423 return -EINVAL; 1424 } 1425 break; 1426 1427 case PP_OD_EDIT_MCLK_VDDC_TABLE: 1428 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_UCLK_BIT)) { 1429 dev_warn(adev->dev, "UCLK_LIMITS setting not supported!\n"); 1430 return -ENOTSUPP; 1431 } 1432 1433 for (i = 0; i < size; i += 2) { 1434 if (i + 2 > size) { 1435 dev_info(adev->dev, "invalid number of input parameters %d\n", size); 1436 return -EINVAL; 1437 } 1438 1439 switch (input[i]) { 1440 case 0: 1441 smu_v13_0_7_get_od_setting_limits(smu, 1442 PP_OD_FEATURE_UCLK_FMIN, 1443 &minimum, 1444 &maximum); 1445 if (input[i + 1] < minimum || 
1446 input[i + 1] > maximum) { 1447 dev_info(adev->dev, "UclkFmin (%ld) must be within [%u, %u]!\n", 1448 input[i + 1], minimum, maximum); 1449 return -EINVAL; 1450 } 1451 1452 od_table->OverDriveTable.UclkFmin = input[i + 1]; 1453 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; 1454 break; 1455 1456 case 1: 1457 smu_v13_0_7_get_od_setting_limits(smu, 1458 PP_OD_FEATURE_UCLK_FMAX, 1459 &minimum, 1460 &maximum); 1461 if (input[i + 1] < minimum || 1462 input[i + 1] > maximum) { 1463 dev_info(adev->dev, "UclkFmax (%ld) must be within [%u, %u]!\n", 1464 input[i + 1], minimum, maximum); 1465 return -EINVAL; 1466 } 1467 1468 od_table->OverDriveTable.UclkFmax = input[i + 1]; 1469 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_UCLK_BIT; 1470 break; 1471 1472 default: 1473 dev_info(adev->dev, "Invalid MCLK_VDDC_TABLE index: %ld\n", input[i]); 1474 dev_info(adev->dev, "Supported indices: [0:min,1:max]\n"); 1475 return -EINVAL; 1476 } 1477 } 1478 1479 if (od_table->OverDriveTable.UclkFmin > od_table->OverDriveTable.UclkFmax) { 1480 dev_err(adev->dev, 1481 "Invalid setting: UclkFmin(%u) is bigger than UclkFmax(%u)\n", 1482 (uint32_t)od_table->OverDriveTable.UclkFmin, 1483 (uint32_t)od_table->OverDriveTable.UclkFmax); 1484 return -EINVAL; 1485 } 1486 break; 1487 1488 case PP_OD_EDIT_VDDC_CURVE: 1489 if (!smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_GFX_VF_CURVE_BIT)) { 1490 dev_warn(adev->dev, "VF curve setting not supported!\n"); 1491 return -ENOTSUPP; 1492 } 1493 1494 if (input[0] >= PP_NUM_OD_VF_CURVE_POINTS || 1495 input[0] < 0) 1496 return -EINVAL; 1497 1498 smu_v13_0_7_get_od_setting_limits(smu, 1499 PP_OD_FEATURE_GFX_VF_CURVE, 1500 &minimum, 1501 &maximum); 1502 if (input[1] < minimum || 1503 input[1] > maximum) { 1504 dev_info(adev->dev, "Voltage offset (%ld) must be within [%d, %d]!\n", 1505 input[1], minimum, maximum); 1506 return -EINVAL; 1507 } 1508 1509 
od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[input[0]] = input[1]; 1510 od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT; 1511 break; 1512 1513 case PP_OD_RESTORE_DEFAULT_TABLE: 1514 feature_ctrlmask = od_table->OverDriveTable.FeatureCtrlMask; 1515 memcpy(od_table, 1516 table_context->boot_overdrive_table, 1517 sizeof(OverDriveTableExternal_t)); 1518 od_table->OverDriveTable.FeatureCtrlMask = feature_ctrlmask; 1519 fallthrough; 1520 1521 case PP_OD_COMMIT_DPM_TABLE: 1522 /* 1523 * The member below instructs PMFW the settings focused in 1524 * this single operation. 1525 * `uint32_t FeatureCtrlMask;` 1526 * It does not contain actual informations about user's custom 1527 * settings. Thus we do not cache it. 1528 */ 1529 offset_of_voltageoffset = offsetof(OverDriveTable_t, VoltageOffsetPerZoneBoundary); 1530 if (memcmp((u8 *)od_table + offset_of_voltageoffset, 1531 table_context->user_overdrive_table + offset_of_voltageoffset, 1532 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset)) { 1533 smu_v13_0_7_dump_od_table(smu, od_table); 1534 1535 ret = smu_v13_0_7_upload_overdrive_table(smu, od_table); 1536 if (ret) { 1537 dev_err(adev->dev, "Failed to upload overdrive table!\n"); 1538 return ret; 1539 } 1540 1541 od_table->OverDriveTable.FeatureCtrlMask = 0; 1542 memcpy(table_context->user_overdrive_table + offset_of_voltageoffset, 1543 (u8 *)od_table + offset_of_voltageoffset, 1544 sizeof(OverDriveTableExternal_t) - offset_of_voltageoffset); 1545 1546 if (!memcmp(table_context->user_overdrive_table, 1547 table_context->boot_overdrive_table, 1548 sizeof(OverDriveTableExternal_t))) 1549 smu->user_dpm_profile.user_od = false; 1550 else 1551 smu->user_dpm_profile.user_od = true; 1552 } 1553 break; 1554 1555 default: 1556 return -ENOSYS; 1557 } 1558 1559 return ret; 1560 } 1561 1562 static int smu_v13_0_7_force_clk_levels(struct smu_context *smu, 1563 enum smu_clk_type clk_type, 1564 uint32_t mask) 1565 { 1566 struct 
smu_dpm_context *smu_dpm = &smu->smu_dpm; 1567 struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context; 1568 struct smu_13_0_dpm_table *single_dpm_table; 1569 uint32_t soft_min_level, soft_max_level; 1570 uint32_t min_freq, max_freq; 1571 int ret = 0; 1572 1573 soft_min_level = mask ? (ffs(mask) - 1) : 0; 1574 soft_max_level = mask ? (fls(mask) - 1) : 0; 1575 1576 switch (clk_type) { 1577 case SMU_GFXCLK: 1578 case SMU_SCLK: 1579 single_dpm_table = &(dpm_context->dpm_tables.gfx_table); 1580 break; 1581 case SMU_MCLK: 1582 case SMU_UCLK: 1583 single_dpm_table = &(dpm_context->dpm_tables.uclk_table); 1584 break; 1585 case SMU_SOCCLK: 1586 single_dpm_table = &(dpm_context->dpm_tables.soc_table); 1587 break; 1588 case SMU_FCLK: 1589 single_dpm_table = &(dpm_context->dpm_tables.fclk_table); 1590 break; 1591 case SMU_VCLK: 1592 case SMU_VCLK1: 1593 single_dpm_table = &(dpm_context->dpm_tables.vclk_table); 1594 break; 1595 case SMU_DCLK: 1596 case SMU_DCLK1: 1597 single_dpm_table = &(dpm_context->dpm_tables.dclk_table); 1598 break; 1599 default: 1600 break; 1601 } 1602 1603 switch (clk_type) { 1604 case SMU_GFXCLK: 1605 case SMU_SCLK: 1606 case SMU_MCLK: 1607 case SMU_UCLK: 1608 case SMU_SOCCLK: 1609 case SMU_FCLK: 1610 case SMU_VCLK: 1611 case SMU_VCLK1: 1612 case SMU_DCLK: 1613 case SMU_DCLK1: 1614 if (single_dpm_table->is_fine_grained) { 1615 /* There is only 2 levels for fine grained DPM */ 1616 soft_max_level = (soft_max_level >= 1 ? 1 : 0); 1617 soft_min_level = (soft_min_level >= 1 ? 
1 : 0); 1618 } else { 1619 if ((soft_max_level >= single_dpm_table->count) || 1620 (soft_min_level >= single_dpm_table->count)) 1621 return -EINVAL; 1622 } 1623 1624 min_freq = single_dpm_table->dpm_levels[soft_min_level].value; 1625 max_freq = single_dpm_table->dpm_levels[soft_max_level].value; 1626 1627 ret = smu_v13_0_set_soft_freq_limited_range(smu, 1628 clk_type, 1629 min_freq, 1630 max_freq); 1631 break; 1632 case SMU_DCEFCLK: 1633 case SMU_PCIE: 1634 default: 1635 break; 1636 } 1637 1638 return ret; 1639 } 1640 1641 static const struct smu_temperature_range smu13_thermal_policy[] = { 1642 {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, 1643 { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, 1644 }; 1645 1646 static int smu_v13_0_7_get_thermal_temperature_range(struct smu_context *smu, 1647 struct smu_temperature_range *range) 1648 { 1649 struct smu_table_context *table_context = &smu->smu_table; 1650 struct smu_13_0_7_powerplay_table *powerplay_table = 1651 table_context->power_play_table; 1652 PPTable_t *pptable = smu->smu_table.driver_pptable; 1653 1654 if (!range) 1655 return -EINVAL; 1656 1657 memcpy(range, &smu13_thermal_policy[0], sizeof(struct smu_temperature_range)); 1658 1659 range->max = pptable->SkuTable.TemperatureLimit[TEMP_EDGE] * 1660 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1661 range->edge_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_EDGE] + CTF_OFFSET_EDGE) * 1662 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1663 range->hotspot_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] * 1664 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1665 range->hotspot_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_HOTSPOT] + CTF_OFFSET_HOTSPOT) * 1666 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1667 range->mem_crit_max = pptable->SkuTable.TemperatureLimit[TEMP_MEM] * 1668 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1669 range->mem_emergency_max = (pptable->SkuTable.TemperatureLimit[TEMP_MEM] + 
CTF_OFFSET_MEM)* 1670 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; 1671 range->software_shutdown_temp = powerplay_table->software_shutdown_temp; 1672 range->software_shutdown_temp_offset = pptable->SkuTable.FanAbnormalTempLimitOffset; 1673 1674 return 0; 1675 } 1676 1677 #ifndef MAX 1678 #define MAX(a, b) ((a) > (b) ? (a) : (b)) 1679 #endif 1680 static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, 1681 void **table) 1682 { 1683 struct smu_table_context *smu_table = &smu->smu_table; 1684 struct gpu_metrics_v1_3 *gpu_metrics = 1685 (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table; 1686 SmuMetricsExternal_t metrics_ext; 1687 SmuMetrics_t *metrics = &metrics_ext.SmuMetrics; 1688 int ret = 0; 1689 1690 ret = smu_cmn_get_metrics_table(smu, 1691 &metrics_ext, 1692 true); 1693 if (ret) 1694 return ret; 1695 1696 smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); 1697 1698 gpu_metrics->temperature_edge = metrics->AvgTemperature[TEMP_EDGE]; 1699 gpu_metrics->temperature_hotspot = metrics->AvgTemperature[TEMP_HOTSPOT]; 1700 gpu_metrics->temperature_mem = metrics->AvgTemperature[TEMP_MEM]; 1701 gpu_metrics->temperature_vrgfx = metrics->AvgTemperature[TEMP_VR_GFX]; 1702 gpu_metrics->temperature_vrsoc = metrics->AvgTemperature[TEMP_VR_SOC]; 1703 gpu_metrics->temperature_vrmem = MAX(metrics->AvgTemperature[TEMP_VR_MEM0], 1704 metrics->AvgTemperature[TEMP_VR_MEM1]); 1705 1706 gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity; 1707 gpu_metrics->average_umc_activity = metrics->AverageUclkActivity; 1708 gpu_metrics->average_mm_activity = MAX(metrics->Vcn0ActivityPercentage, 1709 metrics->Vcn1ActivityPercentage); 1710 1711 gpu_metrics->average_socket_power = metrics->AverageSocketPower; 1712 gpu_metrics->energy_accumulator = metrics->EnergyAccumulator; 1713 1714 if (metrics->AverageGfxActivity <= SMU_13_0_7_BUSY_THRESHOLD) 1715 gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs; 1716 else 1717 
gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs; 1718 1719 if (metrics->AverageUclkActivity <= SMU_13_0_7_BUSY_THRESHOLD) 1720 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPostDs; 1721 else 1722 gpu_metrics->average_uclk_frequency = metrics->AverageMemclkFrequencyPreDs; 1723 1724 gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency; 1725 gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency; 1726 gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency; 1727 gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; 1728 1729 gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; 1730 gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; 1731 gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; 1732 gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; 1733 gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; 1734 gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; 1735 gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1]; 1736 1737 gpu_metrics->throttle_status = 1738 smu_v13_0_7_get_throttler_status(metrics); 1739 gpu_metrics->indep_throttle_status = 1740 smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status, 1741 smu_v13_0_7_throttler_map); 1742 1743 gpu_metrics->current_fan_speed = metrics->AvgFanRpm; 1744 1745 gpu_metrics->pcie_link_width = metrics->PcieWidth; 1746 if ((metrics->PcieRate - 1) > LINK_SPEED_MAX) 1747 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(1); 1748 else 1749 gpu_metrics->pcie_link_speed = pcie_gen_to_speed(metrics->PcieRate); 1750 1751 gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); 1752 1753 gpu_metrics->voltage_gfx = metrics->AvgVoltage[SVI_PLANE_GFX]; 1754 gpu_metrics->voltage_soc = metrics->AvgVoltage[SVI_PLANE_SOC]; 1755 gpu_metrics->voltage_mem = metrics->AvgVoltage[SVI_PLANE_VMEMP]; 1756 1757 *table = (void *)gpu_metrics; 1758 1759 
return sizeof(struct gpu_metrics_v1_3); 1760 } 1761 1762 static int smu_v13_0_7_set_default_od_settings(struct smu_context *smu) 1763 { 1764 OverDriveTableExternal_t *od_table = 1765 (OverDriveTableExternal_t *)smu->smu_table.overdrive_table; 1766 OverDriveTableExternal_t *boot_od_table = 1767 (OverDriveTableExternal_t *)smu->smu_table.boot_overdrive_table; 1768 OverDriveTableExternal_t *user_od_table = 1769 (OverDriveTableExternal_t *)smu->smu_table.user_overdrive_table; 1770 OverDriveTableExternal_t user_od_table_bak; 1771 int ret = 0; 1772 int i; 1773 1774 ret = smu_v13_0_7_get_overdrive_table(smu, boot_od_table); 1775 if (ret) 1776 return ret; 1777 1778 smu_v13_0_7_dump_od_table(smu, boot_od_table); 1779 1780 memcpy(od_table, 1781 boot_od_table, 1782 sizeof(OverDriveTableExternal_t)); 1783 1784 /* 1785 * For S3/S4/Runpm resume, we need to setup those overdrive tables again, 1786 * but we have to preserve user defined values in "user_od_table". 1787 */ 1788 if (!smu->adev->in_suspend) { 1789 memcpy(user_od_table, 1790 boot_od_table, 1791 sizeof(OverDriveTableExternal_t)); 1792 smu->user_dpm_profile.user_od = false; 1793 } else if (smu->user_dpm_profile.user_od) { 1794 memcpy(&user_od_table_bak, 1795 user_od_table, 1796 sizeof(OverDriveTableExternal_t)); 1797 memcpy(user_od_table, 1798 boot_od_table, 1799 sizeof(OverDriveTableExternal_t)); 1800 user_od_table->OverDriveTable.GfxclkFmin = 1801 user_od_table_bak.OverDriveTable.GfxclkFmin; 1802 user_od_table->OverDriveTable.GfxclkFmax = 1803 user_od_table_bak.OverDriveTable.GfxclkFmax; 1804 user_od_table->OverDriveTable.UclkFmin = 1805 user_od_table_bak.OverDriveTable.UclkFmin; 1806 user_od_table->OverDriveTable.UclkFmax = 1807 user_od_table_bak.OverDriveTable.UclkFmax; 1808 for (i = 0; i < PP_NUM_OD_VF_CURVE_POINTS; i++) 1809 user_od_table->OverDriveTable.VoltageOffsetPerZoneBoundary[i] = 1810 user_od_table_bak.OverDriveTable.VoltageOffsetPerZoneBoundary[i]; 1811 } 1812 1813 return 0; 1814 } 1815 1816 static int 
smu_v13_0_7_restore_user_od_settings(struct smu_context *smu) 1817 { 1818 struct smu_table_context *table_context = &smu->smu_table; 1819 OverDriveTableExternal_t *od_table = table_context->overdrive_table; 1820 OverDriveTableExternal_t *user_od_table = table_context->user_overdrive_table; 1821 int res; 1822 1823 user_od_table->OverDriveTable.FeatureCtrlMask = 1U << PP_OD_FEATURE_GFXCLK_BIT | 1824 1U << PP_OD_FEATURE_UCLK_BIT | 1825 1U << PP_OD_FEATURE_GFX_VF_CURVE_BIT; 1826 res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table); 1827 user_od_table->OverDriveTable.FeatureCtrlMask = 0; 1828 if (res == 0) 1829 memcpy(od_table, user_od_table, sizeof(OverDriveTableExternal_t)); 1830 1831 return res; 1832 } 1833 1834 static int smu_v13_0_7_populate_umd_state_clk(struct smu_context *smu) 1835 { 1836 struct smu_13_0_dpm_context *dpm_context = 1837 smu->smu_dpm.dpm_context; 1838 struct smu_13_0_dpm_table *gfx_table = 1839 &dpm_context->dpm_tables.gfx_table; 1840 struct smu_13_0_dpm_table *mem_table = 1841 &dpm_context->dpm_tables.uclk_table; 1842 struct smu_13_0_dpm_table *soc_table = 1843 &dpm_context->dpm_tables.soc_table; 1844 struct smu_13_0_dpm_table *vclk_table = 1845 &dpm_context->dpm_tables.vclk_table; 1846 struct smu_13_0_dpm_table *dclk_table = 1847 &dpm_context->dpm_tables.dclk_table; 1848 struct smu_13_0_dpm_table *fclk_table = 1849 &dpm_context->dpm_tables.fclk_table; 1850 struct smu_umd_pstate_table *pstate_table = 1851 &smu->pstate_table; 1852 struct smu_table_context *table_context = &smu->smu_table; 1853 PPTable_t *pptable = table_context->driver_pptable; 1854 DriverReportedClocks_t driver_clocks = 1855 pptable->SkuTable.DriverReportedClocks; 1856 1857 pstate_table->gfxclk_pstate.min = gfx_table->min; 1858 if (driver_clocks.GameClockAc && 1859 (driver_clocks.GameClockAc < gfx_table->max)) 1860 pstate_table->gfxclk_pstate.peak = driver_clocks.GameClockAc; 1861 else 1862 pstate_table->gfxclk_pstate.peak = gfx_table->max; 1863 1864 
pstate_table->uclk_pstate.min = mem_table->min; 1865 pstate_table->uclk_pstate.peak = mem_table->max; 1866 1867 pstate_table->socclk_pstate.min = soc_table->min; 1868 pstate_table->socclk_pstate.peak = soc_table->max; 1869 1870 pstate_table->vclk_pstate.min = vclk_table->min; 1871 pstate_table->vclk_pstate.peak = vclk_table->max; 1872 1873 pstate_table->dclk_pstate.min = dclk_table->min; 1874 pstate_table->dclk_pstate.peak = dclk_table->max; 1875 1876 pstate_table->fclk_pstate.min = fclk_table->min; 1877 pstate_table->fclk_pstate.peak = fclk_table->max; 1878 1879 if (driver_clocks.BaseClockAc && 1880 driver_clocks.BaseClockAc < gfx_table->max) 1881 pstate_table->gfxclk_pstate.standard = driver_clocks.BaseClockAc; 1882 else 1883 pstate_table->gfxclk_pstate.standard = gfx_table->max; 1884 pstate_table->uclk_pstate.standard = mem_table->max; 1885 pstate_table->socclk_pstate.standard = soc_table->min; 1886 pstate_table->vclk_pstate.standard = vclk_table->min; 1887 pstate_table->dclk_pstate.standard = dclk_table->min; 1888 pstate_table->fclk_pstate.standard = fclk_table->min; 1889 1890 return 0; 1891 } 1892 1893 static int smu_v13_0_7_get_fan_speed_pwm(struct smu_context *smu, 1894 uint32_t *speed) 1895 { 1896 int ret; 1897 1898 if (!speed) 1899 return -EINVAL; 1900 1901 ret = smu_v13_0_7_get_smu_metrics_data(smu, 1902 METRICS_CURR_FANPWM, 1903 speed); 1904 if (ret) { 1905 dev_err(smu->adev->dev, "Failed to get fan speed(PWM)!"); 1906 return ret; 1907 } 1908 1909 /* Convert the PMFW output which is in percent to pwm(255) based */ 1910 *speed = MIN(*speed * 255 / 100, 255); 1911 1912 return 0; 1913 } 1914 1915 static int smu_v13_0_7_get_fan_speed_rpm(struct smu_context *smu, 1916 uint32_t *speed) 1917 { 1918 if (!speed) 1919 return -EINVAL; 1920 1921 return smu_v13_0_7_get_smu_metrics_data(smu, 1922 METRICS_CURR_FANSPEED, 1923 speed); 1924 } 1925 1926 static int smu_v13_0_7_enable_mgpu_fan_boost(struct smu_context *smu) 1927 { 1928 struct smu_table_context *table_context 
= &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;

	/*
	 * Skip the MGpuFanBoost setting for those ASICs
	 * which do not support it
	 */
	if (skutable->MGpuAcousticLimitRpmThreshold == 0)
		return 0;

	/* Parameter 0: presumably lets PMFW apply its default boost limit — TODO confirm */
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetMGpuFanBoostLimitRpm,
					       0,
					       NULL);
}

/*
 * Query the current, default and maximum socket power limits.
 * When PMFW cannot report the current limit, fall back to the SKU-table
 * PPT0 limit, choosing the AC or DC value based on the power source.
 */
static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_13_0_7_powerplay_table *powerplay_table =
		(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;
	uint32_t power_limit, od_percent;

	if (smu_v13_0_get_current_power_limit(smu, &power_limit))
		power_limit = smu->adev->pm.ac_power ?
skutable->SocketPowerLimitAc[PPT_THROTTLER_PPT0] :
			      skutable->SocketPowerLimitDc[PPT_THROTTLER_PPT0];

	if (current_power_limit)
		*current_power_limit = power_limit;
	if (default_power_limit)
		*default_power_limit = power_limit;

	if (max_power_limit) {
		/* With overdrive enabled, scale the ceiling up by the OD power percentage */
		if (smu->od_enabled) {
			od_percent = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);

			dev_dbg(smu->adev->dev, "ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_percent, power_limit);

			power_limit *= (100 + od_percent);
			power_limit /= 100;
		}
		*max_power_limit = power_limit;
	}

	return 0;
}

/*
 * Emit a sysfs table of all supported power profile modes together with
 * their per-workload activity monitor coefficients read back from PMFW.
 * Returns the number of bytes written to @buf, or a negative error code.
 */
static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t *activity_monitor_external;
	uint32_t i, j, size = 0;
	int16_t workload_type = 0;
	int result = 0;

	if (!buf)
		return -EINVAL;

	/* One coefficient snapshot per profile; zero-initialized by kcalloc */
	activity_monitor_external = kcalloc(PP_SMC_POWER_PROFILE_COUNT,
					    sizeof(*activity_monitor_external),
					    GFP_KERNEL);
	if (!activity_monitor_external)
		return -ENOMEM;

	size += sysfs_emit_at(buf, size, " ");
	/* Header row: profile names, with the active profile marked */
	for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
		size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
			(i == smu->power_profile_mode) ?
"* " : " "); 2002 2003 size += sysfs_emit_at(buf, size, "\n"); 2004 2005 for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++) { 2006 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 2007 workload_type = smu_cmn_to_asic_specific_index(smu, 2008 CMN2ASIC_MAPPING_WORKLOAD, 2009 i); 2010 if (workload_type == -ENOTSUPP) 2011 continue; 2012 else if (workload_type < 0) { 2013 result = -EINVAL; 2014 goto out; 2015 } 2016 2017 result = smu_cmn_update_table(smu, 2018 SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type, 2019 (void *)(&activity_monitor_external[i]), false); 2020 if (result) { 2021 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 2022 goto out; 2023 } 2024 } 2025 2026 #define PRINT_DPM_MONITOR(field) \ 2027 do { \ 2028 size += sysfs_emit_at(buf, size, "%-30s", #field); \ 2029 for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \ 2030 size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \ 2031 size += sysfs_emit_at(buf, size, "\n"); \ 2032 } while (0) 2033 2034 PRINT_DPM_MONITOR(Gfx_ActiveHystLimit); 2035 PRINT_DPM_MONITOR(Gfx_IdleHystLimit); 2036 PRINT_DPM_MONITOR(Gfx_FPS); 2037 PRINT_DPM_MONITOR(Gfx_MinActiveFreqType); 2038 PRINT_DPM_MONITOR(Gfx_BoosterFreqType); 2039 PRINT_DPM_MONITOR(Gfx_MinActiveFreq); 2040 PRINT_DPM_MONITOR(Gfx_BoosterFreq); 2041 PRINT_DPM_MONITOR(Fclk_ActiveHystLimit); 2042 PRINT_DPM_MONITOR(Fclk_IdleHystLimit); 2043 PRINT_DPM_MONITOR(Fclk_FPS); 2044 PRINT_DPM_MONITOR(Fclk_MinActiveFreqType); 2045 PRINT_DPM_MONITOR(Fclk_BoosterFreqType); 2046 PRINT_DPM_MONITOR(Fclk_MinActiveFreq); 2047 PRINT_DPM_MONITOR(Fclk_BoosterFreq); 2048 #undef PRINT_DPM_MONITOR 2049 2050 result = size; 2051 out: 2052 kfree(activity_monitor_external); 2053 return result; 2054 } 2055 2056 static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) 2057 { 2058 2059 DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; 2060 
DpmActivityMonitorCoeffInt_t *activity_monitor = 2061 &(activity_monitor_external.DpmActivityMonitorCoeffInt); 2062 int workload_type, ret = 0; 2063 2064 smu->power_profile_mode = input[size]; 2065 2066 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { 2067 dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); 2068 return -EINVAL; 2069 } 2070 2071 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 2072 2073 ret = smu_cmn_update_table(smu, 2074 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, 2075 (void *)(&activity_monitor_external), false); 2076 if (ret) { 2077 dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); 2078 return ret; 2079 } 2080 2081 switch (input[0]) { 2082 case 0: /* Gfxclk */ 2083 activity_monitor->Gfx_ActiveHystLimit = input[1]; 2084 activity_monitor->Gfx_IdleHystLimit = input[2]; 2085 activity_monitor->Gfx_FPS = input[3]; 2086 activity_monitor->Gfx_MinActiveFreqType = input[4]; 2087 activity_monitor->Gfx_BoosterFreqType = input[5]; 2088 activity_monitor->Gfx_MinActiveFreq = input[6]; 2089 activity_monitor->Gfx_BoosterFreq = input[7]; 2090 break; 2091 case 1: /* Fclk */ 2092 activity_monitor->Fclk_ActiveHystLimit = input[1]; 2093 activity_monitor->Fclk_IdleHystLimit = input[2]; 2094 activity_monitor->Fclk_FPS = input[3]; 2095 activity_monitor->Fclk_MinActiveFreqType = input[4]; 2096 activity_monitor->Fclk_BoosterFreqType = input[5]; 2097 activity_monitor->Fclk_MinActiveFreq = input[6]; 2098 activity_monitor->Fclk_BoosterFreq = input[7]; 2099 break; 2100 } 2101 2102 ret = smu_cmn_update_table(smu, 2103 SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT, 2104 (void *)(&activity_monitor_external), true); 2105 if (ret) { 2106 dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); 2107 return ret; 2108 } 2109 } 2110 2111 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ 2112 workload_type = smu_cmn_to_asic_specific_index(smu, 
2113 CMN2ASIC_MAPPING_WORKLOAD, 2114 smu->power_profile_mode); 2115 if (workload_type < 0) 2116 return -EINVAL; 2117 smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, 2118 1 << workload_type, NULL); 2119 2120 return ret; 2121 } 2122 2123 static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, 2124 enum pp_mp1_state mp1_state) 2125 { 2126 int ret; 2127 2128 switch (mp1_state) { 2129 case PP_MP1_STATE_UNLOAD: 2130 ret = smu_cmn_set_mp1_state(smu, mp1_state); 2131 break; 2132 default: 2133 /* Ignore others */ 2134 ret = 0; 2135 } 2136 2137 return ret; 2138 } 2139 2140 static int smu_v13_0_7_baco_enter(struct smu_context *smu) 2141 { 2142 struct smu_baco_context *smu_baco = &smu->smu_baco; 2143 struct amdgpu_device *adev = smu->adev; 2144 2145 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) 2146 return smu_v13_0_baco_set_armd3_sequence(smu, 2147 (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? 2148 BACO_SEQ_BAMACO : BACO_SEQ_BACO); 2149 else 2150 return smu_v13_0_baco_enter(smu); 2151 } 2152 2153 static int smu_v13_0_7_baco_exit(struct smu_context *smu) 2154 { 2155 struct amdgpu_device *adev = smu->adev; 2156 2157 if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { 2158 /* Wait for PMFW handling for the Dstate change */ 2159 usleep_range(10000, 11000); 2160 return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); 2161 } else { 2162 return smu_v13_0_baco_exit(smu); 2163 } 2164 } 2165 2166 static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) 2167 { 2168 struct amdgpu_device *adev = smu->adev; 2169 2170 /* SRIOV does not support SMU mode1 reset */ 2171 if (amdgpu_sriov_vf(adev)) 2172 return false; 2173 2174 return true; 2175 } 2176 2177 static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, 2178 enum pp_df_cstate state) 2179 { 2180 return smu_cmn_send_smc_msg_with_param(smu, 2181 SMU_MSG_DFCstateControl, 2182 state, 2183 NULL); 2184 } 2185 2186 static const struct pptable_funcs 
smu_v13_0_7_ppt_funcs = {
	/* feature mask and DPM table setup */
	.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
	.is_dpm_running = smu_v13_0_7_is_dpm_running,
	.dump_pptable = smu_v13_0_7_dump_pptable,
	/* microcode and SMC table bring-up/teardown (mostly common v13.0 helpers) */
	.init_microcode = smu_v13_0_init_microcode,
	.load_microcode = smu_v13_0_load_microcode,
	.fini_microcode = smu_v13_0_fini_microcode,
	.init_smc_tables = smu_v13_0_7_init_smc_tables,
	.fini_smc_tables = smu_v13_0_fini_smc_tables,
	.init_power = smu_v13_0_init_power,
	.fini_power = smu_v13_0_fini_power,
	.check_fw_status = smu_v13_0_7_check_fw_status,
	.setup_pptable = smu_v13_0_7_setup_pptable,
	.check_fw_version = smu_v13_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.system_features_control = smu_v13_0_system_features_control,
	.set_allowed_mask = smu_v13_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.init_pptable_microcode = smu_v13_0_init_pptable_microcode,
	/* clock/pstate queries and controls */
	.populate_umd_state_clk = smu_v13_0_7_populate_umd_state_clk,
	.get_dpm_ultimate_freq = smu_v13_0_7_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.read_sensor = smu_v13_0_7_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.print_clk_levels = smu_v13_0_7_print_clk_levels,
	.force_clk_levels = smu_v13_0_7_force_clk_levels,
	.update_pcie_parameters = smu_v13_0_update_pcie_parameters,
	/* thermal */
	.get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range,
	.register_irq_handler = smu_v13_0_register_irq_handler,
	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
	.notify_memory_pool_location =
smu_v13_0_notify_memory_pool_location,
	.get_gpu_metrics = smu_v13_0_7_get_gpu_metrics,
	.set_soft_freq_limited_range = smu_v13_0_set_soft_freq_limited_range,
	/* overdrive */
	.set_default_od_settings = smu_v13_0_7_set_default_od_settings,
	.restore_user_od_settings = smu_v13_0_7_restore_user_od_settings,
	.od_edit_dpm_table = smu_v13_0_7_od_edit_dpm_table,
	.set_performance_level = smu_v13_0_set_performance_level,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	/* fan control */
	.get_fan_speed_pwm = smu_v13_0_7_get_fan_speed_pwm,
	.get_fan_speed_rpm = smu_v13_0_7_get_fan_speed_rpm,
	.set_fan_speed_pwm = smu_v13_0_set_fan_speed_pwm,
	.set_fan_speed_rpm = smu_v13_0_set_fan_speed_rpm,
	.get_fan_control_mode = smu_v13_0_get_fan_control_mode,
	.set_fan_control_mode = smu_v13_0_set_fan_control_mode,
	.enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
	/* power limits and profiles */
	.get_power_limit = smu_v13_0_7_get_power_limit,
	.set_power_limit = smu_v13_0_set_power_limit,
	.set_power_source = smu_v13_0_set_power_source,
	.get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
	.set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
	.set_tool_table_location = smu_v13_0_set_tool_table_location,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	/* BACO and reset */
	.baco_is_support = smu_v13_0_baco_is_support,
	.baco_get_state = smu_v13_0_baco_get_state,
	.baco_set_state = smu_v13_0_baco_set_state,
	.baco_enter = smu_v13_0_7_baco_enter,
	.baco_exit = smu_v13_0_7_baco_exit,
	.mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
	.mode1_reset = smu_v13_0_mode1_reset,
	.set_mp1_state = smu_v13_0_7_set_mp1_state,
	.set_df_cstate = smu_v13_0_7_set_df_cstate,
	.gpo_control = smu_v13_0_gpo_control,
};

/*
 * Hook up the SMU v13.0.7 implementation: install the ppt function table
 * and the common-to-ASIC mapping tables, set the expected driver-interface
 * version, then program the SMU mailbox registers used for messaging.
 */
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v13_0_7_ppt_funcs;
	smu->message_map = smu_v13_0_7_message_map;
	smu->clock_map = smu_v13_0_7_clk_map;
	smu->feature_map = smu_v13_0_7_feature_mask_map;
	smu->table_map = smu_v13_0_7_table_map;
	smu->pwr_src_map = smu_v13_0_7_pwr_src_map;
	smu->workload_map = smu_v13_0_7_workload_map;
	smu->smc_driver_if_version = SMU13_0_7_DRIVER_IF_VERSION;
	smu_v13_0_set_smu_mailbox_registers(smu);
}