/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum smu_clk_type clk_type,
			    uint32_t min,
			    uint32_t max)
{
	int ret = 0;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max);
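	/*
	 * ppt_funcs is the per-ASIC callback table; ASICs without this
	 * hook silently fall through and report success (ret stays 0).
	 */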

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	/* DPM clocks are in MHz; the legacy interface expects 10 kHz units */
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	/* vcn_gated is 1 when gated; the XOR skips no-op requests */
	if (atomic_read(&power_gate->vcn_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vcn_gated, !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    which the callers guard against race conditions.
 * 2. Or it gets called on a user setting request of power_dpm_force_performance_level.
 *    In that case, the smu->mutex lock protection is already enforced on
 *    the parent API smu_force_performance_level of the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
							      smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}


static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

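	/* A pptable uploaded by the user takes precedence over the VBIOS copy */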
	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable) {
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, the allowed feature masks setting (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such a scenario.
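	 * PMFW remains the final authority on which features actually get
	 * enabled; filling the bitmap here only records that assumption.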
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;
	smu->user_dpm_profile.fan_mode = -1;

	rw_init(&smu->message_lock, "smuml");

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate, jpeg_gate;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	vcn_gate = atomic_read(&power_gate->vcn_gated);
	jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	ret = smu_dpm_set_vcn_enable(smu, true);
	if (ret)
		return ret;

	ret = smu_dpm_set_jpeg_enable(smu, true);
	if (ret)
		goto err_out;

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	smu_dpm_set_vcn_enable(smu, !vcn_gate);
	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW of the power mode the system is in, since
	 * the PMFW may have booted the ASIC with a different mode.
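	 * (power_supply_is_system_supplied() > 0 below means AC power.)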
	 * For those supporting ACDC switch via gpio, PMFW will
	 * handle the switch automatically. Driver involvement
	 * is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
	    (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		/* The single driver table BO is sized to hold the largest table */
		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is reserved for SMC use; the msgs SetSystemVirtualDramAddr
 * and DramLogSetDramAddr notify the SMC when its location changes.
 *
 * Returns 0 on success, error on failure.
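 *
 * The pool size typically comes from the amdgpu.smu_memory_pool_size
 * module parameter (0 disables the pool).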
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);
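
	/* Zero the descriptor so a later re-allocation starts from a clean state */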
	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
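	/*
	 * Note: the hotspot sensor reports in millidegrees Celsius while
	 * software_shutdown_temp is in degrees, hence the /1000 below.
	 */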
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use the msgs SetSystemVirtualDramAddr and DramLogSetDramAddr to
	 * notify the pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is the same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to set up the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for transferring the PPTable
	 * to the SMU. Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
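		 * (TransferTableSmu2Dram is the corresponding message for
		 * reading tables back from the SMU.)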
1303ad8b1aafSjsg */ 1304ad8b1aafSjsg ret = smu_write_pptable(smu); 1305ad8b1aafSjsg if (ret) { 1306ad8b1aafSjsg dev_err(adev->dev, "Failed to transfer pptable to SMC!\n"); 1307ad8b1aafSjsg return ret; 1308ad8b1aafSjsg } 13091bb76ff1Sjsg } 1310ad8b1aafSjsg 1311ad8b1aafSjsg /* issue Run*Btc msg */ 1312ad8b1aafSjsg ret = smu_run_btc(smu); 1313ad8b1aafSjsg if (ret) 1314ad8b1aafSjsg return ret; 1315ad8b1aafSjsg 13161bb76ff1Sjsg /* 13171bb76ff1Sjsg * With SCPM enabled, these actions(and relevant messages) are 13181bb76ff1Sjsg * not needed and permitted. 13191bb76ff1Sjsg */ 13201bb76ff1Sjsg if (!adev->scpm_enabled) { 1321ad8b1aafSjsg ret = smu_feature_set_allowed_mask(smu); 1322ad8b1aafSjsg if (ret) { 1323ad8b1aafSjsg dev_err(adev->dev, "Failed to set driver allowed features mask!\n"); 1324ad8b1aafSjsg return ret; 1325ad8b1aafSjsg } 13261bb76ff1Sjsg } 1327ad8b1aafSjsg 1328ad8b1aafSjsg ret = smu_system_features_control(smu, true); 1329ad8b1aafSjsg if (ret) { 1330ad8b1aafSjsg dev_err(adev->dev, "Failed to enable requested dpm features!\n"); 1331ad8b1aafSjsg return ret; 1332ad8b1aafSjsg } 1333ad8b1aafSjsg 13341bb76ff1Sjsg ret = smu_feature_get_enabled_mask(smu, &features_supported); 13351bb76ff1Sjsg if (ret) { 13361bb76ff1Sjsg dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); 13371bb76ff1Sjsg return ret; 13381bb76ff1Sjsg } 13391bb76ff1Sjsg bitmap_copy(feature->supported, 13401bb76ff1Sjsg (unsigned long *)&features_supported, 13411bb76ff1Sjsg feature->feature_num); 13421bb76ff1Sjsg 1343ad8b1aafSjsg if (!smu_is_dpm_running(smu)) 1344ad8b1aafSjsg dev_info(adev->dev, "dpm has been disabled\n"); 1345ad8b1aafSjsg 13461bb76ff1Sjsg /* 13471bb76ff1Sjsg * Set initialized values (get from vbios) to dpm tables context such as 13481bb76ff1Sjsg * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each 13491bb76ff1Sjsg * type of clks. 13501bb76ff1Sjsg */ 13511bb76ff1Sjsg ret = smu_set_default_dpm_table(smu); 13521bb76ff1Sjsg if (ret) { 13531bb76ff1Sjsg dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); 13541bb76ff1Sjsg return ret; 13551bb76ff1Sjsg } 13561bb76ff1Sjsg 1357ad8b1aafSjsg if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) 1358ad8b1aafSjsg pcie_gen = 3; 1359ad8b1aafSjsg else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 1360ad8b1aafSjsg pcie_gen = 2; 1361ad8b1aafSjsg else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) 1362ad8b1aafSjsg pcie_gen = 1; 1363ad8b1aafSjsg else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) 1364ad8b1aafSjsg pcie_gen = 0; 1365ad8b1aafSjsg 1366ad8b1aafSjsg /* Bit 31:16: LCLK DPM level. 
0 is DPM0, and 1 is DPM1 1367ad8b1aafSjsg * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 1368ad8b1aafSjsg * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 1369ad8b1aafSjsg */ 1370ad8b1aafSjsg if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) 1371ad8b1aafSjsg pcie_width = 6; 1372ad8b1aafSjsg else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) 1373ad8b1aafSjsg pcie_width = 5; 1374ad8b1aafSjsg else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) 1375ad8b1aafSjsg pcie_width = 4; 1376ad8b1aafSjsg else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) 1377ad8b1aafSjsg pcie_width = 3; 1378ad8b1aafSjsg else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) 1379ad8b1aafSjsg pcie_width = 2; 1380ad8b1aafSjsg else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) 1381ad8b1aafSjsg pcie_width = 1; 1382ad8b1aafSjsg ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); 1383ad8b1aafSjsg if (ret) { 1384ad8b1aafSjsg dev_err(adev->dev, "Attempt to override pcie params failed!\n"); 1385ad8b1aafSjsg return ret; 1386ad8b1aafSjsg } 1387ad8b1aafSjsg 1388ad8b1aafSjsg ret = smu_get_thermal_temperature_range(smu); 1389ad8b1aafSjsg if (ret) { 1390ad8b1aafSjsg dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); 1391ad8b1aafSjsg return ret; 1392ad8b1aafSjsg } 1393ad8b1aafSjsg 1394ad8b1aafSjsg ret = smu_enable_thermal_alert(smu); 1395ad8b1aafSjsg if (ret) { 1396ad8b1aafSjsg dev_err(adev->dev, "Failed to enable thermal alert!\n"); 1397ad8b1aafSjsg return ret; 1398ad8b1aafSjsg } 1399ad8b1aafSjsg 14001bb76ff1Sjsg ret = smu_notify_display_change(smu); 1401ad8b1aafSjsg if (ret) { 14021bb76ff1Sjsg dev_err(adev->dev, "Failed to notify display change!\n"); 1403ad8b1aafSjsg return ret; 1404ad8b1aafSjsg } 1405ad8b1aafSjsg 1406ad8b1aafSjsg /* 1407ad8b1aafSjsg * Set min deep sleep dce fclk with bootup value from vbios via 1408ad8b1aafSjsg * SetMinDeepSleepDcefclk MSG. 1409ad8b1aafSjsg */ 1410ad8b1aafSjsg ret = smu_set_min_dcef_deep_sleep(smu, 1411ad8b1aafSjsg smu->smu_table.boot_values.dcefclk / 100); 1412ad8b1aafSjsg 1413ad8b1aafSjsg return ret; 1414ad8b1aafSjsg } 1415ad8b1aafSjsg 1416ad8b1aafSjsg static int smu_start_smc_engine(struct smu_context *smu) 1417ad8b1aafSjsg { 1418ad8b1aafSjsg struct amdgpu_device *adev = smu->adev; 1419ad8b1aafSjsg int ret = 0; 1420ad8b1aafSjsg 1421ad8b1aafSjsg if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 14221bb76ff1Sjsg if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) { 1423ad8b1aafSjsg if (smu->ppt_funcs->load_microcode) { 1424ad8b1aafSjsg ret = smu->ppt_funcs->load_microcode(smu); 1425ad8b1aafSjsg if (ret) 1426ad8b1aafSjsg return ret; 1427ad8b1aafSjsg } 1428ad8b1aafSjsg } 1429ad8b1aafSjsg } 1430ad8b1aafSjsg 1431ad8b1aafSjsg if (smu->ppt_funcs->check_fw_status) { 1432ad8b1aafSjsg ret = smu->ppt_funcs->check_fw_status(smu); 1433ad8b1aafSjsg if (ret) { 1434ad8b1aafSjsg dev_err(adev->dev, "SMC is not ready\n"); 1435ad8b1aafSjsg return ret; 1436ad8b1aafSjsg } 1437ad8b1aafSjsg } 1438ad8b1aafSjsg 1439ad8b1aafSjsg /* 1440ad8b1aafSjsg * Send msg GetDriverIfVersion to check if the return value is equal 1441ad8b1aafSjsg * with DRIVER_IF_VERSION of smc header. 
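	 * A mismatch usually means the driver headers and the PMFW binary
	 * were built from different interface revisions.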
1442ad8b1aafSjsg */ 1443ad8b1aafSjsg ret = smu_check_fw_version(smu); 1444ad8b1aafSjsg if (ret) 1445ad8b1aafSjsg return ret; 1446ad8b1aafSjsg 1447ad8b1aafSjsg return ret; 1448ad8b1aafSjsg } 1449ad8b1aafSjsg 1450ad8b1aafSjsg static int smu_hw_init(void *handle) 1451ad8b1aafSjsg { 1452ad8b1aafSjsg int ret; 1453ad8b1aafSjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle; 14541bb76ff1Sjsg struct smu_context *smu = adev->powerplay.pp_handle; 1455ad8b1aafSjsg 1456ad8b1aafSjsg if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { 1457ad8b1aafSjsg smu->pm_enabled = false; 1458ad8b1aafSjsg return 0; 1459ad8b1aafSjsg } 1460ad8b1aafSjsg 1461ad8b1aafSjsg ret = smu_start_smc_engine(smu); 1462ad8b1aafSjsg if (ret) { 1463ad8b1aafSjsg dev_err(adev->dev, "SMC engine is not correctly up!\n"); 1464ad8b1aafSjsg return ret; 1465ad8b1aafSjsg } 1466ad8b1aafSjsg 1467ad8b1aafSjsg if (smu->is_apu) { 14684e52571bSjsg ret = smu_set_gfx_imu_enable(smu); 14694e52571bSjsg if (ret) 14701bb76ff1Sjsg return ret; 1471ad8b1aafSjsg smu_dpm_set_vcn_enable(smu, true); 1472ad8b1aafSjsg smu_dpm_set_jpeg_enable(smu, true); 14731bb76ff1Sjsg smu_set_gfx_cgpg(smu, true); 1474ad8b1aafSjsg } 1475ad8b1aafSjsg 1476ad8b1aafSjsg if (!smu->pm_enabled) 1477ad8b1aafSjsg return 0; 1478ad8b1aafSjsg 1479ad8b1aafSjsg ret = smu_get_driver_allowed_feature_mask(smu); 1480ad8b1aafSjsg if (ret) 1481ad8b1aafSjsg return ret; 1482ad8b1aafSjsg 1483ad8b1aafSjsg ret = smu_smc_hw_setup(smu); 1484ad8b1aafSjsg if (ret) { 1485ad8b1aafSjsg dev_err(adev->dev, "Failed to setup smc hw!\n"); 1486ad8b1aafSjsg return ret; 1487ad8b1aafSjsg } 1488ad8b1aafSjsg 1489ad8b1aafSjsg /* 1490ad8b1aafSjsg * Move maximum sustainable clock retrieving here considering 1491ad8b1aafSjsg * 1. It is not needed on resume(from S3). 1492ad8b1aafSjsg * 2. DAL settings come between .hw_init and .late_init of SMU. 1493ad8b1aafSjsg * And DAL needs to know the maximum sustainable clocks. Thus 1494ad8b1aafSjsg * it cannot be put in .late_init(). 1495ad8b1aafSjsg */ 1496ad8b1aafSjsg ret = smu_init_max_sustainable_clocks(smu); 1497ad8b1aafSjsg if (ret) { 1498ad8b1aafSjsg dev_err(adev->dev, "Failed to init max sustainable clocks!\n"); 1499ad8b1aafSjsg return ret; 1500ad8b1aafSjsg } 1501ad8b1aafSjsg 1502ad8b1aafSjsg adev->pm.dpm_enabled = true; 1503ad8b1aafSjsg 1504ad8b1aafSjsg dev_info(adev->dev, "SMU is initialized successfully!\n"); 1505ad8b1aafSjsg 1506ad8b1aafSjsg return 0; 1507ad8b1aafSjsg } 1508ad8b1aafSjsg 1509ad8b1aafSjsg static int smu_disable_dpms(struct smu_context *smu) 1510ad8b1aafSjsg { 1511ad8b1aafSjsg struct amdgpu_device *adev = smu->adev; 1512ad8b1aafSjsg int ret = 0; 1513ad8b1aafSjsg bool use_baco = !smu->is_apu && 151419c7e219Sjsg ((amdgpu_in_reset(adev) && 1515ad8b1aafSjsg (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) || 15165ca02815Sjsg ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev))); 1517ad8b1aafSjsg 1518ad8b1aafSjsg /* 15191bb76ff1Sjsg * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features(disablement or others) 15201bb76ff1Sjsg * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues. 
15211bb76ff1Sjsg 	 */
15221bb76ff1Sjsg 	switch (adev->ip_versions[MP1_HWIP][0]) {
15231bb76ff1Sjsg 	case IP_VERSION(13, 0, 0):
15241bb76ff1Sjsg 	case IP_VERSION(13, 0, 7):
1525f005ef32Sjsg 	case IP_VERSION(13, 0, 10):
15261bb76ff1Sjsg 		return 0;
15271bb76ff1Sjsg 	default:
15281bb76ff1Sjsg 		break;
15291bb76ff1Sjsg 	}
15301bb76ff1Sjsg 
15311bb76ff1Sjsg 	/*
1532ad8b1aafSjsg 	 * For custom pptable uploading, skip the DPM features
1533ad8b1aafSjsg 	 * disable process on Navi1x ASICs:
1534ad8b1aafSjsg 	 *   - the gfx related features are under the control of
1535ad8b1aafSjsg 	 *     RLC on those ASICs, so an RLC reinitialization
1536ad8b1aafSjsg 	 *     would be needed to reenable them, which takes
1537ad8b1aafSjsg 	 *     considerably more effort;
1538ad8b1aafSjsg 	 *
1539ad8b1aafSjsg 	 *   - SMU firmware can handle the DPM reenablement
1540ad8b1aafSjsg 	 *     properly.
1541ad8b1aafSjsg 	 */
15421bb76ff1Sjsg 	if (smu->uploading_custom_pp_table) {
15431bb76ff1Sjsg 		switch (adev->ip_versions[MP1_HWIP][0]) {
15441bb76ff1Sjsg 		case IP_VERSION(11, 0, 0):
15451bb76ff1Sjsg 		case IP_VERSION(11, 0, 5):
15461bb76ff1Sjsg 		case IP_VERSION(11, 0, 9):
15471bb76ff1Sjsg 		case IP_VERSION(11, 0, 7):
15481bb76ff1Sjsg 		case IP_VERSION(11, 0, 11):
15491bb76ff1Sjsg 		case IP_VERSION(11, 5, 0):
15501bb76ff1Sjsg 		case IP_VERSION(11, 0, 12):
15511bb76ff1Sjsg 		case IP_VERSION(11, 0, 13):
15521bb76ff1Sjsg 			return 0;
15531bb76ff1Sjsg 		default:
15541bb76ff1Sjsg 			break;
15551bb76ff1Sjsg 		}
15561bb76ff1Sjsg 	}
1557ad8b1aafSjsg 
1558ad8b1aafSjsg 	/*
1559ad8b1aafSjsg 	 * For Sienna_Cichlid and the other ASICs below, PMFW will handle the
1560ad8b1aafSjsg 	 * feature disablement properly when entering BACO; driver involvement is unnecessary.
1561ad8b1aafSjsg 	 */
15621bb76ff1Sjsg 	if (use_baco) {
15631bb76ff1Sjsg 		switch (adev->ip_versions[MP1_HWIP][0]) {
15641bb76ff1Sjsg 		case IP_VERSION(11, 0, 7):
15651bb76ff1Sjsg 		case IP_VERSION(11, 0, 0):
15661bb76ff1Sjsg 		case IP_VERSION(11, 0, 5):
15671bb76ff1Sjsg 		case IP_VERSION(11, 0, 9):
15681bb76ff1Sjsg 		case IP_VERSION(13, 0, 7):
15691bb76ff1Sjsg 			return 0;
15701bb76ff1Sjsg 		default:
15711bb76ff1Sjsg 			break;
15721bb76ff1Sjsg 		}
15731bb76ff1Sjsg 	}
1574ad8b1aafSjsg 
1575ad8b1aafSjsg 	/*
1576fa345360Sjsg 	 * For SMU 13.0.4/11, PMFW will handle the feature disablement properly
15772f62752aSjsg 	 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
1578fa345360Sjsg 	 */
15792f62752aSjsg 	if (amdgpu_in_reset(adev) || adev->in_s0ix) {
1580fa345360Sjsg 		switch (adev->ip_versions[MP1_HWIP][0]) {
1581fa345360Sjsg 		case IP_VERSION(13, 0, 4):
1582fa345360Sjsg 		case IP_VERSION(13, 0, 11):
1583fa345360Sjsg 			return 0;
1584fa345360Sjsg 		default:
1585fa345360Sjsg 			break;
1586fa345360Sjsg 		}
1587fa345360Sjsg 	}
1588fa345360Sjsg 
1589fa345360Sjsg 	/*
1590ad8b1aafSjsg 	 * For gpu reset, runpm and hibernation through BACO,
1591ad8b1aafSjsg 	 * the BACO feature has to be kept enabled.
1592ad8b1aafSjsg */ 1593ad8b1aafSjsg if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) { 1594ad8b1aafSjsg ret = smu_disable_all_features_with_exception(smu, 1595ad8b1aafSjsg SMU_FEATURE_BACO_BIT); 1596ad8b1aafSjsg if (ret) 1597ad8b1aafSjsg dev_err(adev->dev, "Failed to disable smu features except BACO.\n"); 1598ad8b1aafSjsg } else { 15991bb76ff1Sjsg /* DisableAllSmuFeatures message is not permitted with SCPM enabled */ 16001bb76ff1Sjsg if (!adev->scpm_enabled) { 1601ad8b1aafSjsg ret = smu_system_features_control(smu, false); 1602ad8b1aafSjsg if (ret) 1603ad8b1aafSjsg dev_err(adev->dev, "Failed to disable smu features.\n"); 1604ad8b1aafSjsg } 16051bb76ff1Sjsg } 1606ad8b1aafSjsg 16071bb76ff1Sjsg if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) && 1608f005ef32Sjsg !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop) 1609ad8b1aafSjsg adev->gfx.rlc.funcs->stop(adev); 1610ad8b1aafSjsg 1611ad8b1aafSjsg return ret; 1612ad8b1aafSjsg } 1613ad8b1aafSjsg 1614ad8b1aafSjsg static int smu_smc_hw_cleanup(struct smu_context *smu) 1615ad8b1aafSjsg { 1616ad8b1aafSjsg struct amdgpu_device *adev = smu->adev; 1617ad8b1aafSjsg int ret = 0; 1618ad8b1aafSjsg 1619ad8b1aafSjsg cancel_work_sync(&smu->throttling_logging_work); 1620ad8b1aafSjsg cancel_work_sync(&smu->interrupt_work); 1621ad8b1aafSjsg 1622ad8b1aafSjsg ret = smu_disable_thermal_alert(smu); 1623ad8b1aafSjsg if (ret) { 1624ad8b1aafSjsg dev_err(adev->dev, "Fail to disable thermal alert!\n"); 1625ad8b1aafSjsg return ret; 1626ad8b1aafSjsg } 1627ad8b1aafSjsg 1628788b966fSjsg cancel_delayed_work_sync(&smu->swctf_delayed_work); 1629788b966fSjsg 1630ad8b1aafSjsg ret = smu_disable_dpms(smu); 1631ad8b1aafSjsg if (ret) { 1632ad8b1aafSjsg dev_err(adev->dev, "Fail to disable dpm features!\n"); 1633ad8b1aafSjsg return ret; 1634ad8b1aafSjsg } 1635ad8b1aafSjsg 1636ad8b1aafSjsg return 0; 1637ad8b1aafSjsg } 1638ad8b1aafSjsg 1639ad8b1aafSjsg static int smu_hw_fini(void *handle) 1640ad8b1aafSjsg { 1641ad8b1aafSjsg struct amdgpu_device *adev = (struct amdgpu_device *)handle; 16421bb76ff1Sjsg struct smu_context *smu = adev->powerplay.pp_handle; 1643ad8b1aafSjsg 1644ad8b1aafSjsg if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1645ad8b1aafSjsg return 0; 1646ad8b1aafSjsg 1647ad8b1aafSjsg smu_dpm_set_vcn_enable(smu, false); 1648ad8b1aafSjsg smu_dpm_set_jpeg_enable(smu, false); 16495ca02815Sjsg 16505ca02815Sjsg adev->vcn.cur_state = AMD_PG_STATE_GATE; 16515ca02815Sjsg adev->jpeg.cur_state = AMD_PG_STATE_GATE; 1652ad8b1aafSjsg 1653ad8b1aafSjsg if (!smu->pm_enabled) 1654ad8b1aafSjsg return 0; 1655ad8b1aafSjsg 1656ad8b1aafSjsg adev->pm.dpm_enabled = false; 1657ad8b1aafSjsg 1658ad8b1aafSjsg return smu_smc_hw_cleanup(smu); 1659ad8b1aafSjsg } 1660ad8b1aafSjsg 16611bb76ff1Sjsg static void smu_late_fini(void *handle) 16621bb76ff1Sjsg { 16631bb76ff1Sjsg struct amdgpu_device *adev = handle; 16641bb76ff1Sjsg struct smu_context *smu = adev->powerplay.pp_handle; 16651bb76ff1Sjsg 16661bb76ff1Sjsg kfree(smu); 16671bb76ff1Sjsg } 16681bb76ff1Sjsg 16695ca02815Sjsg static int smu_reset(struct smu_context *smu) 1670ad8b1aafSjsg { 1671ad8b1aafSjsg struct amdgpu_device *adev = smu->adev; 1672ad8b1aafSjsg int ret; 1673ad8b1aafSjsg 1674ad8b1aafSjsg ret = smu_hw_fini(adev); 1675ad8b1aafSjsg if (ret) 1676ad8b1aafSjsg return ret; 1677ad8b1aafSjsg 1678ad8b1aafSjsg ret = smu_hw_init(adev); 1679ad8b1aafSjsg if (ret) 1680ad8b1aafSjsg return ret; 1681ad8b1aafSjsg 1682ad8b1aafSjsg ret = smu_late_init(adev); 1683ad8b1aafSjsg if (ret) 1684ad8b1aafSjsg return ret; 
1685ad8b1aafSjsg 
1686ad8b1aafSjsg 	return 0;
1687ad8b1aafSjsg }
1688ad8b1aafSjsg 
1689ad8b1aafSjsg static int smu_suspend(void *handle)
1690ad8b1aafSjsg {
1691ad8b1aafSjsg 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
16921bb76ff1Sjsg 	struct smu_context *smu = adev->powerplay.pp_handle;
1693ad8b1aafSjsg 	int ret;
16941bb76ff1Sjsg 	uint64_t count;
1695ad8b1aafSjsg 
1696ad8b1aafSjsg 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1697ad8b1aafSjsg 		return 0;
1698ad8b1aafSjsg 
1699ad8b1aafSjsg 	if (!smu->pm_enabled)
1700ad8b1aafSjsg 		return 0;
1701ad8b1aafSjsg 
1702ad8b1aafSjsg 	adev->pm.dpm_enabled = false;
1703ad8b1aafSjsg 
1704ad8b1aafSjsg 	ret = smu_smc_hw_cleanup(smu);
1705ad8b1aafSjsg 	if (ret)
1706ad8b1aafSjsg 		return ret;
1707ad8b1aafSjsg 
1708ad8b1aafSjsg 	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
1709ad8b1aafSjsg 
17101bb76ff1Sjsg 	smu_set_gfx_cgpg(smu, false);
17111bb76ff1Sjsg 
17121bb76ff1Sjsg 	/*
17131bb76ff1Sjsg 	 * PMFW resets the gfxoff entry count when the device is suspended,
17141bb76ff1Sjsg 	 * so save the last value here and restore it on resume to keep it consistent.
17151bb76ff1Sjsg 	 */
17161bb76ff1Sjsg 	ret = smu_get_entrycount_gfxoff(smu, &count);
17171bb76ff1Sjsg 	if (!ret)
17181bb76ff1Sjsg 		adev->gfx.gfx_off_entrycount = count;
1719ad8b1aafSjsg 
1720ad8b1aafSjsg 	return 0;
1721ad8b1aafSjsg }
1722ad8b1aafSjsg 
1723ad8b1aafSjsg static int smu_resume(void *handle)
1724ad8b1aafSjsg {
1725ad8b1aafSjsg 	int ret;
1726ad8b1aafSjsg 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
17271bb76ff1Sjsg 	struct smu_context *smu = adev->powerplay.pp_handle;
1728ad8b1aafSjsg 
1729ad8b1aafSjsg 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
1730ad8b1aafSjsg 		return 0;
1731ad8b1aafSjsg 
1732ad8b1aafSjsg 	if (!smu->pm_enabled)
1733ad8b1aafSjsg 		return 0;
1734ad8b1aafSjsg 
1735ad8b1aafSjsg 	dev_info(adev->dev, "SMU is resuming...\n");
1736ad8b1aafSjsg 
1737ad8b1aafSjsg 	ret = smu_start_smc_engine(smu);
1738ad8b1aafSjsg 	if (ret) {
1739ad8b1aafSjsg 		dev_err(adev->dev, "SMC engine is not correctly up!\n");
1740ad8b1aafSjsg 		return ret;
1741ad8b1aafSjsg 	}
1742ad8b1aafSjsg 
1743ad8b1aafSjsg 	ret = smu_smc_hw_setup(smu);
1744ad8b1aafSjsg 	if (ret) {
1745ad8b1aafSjsg 		dev_err(adev->dev, "Failed to setup smc hw!\n");
1746ad8b1aafSjsg 		return ret;
1747ad8b1aafSjsg 	}
1748ad8b1aafSjsg 
17494e52571bSjsg 	ret = smu_set_gfx_imu_enable(smu);
17504e52571bSjsg 	if (ret)
17514e52571bSjsg 		return ret;
17524e52571bSjsg 
17531bb76ff1Sjsg 	smu_set_gfx_cgpg(smu, true);
1754ad8b1aafSjsg 
1755ad8b1aafSjsg 	smu->disable_uclk_switch = 0;
1756ad8b1aafSjsg 
1757ad8b1aafSjsg 	adev->pm.dpm_enabled = true;
1758ad8b1aafSjsg 
1759ad8b1aafSjsg 	dev_info(adev->dev, "SMU is resumed successfully!\n");
1760ad8b1aafSjsg 
1761ad8b1aafSjsg 	return 0;
1762ad8b1aafSjsg }
1763ad8b1aafSjsg 
17645ca02815Sjsg static int smu_display_configuration_change(void *handle,
1765ad8b1aafSjsg 					    const struct amd_pp_display_configuration *display_config)
1766ad8b1aafSjsg {
17675ca02815Sjsg 	struct smu_context *smu = handle;
1768ad8b1aafSjsg 
1769ad8b1aafSjsg 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
1770ad8b1aafSjsg 		return -EOPNOTSUPP;
1771ad8b1aafSjsg 
1772ad8b1aafSjsg 	if (!display_config)
1773ad8b1aafSjsg 		return -EINVAL;
1774ad8b1aafSjsg 
1775ad8b1aafSjsg 	smu_set_min_dcef_deep_sleep(smu,
1776ad8b1aafSjsg 				    display_config->min_dcef_deep_sleep_set_clk / 100);
1777ad8b1aafSjsg 
1778ad8b1aafSjsg 	return 0;
1779ad8b1aafSjsg }
1780ad8b1aafSjsg 
1781ad8b1aafSjsg static int smu_set_clockgating_state(void *handle,
1782ad8b1aafSjsg 				     enum amd_clockgating_state state)
1783ad8b1aafSjsg { 1784ad8b1aafSjsg return 0; 1785ad8b1aafSjsg } 1786ad8b1aafSjsg 1787ad8b1aafSjsg static int smu_set_powergating_state(void *handle, 1788ad8b1aafSjsg enum amd_powergating_state state) 1789ad8b1aafSjsg { 1790ad8b1aafSjsg return 0; 1791ad8b1aafSjsg } 1792ad8b1aafSjsg 1793ad8b1aafSjsg static int smu_enable_umd_pstate(void *handle, 1794ad8b1aafSjsg enum amd_dpm_forced_level *level) 1795ad8b1aafSjsg { 1796ad8b1aafSjsg uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 1797ad8b1aafSjsg AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 1798ad8b1aafSjsg AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 1799ad8b1aafSjsg AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 1800ad8b1aafSjsg 1801ad8b1aafSjsg struct smu_context *smu = (struct smu_context*)(handle); 1802ad8b1aafSjsg struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1803ad8b1aafSjsg 1804ad8b1aafSjsg if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1805ad8b1aafSjsg return -EINVAL; 1806ad8b1aafSjsg 1807ad8b1aafSjsg if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { 1808ad8b1aafSjsg /* enter umd pstate, save current level, disable gfx cg*/ 1809ad8b1aafSjsg if (*level & profile_mode_mask) { 1810ad8b1aafSjsg smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; 18115ca02815Sjsg smu_gpo_control(smu, false); 1812ad8b1aafSjsg smu_gfx_ulv_control(smu, false); 1813ad8b1aafSjsg smu_deep_sleep_control(smu, false); 18145ca02815Sjsg amdgpu_asic_update_umd_stable_pstate(smu->adev, true); 1815ad8b1aafSjsg } 1816ad8b1aafSjsg } else { 1817ad8b1aafSjsg /* exit umd pstate, restore level, enable gfx cg*/ 1818ad8b1aafSjsg if (!(*level & profile_mode_mask)) { 1819ad8b1aafSjsg if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) 1820ad8b1aafSjsg *level = smu_dpm_ctx->saved_dpm_level; 18215ca02815Sjsg amdgpu_asic_update_umd_stable_pstate(smu->adev, false); 1822ad8b1aafSjsg smu_deep_sleep_control(smu, true); 1823ad8b1aafSjsg smu_gfx_ulv_control(smu, true); 18245ca02815Sjsg smu_gpo_control(smu, true); 1825ad8b1aafSjsg } 1826ad8b1aafSjsg } 1827ad8b1aafSjsg 1828ad8b1aafSjsg return 0; 1829ad8b1aafSjsg } 1830ad8b1aafSjsg 18315ca02815Sjsg static int smu_bump_power_profile_mode(struct smu_context *smu, 18325ca02815Sjsg long *param, 18335ca02815Sjsg uint32_t param_size) 18345ca02815Sjsg { 18355ca02815Sjsg int ret = 0; 18365ca02815Sjsg 18375ca02815Sjsg if (smu->ppt_funcs->set_power_profile_mode) 18385ca02815Sjsg ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); 18395ca02815Sjsg 18405ca02815Sjsg return ret; 18415ca02815Sjsg } 18425ca02815Sjsg 1843ad8b1aafSjsg static int smu_adjust_power_state_dynamic(struct smu_context *smu, 1844ad8b1aafSjsg enum amd_dpm_forced_level level, 184519846322Sjsg bool skip_display_settings, 1846*afb6e02fSjsg bool init) 1847ad8b1aafSjsg { 1848ad8b1aafSjsg int ret = 0; 1849ad8b1aafSjsg int index = 0; 1850b121103bSjsg long workload[1]; 1851ad8b1aafSjsg struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1852ad8b1aafSjsg 1853ad8b1aafSjsg if (!skip_display_settings) { 1854ad8b1aafSjsg ret = smu_display_config_changed(smu); 1855ad8b1aafSjsg if (ret) { 1856ad8b1aafSjsg dev_err(smu->adev->dev, "Failed to change display config!"); 1857ad8b1aafSjsg return ret; 1858ad8b1aafSjsg } 1859ad8b1aafSjsg } 1860ad8b1aafSjsg 1861ad8b1aafSjsg ret = smu_apply_clocks_adjust_rules(smu); 1862ad8b1aafSjsg if (ret) { 1863ad8b1aafSjsg dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!"); 1864ad8b1aafSjsg return ret; 1865ad8b1aafSjsg } 1866ad8b1aafSjsg 1867ad8b1aafSjsg if (!skip_display_settings) { 1868ad8b1aafSjsg ret = 
smu_notify_smc_display_config(smu); 1869ad8b1aafSjsg if (ret) { 1870ad8b1aafSjsg dev_err(smu->adev->dev, "Failed to notify smc display config!"); 1871ad8b1aafSjsg return ret; 1872ad8b1aafSjsg } 1873ad8b1aafSjsg } 1874ad8b1aafSjsg 1875*afb6e02fSjsg if (smu_dpm_ctx->dpm_level != level) { 1876ad8b1aafSjsg ret = smu_asic_set_performance_level(smu, level); 1877ad8b1aafSjsg if (ret) { 1878ad8b1aafSjsg dev_err(smu->adev->dev, "Failed to set performance level!"); 1879ad8b1aafSjsg return ret; 1880ad8b1aafSjsg } 1881ad8b1aafSjsg 1882ad8b1aafSjsg /* update the saved copy */ 1883ad8b1aafSjsg smu_dpm_ctx->dpm_level = level; 1884ad8b1aafSjsg } 1885ad8b1aafSjsg 18860357c97eSjsg if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 18870357c97eSjsg smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) { 1888ad8b1aafSjsg index = fls(smu->workload_mask); 1889ad8b1aafSjsg index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1890b121103bSjsg workload[0] = smu->workload_setting[index]; 1891ad8b1aafSjsg 1892*afb6e02fSjsg if (init || smu->power_profile_mode != workload[0]) 1893b121103bSjsg smu_bump_power_profile_mode(smu, workload, 0); 1894ad8b1aafSjsg } 1895ad8b1aafSjsg 1896ad8b1aafSjsg return ret; 1897ad8b1aafSjsg } 1898ad8b1aafSjsg 18995ca02815Sjsg static int smu_handle_task(struct smu_context *smu, 1900ad8b1aafSjsg enum amd_dpm_forced_level level, 19011bb76ff1Sjsg enum amd_pp_task task_id) 1902ad8b1aafSjsg { 1903ad8b1aafSjsg int ret = 0; 1904ad8b1aafSjsg 1905ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1906ad8b1aafSjsg return -EOPNOTSUPP; 1907ad8b1aafSjsg 1908ad8b1aafSjsg switch (task_id) { 1909ad8b1aafSjsg case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: 1910ad8b1aafSjsg ret = smu_pre_display_config_changed(smu); 1911ad8b1aafSjsg if (ret) 19121bb76ff1Sjsg return ret; 191319846322Sjsg ret = smu_adjust_power_state_dynamic(smu, level, false, false); 1914ad8b1aafSjsg break; 1915ad8b1aafSjsg case AMD_PP_TASK_COMPLETE_INIT: 191619846322Sjsg ret = smu_adjust_power_state_dynamic(smu, level, true, true); 191719846322Sjsg break; 1918ad8b1aafSjsg case AMD_PP_TASK_READJUST_POWER_STATE: 191919846322Sjsg ret = smu_adjust_power_state_dynamic(smu, level, true, false); 1920ad8b1aafSjsg break; 1921ad8b1aafSjsg default: 1922ad8b1aafSjsg break; 1923ad8b1aafSjsg } 1924ad8b1aafSjsg 1925ad8b1aafSjsg return ret; 1926ad8b1aafSjsg } 1927ad8b1aafSjsg 19285ca02815Sjsg static int smu_handle_dpm_task(void *handle, 19295ca02815Sjsg enum amd_pp_task task_id, 19305ca02815Sjsg enum amd_pm_state_type *user_state) 19315ca02815Sjsg { 19325ca02815Sjsg struct smu_context *smu = handle; 19335ca02815Sjsg struct smu_dpm_context *smu_dpm = &smu->smu_dpm; 19345ca02815Sjsg 19351bb76ff1Sjsg return smu_handle_task(smu, smu_dpm->dpm_level, task_id); 19365ca02815Sjsg 19375ca02815Sjsg } 19385ca02815Sjsg 19395ca02815Sjsg static int smu_switch_power_profile(void *handle, 1940ad8b1aafSjsg enum PP_SMC_POWER_PROFILE type, 1941ad8b1aafSjsg bool en) 1942ad8b1aafSjsg { 19435ca02815Sjsg struct smu_context *smu = handle; 1944ad8b1aafSjsg struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1945b121103bSjsg long workload[1]; 1946ad8b1aafSjsg uint32_t index; 1947ad8b1aafSjsg 1948ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1949ad8b1aafSjsg return -EOPNOTSUPP; 1950ad8b1aafSjsg 1951ad8b1aafSjsg if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) 1952ad8b1aafSjsg return -EINVAL; 1953ad8b1aafSjsg 1954ad8b1aafSjsg if (!en) { 1955ad8b1aafSjsg smu->workload_mask &= ~(1 << smu->workload_prority[type]); 1956ad8b1aafSjsg 
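		/*
		 * The highest remaining priority bit now selects the active
		 * profile: e.g. a workload_mask of 0b0101 gives fls() == 3,
		 * so workload_setting[2] is applied below.
		 */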
index = fls(smu->workload_mask); 1957ad8b1aafSjsg index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1958b121103bSjsg workload[0] = smu->workload_setting[index]; 1959ad8b1aafSjsg } else { 1960ad8b1aafSjsg smu->workload_mask |= (1 << smu->workload_prority[type]); 1961ad8b1aafSjsg index = fls(smu->workload_mask); 1962ad8b1aafSjsg index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; 1963b121103bSjsg workload[0] = smu->workload_setting[index]; 1964ad8b1aafSjsg } 1965ad8b1aafSjsg 19660357c97eSjsg if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL && 19670357c97eSjsg smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) 1968b121103bSjsg smu_bump_power_profile_mode(smu, workload, 0); 1969ad8b1aafSjsg 1970ad8b1aafSjsg return 0; 1971ad8b1aafSjsg } 1972ad8b1aafSjsg 19735ca02815Sjsg static enum amd_dpm_forced_level smu_get_performance_level(void *handle) 1974ad8b1aafSjsg { 19755ca02815Sjsg struct smu_context *smu = handle; 1976ad8b1aafSjsg struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1977ad8b1aafSjsg 1978ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1979ad8b1aafSjsg return -EOPNOTSUPP; 1980ad8b1aafSjsg 1981ad8b1aafSjsg if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1982ad8b1aafSjsg return -EINVAL; 1983ad8b1aafSjsg 19841bb76ff1Sjsg return smu_dpm_ctx->dpm_level; 1985ad8b1aafSjsg } 1986ad8b1aafSjsg 19875ca02815Sjsg static int smu_force_performance_level(void *handle, 19885ca02815Sjsg enum amd_dpm_forced_level level) 1989ad8b1aafSjsg { 19905ca02815Sjsg struct smu_context *smu = handle; 1991ad8b1aafSjsg struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 1992ad8b1aafSjsg int ret = 0; 1993ad8b1aafSjsg 1994ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 1995ad8b1aafSjsg return -EOPNOTSUPP; 1996ad8b1aafSjsg 1997ad8b1aafSjsg if (!smu->is_apu && !smu_dpm_ctx->dpm_context) 1998ad8b1aafSjsg return -EINVAL; 1999ad8b1aafSjsg 2000ad8b1aafSjsg ret = smu_enable_umd_pstate(smu, &level); 20011bb76ff1Sjsg if (ret) 2002ad8b1aafSjsg return ret; 2003ad8b1aafSjsg 2004ad8b1aafSjsg ret = smu_handle_task(smu, level, 20051bb76ff1Sjsg AMD_PP_TASK_READJUST_POWER_STATE); 2006ad8b1aafSjsg 20075ca02815Sjsg /* reset user dpm clock state */ 20085ca02815Sjsg if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 20095ca02815Sjsg memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask)); 20105ca02815Sjsg smu->user_dpm_profile.clk_dependency = 0; 20115ca02815Sjsg } 20125ca02815Sjsg 2013ad8b1aafSjsg return ret; 2014ad8b1aafSjsg } 2015ad8b1aafSjsg 20165ca02815Sjsg static int smu_set_display_count(void *handle, uint32_t count) 2017ad8b1aafSjsg { 20185ca02815Sjsg struct smu_context *smu = handle; 2019ad8b1aafSjsg 2020ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2021ad8b1aafSjsg return -EOPNOTSUPP; 2022ad8b1aafSjsg 20231bb76ff1Sjsg return smu_init_display_count(smu, count); 2024ad8b1aafSjsg } 2025ad8b1aafSjsg 20265ca02815Sjsg static int smu_force_smuclk_levels(struct smu_context *smu, 2027ad8b1aafSjsg enum smu_clk_type clk_type, 2028ad8b1aafSjsg uint32_t mask) 2029ad8b1aafSjsg { 2030ad8b1aafSjsg struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); 2031ad8b1aafSjsg int ret = 0; 2032ad8b1aafSjsg 2033ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2034ad8b1aafSjsg return -EOPNOTSUPP; 2035ad8b1aafSjsg 2036ad8b1aafSjsg if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 2037ad8b1aafSjsg dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n"); 2038ad8b1aafSjsg 
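		/*
		 * Under any automatic level the DPM code re-evaluates the
		 * clocks itself and a forced mask would not stick, so the
		 * request is rejected outright.
		 */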
	return -EINVAL;
2039ad8b1aafSjsg 	}
2040ad8b1aafSjsg 
20415ca02815Sjsg 	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2042ad8b1aafSjsg 		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
20435ca02815Sjsg 		if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
20445ca02815Sjsg 			smu->user_dpm_profile.clk_mask[clk_type] = mask;
20455ca02815Sjsg 			smu_set_user_clk_dependencies(smu, clk_type);
20465ca02815Sjsg 		}
20475ca02815Sjsg 	}
2048ad8b1aafSjsg 
2049ad8b1aafSjsg 	return ret;
2050ad8b1aafSjsg }
2051ad8b1aafSjsg 
20525ca02815Sjsg static int smu_force_ppclk_levels(void *handle,
20535ca02815Sjsg 				  enum pp_clock_type type,
20545ca02815Sjsg 				  uint32_t mask)
20555ca02815Sjsg {
20565ca02815Sjsg 	struct smu_context *smu = handle;
20575ca02815Sjsg 	enum smu_clk_type clk_type;
20585ca02815Sjsg 
20595ca02815Sjsg 	switch (type) {
20605ca02815Sjsg 	case PP_SCLK:
20615ca02815Sjsg 		clk_type = SMU_SCLK; break;
20625ca02815Sjsg 	case PP_MCLK:
20635ca02815Sjsg 		clk_type = SMU_MCLK; break;
20645ca02815Sjsg 	case PP_PCIE:
20655ca02815Sjsg 		clk_type = SMU_PCIE; break;
20665ca02815Sjsg 	case PP_SOCCLK:
20675ca02815Sjsg 		clk_type = SMU_SOCCLK; break;
20685ca02815Sjsg 	case PP_FCLK:
20695ca02815Sjsg 		clk_type = SMU_FCLK; break;
20705ca02815Sjsg 	case PP_DCEFCLK:
20715ca02815Sjsg 		clk_type = SMU_DCEFCLK; break;
20725ca02815Sjsg 	case PP_VCLK:
20735ca02815Sjsg 		clk_type = SMU_VCLK; break;
2074f005ef32Sjsg 	case PP_VCLK1:
2075f005ef32Sjsg 		clk_type = SMU_VCLK1; break;
20765ca02815Sjsg 	case PP_DCLK:
20775ca02815Sjsg 		clk_type = SMU_DCLK; break;
2078f005ef32Sjsg 	case PP_DCLK1:
2079f005ef32Sjsg 		clk_type = SMU_DCLK1; break;
20805ca02815Sjsg 	case OD_SCLK:
20815ca02815Sjsg 		clk_type = SMU_OD_SCLK; break;
20825ca02815Sjsg 	case OD_MCLK:
20835ca02815Sjsg 		clk_type = SMU_OD_MCLK; break;
20845ca02815Sjsg 	case OD_VDDC_CURVE:
20855ca02815Sjsg 		clk_type = SMU_OD_VDDC_CURVE; break;
20865ca02815Sjsg 	case OD_RANGE:
20875ca02815Sjsg 		clk_type = SMU_OD_RANGE; break;
20885ca02815Sjsg 	default:
20895ca02815Sjsg 		return -EINVAL;
20905ca02815Sjsg 	}
20915ca02815Sjsg 
20925ca02815Sjsg 	return smu_force_smuclk_levels(smu, clk_type, mask);
20935ca02815Sjsg }
20945ca02815Sjsg 
2095ad8b1aafSjsg /*
2096ad8b1aafSjsg  * On system suspend or reset, the dpm_enabled flag will be
2097ad8b1aafSjsg  * cleared, so that those SMU services which are not supported
2098ad8b1aafSjsg  * are gated.
2099ad8b1aafSjsg  * However, the mp1 state setting should still be honored even
2100ad8b1aafSjsg  * if dpm_enabled has been cleared.
2101ad8b1aafSjsg */ 21025ca02815Sjsg static int smu_set_mp1_state(void *handle, 2103ad8b1aafSjsg enum pp_mp1_state mp1_state) 2104ad8b1aafSjsg { 21055ca02815Sjsg struct smu_context *smu = handle; 21065ca02815Sjsg int ret = 0; 2107ad8b1aafSjsg 2108ad8b1aafSjsg if (!smu->pm_enabled) 2109ad8b1aafSjsg return -EOPNOTSUPP; 2110ad8b1aafSjsg 21115ca02815Sjsg if (smu->ppt_funcs && 21125ca02815Sjsg smu->ppt_funcs->set_mp1_state) 21135ca02815Sjsg ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); 2114ad8b1aafSjsg 2115ad8b1aafSjsg return ret; 2116ad8b1aafSjsg } 2117ad8b1aafSjsg 21185ca02815Sjsg static int smu_set_df_cstate(void *handle, 2119ad8b1aafSjsg enum pp_df_cstate state) 2120ad8b1aafSjsg { 21215ca02815Sjsg struct smu_context *smu = handle; 2122ad8b1aafSjsg int ret = 0; 2123ad8b1aafSjsg 2124ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2125ad8b1aafSjsg return -EOPNOTSUPP; 2126ad8b1aafSjsg 2127ad8b1aafSjsg if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) 2128ad8b1aafSjsg return 0; 2129ad8b1aafSjsg 2130ad8b1aafSjsg ret = smu->ppt_funcs->set_df_cstate(smu, state); 2131ad8b1aafSjsg if (ret) 2132ad8b1aafSjsg dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); 2133ad8b1aafSjsg 2134ad8b1aafSjsg return ret; 2135ad8b1aafSjsg } 2136ad8b1aafSjsg 2137ad8b1aafSjsg int smu_allow_xgmi_power_down(struct smu_context *smu, bool en) 2138ad8b1aafSjsg { 2139ad8b1aafSjsg int ret = 0; 2140ad8b1aafSjsg 2141ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2142ad8b1aafSjsg return -EOPNOTSUPP; 2143ad8b1aafSjsg 2144ad8b1aafSjsg if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down) 2145ad8b1aafSjsg return 0; 2146ad8b1aafSjsg 2147ad8b1aafSjsg ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en); 2148ad8b1aafSjsg if (ret) 2149ad8b1aafSjsg dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n"); 2150ad8b1aafSjsg 2151ad8b1aafSjsg return ret; 2152ad8b1aafSjsg } 2153ad8b1aafSjsg 2154ad8b1aafSjsg int smu_write_watermarks_table(struct smu_context *smu) 2155ad8b1aafSjsg { 2156ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2157ad8b1aafSjsg return -EOPNOTSUPP; 2158ad8b1aafSjsg 21591bb76ff1Sjsg return smu_set_watermarks_table(smu, NULL); 2160ad8b1aafSjsg } 2161ad8b1aafSjsg 21625ca02815Sjsg static int smu_set_watermarks_for_clock_ranges(void *handle, 2163ad8b1aafSjsg struct pp_smu_wm_range_sets *clock_ranges) 2164ad8b1aafSjsg { 21655ca02815Sjsg struct smu_context *smu = handle; 2166ad8b1aafSjsg 2167ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2168ad8b1aafSjsg return -EOPNOTSUPP; 2169ad8b1aafSjsg 2170ad8b1aafSjsg if (smu->disable_watermark) 2171ad8b1aafSjsg return 0; 2172ad8b1aafSjsg 21731bb76ff1Sjsg return smu_set_watermarks_table(smu, clock_ranges); 2174ad8b1aafSjsg } 2175ad8b1aafSjsg 2176ad8b1aafSjsg int smu_set_ac_dc(struct smu_context *smu) 2177ad8b1aafSjsg { 2178ad8b1aafSjsg int ret = 0; 2179ad8b1aafSjsg 2180ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2181ad8b1aafSjsg return -EOPNOTSUPP; 2182ad8b1aafSjsg 2183ad8b1aafSjsg /* controlled by firmware */ 2184ad8b1aafSjsg if (smu->dc_controlled_by_gpio) 2185ad8b1aafSjsg return 0; 2186ad8b1aafSjsg 2187ad8b1aafSjsg ret = smu_set_power_source(smu, 2188ad8b1aafSjsg smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : 2189ad8b1aafSjsg SMU_POWER_SOURCE_DC); 2190ad8b1aafSjsg if (ret) 2191ad8b1aafSjsg dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", 2192ad8b1aafSjsg smu->adev->pm.ac_power ? 
"AC" : "DC"); 2193ad8b1aafSjsg 2194ad8b1aafSjsg return ret; 2195ad8b1aafSjsg } 2196ad8b1aafSjsg 2197ad8b1aafSjsg const struct amd_ip_funcs smu_ip_funcs = { 2198ad8b1aafSjsg .name = "smu", 2199ad8b1aafSjsg .early_init = smu_early_init, 2200ad8b1aafSjsg .late_init = smu_late_init, 2201ad8b1aafSjsg .sw_init = smu_sw_init, 2202ad8b1aafSjsg .sw_fini = smu_sw_fini, 2203ad8b1aafSjsg .hw_init = smu_hw_init, 2204ad8b1aafSjsg .hw_fini = smu_hw_fini, 22051bb76ff1Sjsg .late_fini = smu_late_fini, 2206ad8b1aafSjsg .suspend = smu_suspend, 2207ad8b1aafSjsg .resume = smu_resume, 2208ad8b1aafSjsg .is_idle = NULL, 2209ad8b1aafSjsg .check_soft_reset = NULL, 2210ad8b1aafSjsg .wait_for_idle = NULL, 2211ad8b1aafSjsg .soft_reset = NULL, 2212ad8b1aafSjsg .set_clockgating_state = smu_set_clockgating_state, 2213ad8b1aafSjsg .set_powergating_state = smu_set_powergating_state, 2214ad8b1aafSjsg }; 2215ad8b1aafSjsg 2216f005ef32Sjsg const struct amdgpu_ip_block_version smu_v11_0_ip_block = { 2217ad8b1aafSjsg .type = AMD_IP_BLOCK_TYPE_SMC, 2218ad8b1aafSjsg .major = 11, 2219ad8b1aafSjsg .minor = 0, 2220ad8b1aafSjsg .rev = 0, 2221ad8b1aafSjsg .funcs = &smu_ip_funcs, 2222ad8b1aafSjsg }; 2223ad8b1aafSjsg 2224f005ef32Sjsg const struct amdgpu_ip_block_version smu_v12_0_ip_block = { 2225ad8b1aafSjsg .type = AMD_IP_BLOCK_TYPE_SMC, 2226ad8b1aafSjsg .major = 12, 2227ad8b1aafSjsg .minor = 0, 2228ad8b1aafSjsg .rev = 0, 2229ad8b1aafSjsg .funcs = &smu_ip_funcs, 2230ad8b1aafSjsg }; 2231ad8b1aafSjsg 2232f005ef32Sjsg const struct amdgpu_ip_block_version smu_v13_0_ip_block = { 22335ca02815Sjsg .type = AMD_IP_BLOCK_TYPE_SMC, 22345ca02815Sjsg .major = 13, 22355ca02815Sjsg .minor = 0, 22365ca02815Sjsg .rev = 0, 22375ca02815Sjsg .funcs = &smu_ip_funcs, 22385ca02815Sjsg }; 22395ca02815Sjsg 22405ca02815Sjsg static int smu_load_microcode(void *handle) 22415ca02815Sjsg { 22425ca02815Sjsg struct smu_context *smu = handle; 22435ca02815Sjsg struct amdgpu_device *adev = smu->adev; 2244ad8b1aafSjsg int ret = 0; 2245ad8b1aafSjsg 22465ca02815Sjsg if (!smu->pm_enabled) 2247ad8b1aafSjsg return -EOPNOTSUPP; 2248ad8b1aafSjsg 22495ca02815Sjsg /* This should be used for non PSP loading */ 22505ca02815Sjsg if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) 22515ca02815Sjsg return 0; 2252ad8b1aafSjsg 22535ca02815Sjsg if (smu->ppt_funcs->load_microcode) { 2254ad8b1aafSjsg ret = smu->ppt_funcs->load_microcode(smu); 22555ca02815Sjsg if (ret) { 22565ca02815Sjsg dev_err(adev->dev, "Load microcode failed\n"); 2257ad8b1aafSjsg return ret; 2258ad8b1aafSjsg } 22595ca02815Sjsg } 2260ad8b1aafSjsg 22615ca02815Sjsg if (smu->ppt_funcs->check_fw_status) { 2262ad8b1aafSjsg ret = smu->ppt_funcs->check_fw_status(smu); 22635ca02815Sjsg if (ret) { 22645ca02815Sjsg dev_err(adev->dev, "SMC is not ready\n"); 22655ca02815Sjsg return ret; 22665ca02815Sjsg } 22675ca02815Sjsg } 2268ad8b1aafSjsg 2269ad8b1aafSjsg return ret; 2270ad8b1aafSjsg } 2271ad8b1aafSjsg 22725ca02815Sjsg static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) 2273ad8b1aafSjsg { 2274ad8b1aafSjsg int ret = 0; 2275ad8b1aafSjsg 2276ad8b1aafSjsg if (smu->ppt_funcs->set_gfx_cgpg) 2277ad8b1aafSjsg ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); 2278ad8b1aafSjsg 2279ad8b1aafSjsg return ret; 2280ad8b1aafSjsg } 2281ad8b1aafSjsg 22825ca02815Sjsg static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) 2283ad8b1aafSjsg { 22845ca02815Sjsg struct smu_context *smu = handle; 2285ad8b1aafSjsg int ret = 0; 2286ad8b1aafSjsg 2287ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2288ad8b1aafSjsg return 
-EOPNOTSUPP; 2289ad8b1aafSjsg 22901bb76ff1Sjsg if (!smu->ppt_funcs->set_fan_speed_rpm) 22911bb76ff1Sjsg return -EOPNOTSUPP; 2292ad8b1aafSjsg 22931bb76ff1Sjsg if (speed == U32_MAX) 22941bb76ff1Sjsg return -EINVAL; 22951bb76ff1Sjsg 2296ad8b1aafSjsg ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); 22975ca02815Sjsg if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 22985ca02815Sjsg smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; 22995ca02815Sjsg smu->user_dpm_profile.fan_speed_rpm = speed; 23005ca02815Sjsg 23015ca02815Sjsg /* Override custom PWM setting as they cannot co-exist */ 23025ca02815Sjsg smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; 23035ca02815Sjsg smu->user_dpm_profile.fan_speed_pwm = 0; 23045ca02815Sjsg } 2305ad8b1aafSjsg 2306ad8b1aafSjsg return ret; 2307ad8b1aafSjsg } 2308ad8b1aafSjsg 23095ca02815Sjsg /** 23105ca02815Sjsg * smu_get_power_limit - Request one of the SMU Power Limits 23115ca02815Sjsg * 23125ca02815Sjsg * @handle: pointer to smu context 23135ca02815Sjsg * @limit: requested limit is written back to this variable 23145ca02815Sjsg * @pp_limit_level: &pp_power_limit_level which limit of the power to return 23155ca02815Sjsg * @pp_power_type: &pp_power_type type of power 23165ca02815Sjsg * Return: 0 on success, <0 on error 23175ca02815Sjsg * 23185ca02815Sjsg */ 23195ca02815Sjsg int smu_get_power_limit(void *handle, 2320ad8b1aafSjsg uint32_t *limit, 23215ca02815Sjsg enum pp_power_limit_level pp_limit_level, 23225ca02815Sjsg enum pp_power_type pp_power_type) 2323ad8b1aafSjsg { 23245ca02815Sjsg struct smu_context *smu = handle; 23251bb76ff1Sjsg struct amdgpu_device *adev = smu->adev; 23265ca02815Sjsg enum smu_ppt_limit_level limit_level; 23275ca02815Sjsg uint32_t limit_type; 23285ca02815Sjsg int ret = 0; 23295ca02815Sjsg 2330ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2331ad8b1aafSjsg return -EOPNOTSUPP; 2332ad8b1aafSjsg 23335ca02815Sjsg switch (pp_power_type) { 23345ca02815Sjsg case PP_PWR_TYPE_SUSTAINED: 23355ca02815Sjsg limit_type = SMU_DEFAULT_PPT_LIMIT; 23365ca02815Sjsg break; 23375ca02815Sjsg case PP_PWR_TYPE_FAST: 23385ca02815Sjsg limit_type = SMU_FAST_PPT_LIMIT; 23395ca02815Sjsg break; 23405ca02815Sjsg default: 23415ca02815Sjsg return -EOPNOTSUPP; 23425ca02815Sjsg break; 23435ca02815Sjsg } 23445ca02815Sjsg 23455ca02815Sjsg switch (pp_limit_level) { 23465ca02815Sjsg case PP_PWR_LIMIT_CURRENT: 23475ca02815Sjsg limit_level = SMU_PPT_LIMIT_CURRENT; 23485ca02815Sjsg break; 23495ca02815Sjsg case PP_PWR_LIMIT_DEFAULT: 23505ca02815Sjsg limit_level = SMU_PPT_LIMIT_DEFAULT; 23515ca02815Sjsg break; 23525ca02815Sjsg case PP_PWR_LIMIT_MAX: 23535ca02815Sjsg limit_level = SMU_PPT_LIMIT_MAX; 23545ca02815Sjsg break; 23555ca02815Sjsg case PP_PWR_LIMIT_MIN: 23565ca02815Sjsg default: 23575ca02815Sjsg return -EOPNOTSUPP; 23585ca02815Sjsg break; 23595ca02815Sjsg } 23605ca02815Sjsg 23615ca02815Sjsg if (limit_type != SMU_DEFAULT_PPT_LIMIT) { 23625ca02815Sjsg if (smu->ppt_funcs->get_ppt_limit) 23635ca02815Sjsg ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); 23645ca02815Sjsg } else { 23655ca02815Sjsg switch (limit_level) { 23665ca02815Sjsg case SMU_PPT_LIMIT_CURRENT: 23671bb76ff1Sjsg switch (adev->ip_versions[MP1_HWIP][0]) { 23681bb76ff1Sjsg case IP_VERSION(13, 0, 2): 23691bb76ff1Sjsg case IP_VERSION(11, 0, 7): 23701bb76ff1Sjsg case IP_VERSION(11, 0, 11): 23711bb76ff1Sjsg case IP_VERSION(11, 0, 12): 23721bb76ff1Sjsg case IP_VERSION(11, 0, 13): 23735ca02815Sjsg ret = smu_get_asic_power_limits(smu, 
23745ca02815Sjsg &smu->current_power_limit, 23755ca02815Sjsg NULL, 23765ca02815Sjsg NULL); 23771bb76ff1Sjsg break; 23781bb76ff1Sjsg default: 23791bb76ff1Sjsg break; 23801bb76ff1Sjsg } 23815ca02815Sjsg *limit = smu->current_power_limit; 23825ca02815Sjsg break; 23835ca02815Sjsg case SMU_PPT_LIMIT_DEFAULT: 23845ca02815Sjsg *limit = smu->default_power_limit; 23855ca02815Sjsg break; 23865ca02815Sjsg case SMU_PPT_LIMIT_MAX: 23875ca02815Sjsg *limit = smu->max_power_limit; 23885ca02815Sjsg break; 23895ca02815Sjsg default: 23905ca02815Sjsg break; 23915ca02815Sjsg } 23925ca02815Sjsg } 2393ad8b1aafSjsg 23945ca02815Sjsg return ret; 2395ad8b1aafSjsg } 2396ad8b1aafSjsg 23975ca02815Sjsg static int smu_set_power_limit(void *handle, uint32_t limit) 2398ad8b1aafSjsg { 23995ca02815Sjsg struct smu_context *smu = handle; 24005ca02815Sjsg uint32_t limit_type = limit >> 24; 2401ad8b1aafSjsg int ret = 0; 2402ad8b1aafSjsg 2403ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2404ad8b1aafSjsg return -EOPNOTSUPP; 2405ad8b1aafSjsg 24061bb76ff1Sjsg limit &= (1<<24)-1; 24075ca02815Sjsg if (limit_type != SMU_DEFAULT_PPT_LIMIT) 24081bb76ff1Sjsg if (smu->ppt_funcs->set_power_limit) 24091bb76ff1Sjsg return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); 24105ca02815Sjsg 2411ad8b1aafSjsg if (limit > smu->max_power_limit) { 2412ad8b1aafSjsg dev_err(smu->adev->dev, 2413ad8b1aafSjsg "New power limit (%d) is over the max allowed %d\n", 2414ad8b1aafSjsg limit, smu->max_power_limit); 24151bb76ff1Sjsg return -EINVAL; 2416ad8b1aafSjsg } 2417ad8b1aafSjsg 2418ad8b1aafSjsg if (!limit) 2419ad8b1aafSjsg limit = smu->current_power_limit; 2420ad8b1aafSjsg 24215ca02815Sjsg if (smu->ppt_funcs->set_power_limit) { 24221bb76ff1Sjsg ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); 24235ca02815Sjsg if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) 24245ca02815Sjsg smu->user_dpm_profile.power_limit = limit; 24255ca02815Sjsg } 2426ad8b1aafSjsg 2427ad8b1aafSjsg return ret; 2428ad8b1aafSjsg } 2429ad8b1aafSjsg 24305ca02815Sjsg static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) 2431ad8b1aafSjsg { 2432ad8b1aafSjsg int ret = 0; 2433ad8b1aafSjsg 2434ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2435ad8b1aafSjsg return -EOPNOTSUPP; 2436ad8b1aafSjsg 2437ad8b1aafSjsg if (smu->ppt_funcs->print_clk_levels) 2438ad8b1aafSjsg ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); 2439ad8b1aafSjsg 2440ad8b1aafSjsg return ret; 2441ad8b1aafSjsg } 2442ad8b1aafSjsg 24431bb76ff1Sjsg static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) 2444ad8b1aafSjsg { 24455ca02815Sjsg enum smu_clk_type clk_type; 2446ad8b1aafSjsg 24475ca02815Sjsg switch (type) { 24485ca02815Sjsg case PP_SCLK: 24495ca02815Sjsg clk_type = SMU_SCLK; break; 24505ca02815Sjsg case PP_MCLK: 24515ca02815Sjsg clk_type = SMU_MCLK; break; 24525ca02815Sjsg case PP_PCIE: 24535ca02815Sjsg clk_type = SMU_PCIE; break; 24545ca02815Sjsg case PP_SOCCLK: 24555ca02815Sjsg clk_type = SMU_SOCCLK; break; 24565ca02815Sjsg case PP_FCLK: 24575ca02815Sjsg clk_type = SMU_FCLK; break; 24585ca02815Sjsg case PP_DCEFCLK: 24595ca02815Sjsg clk_type = SMU_DCEFCLK; break; 24605ca02815Sjsg case PP_VCLK: 24615ca02815Sjsg clk_type = SMU_VCLK; break; 2462f005ef32Sjsg case PP_VCLK1: 2463f005ef32Sjsg clk_type = SMU_VCLK1; break; 24645ca02815Sjsg case PP_DCLK: 24655ca02815Sjsg clk_type = SMU_DCLK; break; 2466f005ef32Sjsg case PP_DCLK1: 2467f005ef32Sjsg clk_type = SMU_DCLK1; break; 
24685ca02815Sjsg case OD_SCLK: 24695ca02815Sjsg clk_type = SMU_OD_SCLK; break; 24705ca02815Sjsg case OD_MCLK: 24715ca02815Sjsg clk_type = SMU_OD_MCLK; break; 24725ca02815Sjsg case OD_VDDC_CURVE: 24735ca02815Sjsg clk_type = SMU_OD_VDDC_CURVE; break; 24745ca02815Sjsg case OD_RANGE: 24755ca02815Sjsg clk_type = SMU_OD_RANGE; break; 24765ca02815Sjsg case OD_VDDGFX_OFFSET: 24775ca02815Sjsg clk_type = SMU_OD_VDDGFX_OFFSET; break; 24785ca02815Sjsg case OD_CCLK: 24795ca02815Sjsg clk_type = SMU_OD_CCLK; break; 24805ca02815Sjsg default: 24811bb76ff1Sjsg clk_type = SMU_CLK_COUNT; break; 2482ad8b1aafSjsg } 2483ad8b1aafSjsg 24841bb76ff1Sjsg return clk_type; 24851bb76ff1Sjsg } 24861bb76ff1Sjsg 24871bb76ff1Sjsg static int smu_print_ppclk_levels(void *handle, 24881bb76ff1Sjsg enum pp_clock_type type, 24891bb76ff1Sjsg char *buf) 24901bb76ff1Sjsg { 24911bb76ff1Sjsg struct smu_context *smu = handle; 24921bb76ff1Sjsg enum smu_clk_type clk_type; 24931bb76ff1Sjsg 24941bb76ff1Sjsg clk_type = smu_convert_to_smuclk(type); 24951bb76ff1Sjsg if (clk_type == SMU_CLK_COUNT) 24961bb76ff1Sjsg return -EINVAL; 24971bb76ff1Sjsg 24985ca02815Sjsg return smu_print_smuclk_levels(smu, clk_type, buf); 2499ad8b1aafSjsg } 2500ad8b1aafSjsg 25011bb76ff1Sjsg static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset) 25021bb76ff1Sjsg { 25031bb76ff1Sjsg struct smu_context *smu = handle; 25041bb76ff1Sjsg enum smu_clk_type clk_type; 25051bb76ff1Sjsg 25061bb76ff1Sjsg clk_type = smu_convert_to_smuclk(type); 25071bb76ff1Sjsg if (clk_type == SMU_CLK_COUNT) 25081bb76ff1Sjsg return -EINVAL; 25091bb76ff1Sjsg 25101bb76ff1Sjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 25111bb76ff1Sjsg return -EOPNOTSUPP; 25121bb76ff1Sjsg 25131bb76ff1Sjsg if (!smu->ppt_funcs->emit_clk_levels) 25141bb76ff1Sjsg return -ENOENT; 25151bb76ff1Sjsg 25161bb76ff1Sjsg return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset); 25171bb76ff1Sjsg 25181bb76ff1Sjsg } 25191bb76ff1Sjsg 25205ca02815Sjsg static int smu_od_edit_dpm_table(void *handle, 2521ad8b1aafSjsg enum PP_OD_DPM_TABLE_COMMAND type, 2522ad8b1aafSjsg long *input, uint32_t size) 2523ad8b1aafSjsg { 25245ca02815Sjsg struct smu_context *smu = handle; 2525ad8b1aafSjsg int ret = 0; 2526ad8b1aafSjsg 2527ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2528ad8b1aafSjsg return -EOPNOTSUPP; 2529ad8b1aafSjsg 2530ad8b1aafSjsg if (smu->ppt_funcs->od_edit_dpm_table) { 2531ad8b1aafSjsg ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); 2532ad8b1aafSjsg } 2533ad8b1aafSjsg 2534ad8b1aafSjsg return ret; 2535ad8b1aafSjsg } 2536ad8b1aafSjsg 25375ca02815Sjsg static int smu_read_sensor(void *handle, 25385ca02815Sjsg int sensor, 25395ca02815Sjsg void *data, 25405ca02815Sjsg int *size_arg) 2541ad8b1aafSjsg { 25425ca02815Sjsg struct smu_context *smu = handle; 2543ad8b1aafSjsg struct smu_umd_pstate_table *pstate_table = 2544ad8b1aafSjsg &smu->pstate_table; 2545ad8b1aafSjsg int ret = 0; 25465ca02815Sjsg uint32_t *size, size_val; 2547ad8b1aafSjsg 2548ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2549ad8b1aafSjsg return -EOPNOTSUPP; 2550ad8b1aafSjsg 25515ca02815Sjsg if (!data || !size_arg) 2552ad8b1aafSjsg return -EINVAL; 2553ad8b1aafSjsg 25545ca02815Sjsg size_val = *size_arg; 25555ca02815Sjsg size = &size_val; 25565ca02815Sjsg 2557ad8b1aafSjsg if (smu->ppt_funcs->read_sensor) 2558ad8b1aafSjsg if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) 2559ad8b1aafSjsg goto unlock; 2560ad8b1aafSjsg 2561ad8b1aafSjsg switch (sensor) { 
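	/*
	 * Sensors the ASIC backend did not claim above are served from
	 * generic, cached state below; the MHz pstate clocks are scaled
	 * by 100 to match the 10 kHz units of the sensor interface.
	 */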
2562ad8b1aafSjsg case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: 2563ad8b1aafSjsg *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; 2564ad8b1aafSjsg *size = 4; 2565ad8b1aafSjsg break; 2566ad8b1aafSjsg case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: 2567ad8b1aafSjsg *((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100; 2568ad8b1aafSjsg *size = 4; 2569ad8b1aafSjsg break; 25708feffbfbSjsg case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK: 25718feffbfbSjsg *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100; 25728feffbfbSjsg *size = 4; 25738feffbfbSjsg break; 25748feffbfbSjsg case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK: 25758feffbfbSjsg *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100; 25768feffbfbSjsg *size = 4; 25778feffbfbSjsg break; 2578ad8b1aafSjsg case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: 25791bb76ff1Sjsg ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data); 2580ad8b1aafSjsg *size = 8; 2581ad8b1aafSjsg break; 2582ad8b1aafSjsg case AMDGPU_PP_SENSOR_UVD_POWER: 2583ad8b1aafSjsg *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0; 2584ad8b1aafSjsg *size = 4; 2585ad8b1aafSjsg break; 2586ad8b1aafSjsg case AMDGPU_PP_SENSOR_VCE_POWER: 2587ad8b1aafSjsg *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0; 2588ad8b1aafSjsg *size = 4; 2589ad8b1aafSjsg break; 2590ad8b1aafSjsg case AMDGPU_PP_SENSOR_VCN_POWER_STATE: 2591ad8b1aafSjsg *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1; 2592ad8b1aafSjsg *size = 4; 2593ad8b1aafSjsg break; 2594ad8b1aafSjsg case AMDGPU_PP_SENSOR_MIN_FAN_RPM: 2595ad8b1aafSjsg *(uint32_t *)data = 0; 2596ad8b1aafSjsg *size = 4; 2597ad8b1aafSjsg break; 2598ad8b1aafSjsg default: 2599ad8b1aafSjsg *size = 0; 2600ad8b1aafSjsg ret = -EOPNOTSUPP; 2601ad8b1aafSjsg break; 2602ad8b1aafSjsg } 2603ad8b1aafSjsg 2604ad8b1aafSjsg unlock: 26055ca02815Sjsg // assign uint32_t to int 26065ca02815Sjsg *size_arg = size_val; 26075ca02815Sjsg 2608ad8b1aafSjsg return ret; 2609ad8b1aafSjsg } 2610ad8b1aafSjsg 2611f005ef32Sjsg static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit) 2612f005ef32Sjsg { 2613f005ef32Sjsg int ret = -EINVAL; 2614f005ef32Sjsg struct smu_context *smu = handle; 2615f005ef32Sjsg 2616f005ef32Sjsg if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit) 2617f005ef32Sjsg ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit); 2618f005ef32Sjsg 2619f005ef32Sjsg return ret; 2620f005ef32Sjsg } 2621f005ef32Sjsg 2622f005ef32Sjsg static int smu_set_apu_thermal_limit(void *handle, uint32_t limit) 2623f005ef32Sjsg { 2624f005ef32Sjsg int ret = -EINVAL; 2625f005ef32Sjsg struct smu_context *smu = handle; 2626f005ef32Sjsg 2627f005ef32Sjsg if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit) 2628f005ef32Sjsg ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit); 2629f005ef32Sjsg 2630f005ef32Sjsg return ret; 2631f005ef32Sjsg } 2632f005ef32Sjsg 26335ca02815Sjsg static int smu_get_power_profile_mode(void *handle, char *buf) 2634ad8b1aafSjsg { 26355ca02815Sjsg struct smu_context *smu = handle; 2636ad8b1aafSjsg 26371bb76ff1Sjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 26381bb76ff1Sjsg !smu->ppt_funcs->get_power_profile_mode) 2639ad8b1aafSjsg return -EOPNOTSUPP; 26401bb76ff1Sjsg if (!buf) 26411bb76ff1Sjsg return -EINVAL; 2642ad8b1aafSjsg 26431bb76ff1Sjsg return smu->ppt_funcs->get_power_profile_mode(smu, buf); 2644ad8b1aafSjsg } 2645ad8b1aafSjsg 26465ca02815Sjsg static int smu_set_power_profile_mode(void *handle, 2647ad8b1aafSjsg long *param, 
26485ca02815Sjsg uint32_t param_size) 2649ad8b1aafSjsg { 26505ca02815Sjsg struct smu_context *smu = handle; 2651ad8b1aafSjsg 26521bb76ff1Sjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || 26531bb76ff1Sjsg !smu->ppt_funcs->set_power_profile_mode) 2654ad8b1aafSjsg return -EOPNOTSUPP; 2655ad8b1aafSjsg 26561bb76ff1Sjsg return smu_bump_power_profile_mode(smu, param, param_size); 2657ad8b1aafSjsg } 2658ad8b1aafSjsg 26591bb76ff1Sjsg static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) 2660ad8b1aafSjsg { 26615ca02815Sjsg struct smu_context *smu = handle; 2662ad8b1aafSjsg 2663ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 26641bb76ff1Sjsg return -EOPNOTSUPP; 2665ad8b1aafSjsg 26661bb76ff1Sjsg if (!smu->ppt_funcs->get_fan_control_mode) 26671bb76ff1Sjsg return -EOPNOTSUPP; 2668ad8b1aafSjsg 26691bb76ff1Sjsg if (!fan_mode) 26701bb76ff1Sjsg return -EINVAL; 2671ad8b1aafSjsg 26721bb76ff1Sjsg *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); 2673ad8b1aafSjsg 26741bb76ff1Sjsg return 0; 2675ad8b1aafSjsg } 2676ad8b1aafSjsg 26771bb76ff1Sjsg static int smu_set_fan_control_mode(void *handle, u32 value) 2678ad8b1aafSjsg { 26791bb76ff1Sjsg struct smu_context *smu = handle; 2680ad8b1aafSjsg int ret = 0; 2681ad8b1aafSjsg 2682ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2683ad8b1aafSjsg return -EOPNOTSUPP; 2684ad8b1aafSjsg 26851bb76ff1Sjsg if (!smu->ppt_funcs->set_fan_control_mode) 26861bb76ff1Sjsg return -EOPNOTSUPP; 2687ad8b1aafSjsg 26881bb76ff1Sjsg if (value == U32_MAX) 26891bb76ff1Sjsg return -EINVAL; 26901bb76ff1Sjsg 2691ad8b1aafSjsg ret = smu->ppt_funcs->set_fan_control_mode(smu, value); 26921bb76ff1Sjsg if (ret) 26931bb76ff1Sjsg goto out; 2694ad8b1aafSjsg 26951bb76ff1Sjsg if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 26961bb76ff1Sjsg smu->user_dpm_profile.fan_mode = value; 2697ad8b1aafSjsg 26985ca02815Sjsg /* reset user dpm fan speed */ 26991bb76ff1Sjsg if (value != AMD_FAN_CTRL_MANUAL) { 27005ca02815Sjsg smu->user_dpm_profile.fan_speed_pwm = 0; 27015ca02815Sjsg smu->user_dpm_profile.fan_speed_rpm = 0; 27025ca02815Sjsg smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); 27035ca02815Sjsg } 27041bb76ff1Sjsg } 2705ad8b1aafSjsg 27061bb76ff1Sjsg out: 2707ad8b1aafSjsg return ret; 2708ad8b1aafSjsg } 2709ad8b1aafSjsg 27105ca02815Sjsg static int smu_get_fan_speed_pwm(void *handle, u32 *speed) 27115ca02815Sjsg { 27125ca02815Sjsg struct smu_context *smu = handle; 2713ad8b1aafSjsg int ret = 0; 2714ad8b1aafSjsg 2715ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2716ad8b1aafSjsg return -EOPNOTSUPP; 2717ad8b1aafSjsg 27181bb76ff1Sjsg if (!smu->ppt_funcs->get_fan_speed_pwm) 27191bb76ff1Sjsg return -EOPNOTSUPP; 2720ad8b1aafSjsg 27211bb76ff1Sjsg if (!speed) 27221bb76ff1Sjsg return -EINVAL; 27231bb76ff1Sjsg 27245ca02815Sjsg ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); 2725ad8b1aafSjsg 2726ad8b1aafSjsg return ret; 2727ad8b1aafSjsg } 2728ad8b1aafSjsg 27295ca02815Sjsg static int smu_set_fan_speed_pwm(void *handle, u32 speed) 2730ad8b1aafSjsg { 27315ca02815Sjsg struct smu_context *smu = handle; 27325ca02815Sjsg int ret = 0; 27335ca02815Sjsg 27345ca02815Sjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 27355ca02815Sjsg return -EOPNOTSUPP; 27365ca02815Sjsg 27371bb76ff1Sjsg if (!smu->ppt_funcs->set_fan_speed_pwm) 27381bb76ff1Sjsg return -EOPNOTSUPP; 27395ca02815Sjsg 27401bb76ff1Sjsg if (speed == U32_MAX) 27411bb76ff1Sjsg return -EINVAL; 27421bb76ff1Sjsg 27435ca02815Sjsg ret = 
smu->ppt_funcs->set_fan_speed_pwm(smu, speed); 27445ca02815Sjsg if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { 27455ca02815Sjsg smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; 27465ca02815Sjsg smu->user_dpm_profile.fan_speed_pwm = speed; 27475ca02815Sjsg 27485ca02815Sjsg /* Override custom RPM setting as they cannot co-exist */ 27495ca02815Sjsg smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; 27505ca02815Sjsg smu->user_dpm_profile.fan_speed_rpm = 0; 27515ca02815Sjsg } 27525ca02815Sjsg 27535ca02815Sjsg return ret; 27545ca02815Sjsg } 27555ca02815Sjsg 27565ca02815Sjsg static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) 27575ca02815Sjsg { 27585ca02815Sjsg struct smu_context *smu = handle; 2759ad8b1aafSjsg int ret = 0; 2760ad8b1aafSjsg 2761ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2762ad8b1aafSjsg return -EOPNOTSUPP; 2763ad8b1aafSjsg 27641bb76ff1Sjsg if (!smu->ppt_funcs->get_fan_speed_rpm) 27651bb76ff1Sjsg return -EOPNOTSUPP; 2766ad8b1aafSjsg 27671bb76ff1Sjsg if (!speed) 27681bb76ff1Sjsg return -EINVAL; 27691bb76ff1Sjsg 2770ad8b1aafSjsg ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); 2771ad8b1aafSjsg 2772ad8b1aafSjsg return ret; 2773ad8b1aafSjsg } 2774ad8b1aafSjsg 27755ca02815Sjsg static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) 2776ad8b1aafSjsg { 27775ca02815Sjsg struct smu_context *smu = handle; 2778ad8b1aafSjsg 2779ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2780ad8b1aafSjsg return -EOPNOTSUPP; 2781ad8b1aafSjsg 27821bb76ff1Sjsg return smu_set_min_dcef_deep_sleep(smu, clk); 2783ad8b1aafSjsg } 2784ad8b1aafSjsg 27855ca02815Sjsg static int smu_get_clock_by_type_with_latency(void *handle, 2786ad8b1aafSjsg enum amd_pp_clock_type type, 2787ad8b1aafSjsg struct pp_clock_levels_with_latency *clocks) 2788ad8b1aafSjsg { 27895ca02815Sjsg struct smu_context *smu = handle; 27905ca02815Sjsg enum smu_clk_type clk_type; 2791ad8b1aafSjsg int ret = 0; 2792ad8b1aafSjsg 2793ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2794ad8b1aafSjsg return -EOPNOTSUPP; 2795ad8b1aafSjsg 27965ca02815Sjsg if (smu->ppt_funcs->get_clock_by_type_with_latency) { 27975ca02815Sjsg switch (type) { 27985ca02815Sjsg case amd_pp_sys_clock: 27995ca02815Sjsg clk_type = SMU_GFXCLK; 28005ca02815Sjsg break; 28015ca02815Sjsg case amd_pp_mem_clock: 28025ca02815Sjsg clk_type = SMU_MCLK; 28035ca02815Sjsg break; 28045ca02815Sjsg case amd_pp_dcef_clock: 28055ca02815Sjsg clk_type = SMU_DCEFCLK; 28065ca02815Sjsg break; 28075ca02815Sjsg case amd_pp_disp_clock: 28085ca02815Sjsg clk_type = SMU_DISPCLK; 28095ca02815Sjsg break; 28105ca02815Sjsg default: 28115ca02815Sjsg dev_err(smu->adev->dev, "Invalid clock type!\n"); 28125ca02815Sjsg return -EINVAL; 28135ca02815Sjsg } 28145ca02815Sjsg 2815ad8b1aafSjsg ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); 28165ca02815Sjsg } 2817ad8b1aafSjsg 2818ad8b1aafSjsg return ret; 2819ad8b1aafSjsg } 2820ad8b1aafSjsg 28215ca02815Sjsg static int smu_display_clock_voltage_request(void *handle, 2822ad8b1aafSjsg struct pp_display_clock_request *clock_req) 2823ad8b1aafSjsg { 28245ca02815Sjsg struct smu_context *smu = handle; 2825ad8b1aafSjsg int ret = 0; 2826ad8b1aafSjsg 2827ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2828ad8b1aafSjsg return -EOPNOTSUPP; 2829ad8b1aafSjsg 2830ad8b1aafSjsg if (smu->ppt_funcs->display_clock_voltage_request) 2831ad8b1aafSjsg ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); 2832ad8b1aafSjsg 
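	/* With no backend hook, ret stays 0 and the request reads as satisfied. */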
2833ad8b1aafSjsg return ret; 2834ad8b1aafSjsg } 2835ad8b1aafSjsg 2836ad8b1aafSjsg 28375ca02815Sjsg static int smu_display_disable_memory_clock_switch(void *handle, 28385ca02815Sjsg bool disable_memory_clock_switch) 2839ad8b1aafSjsg { 28405ca02815Sjsg struct smu_context *smu = handle; 2841ad8b1aafSjsg int ret = -EINVAL; 2842ad8b1aafSjsg 2843ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2844ad8b1aafSjsg return -EOPNOTSUPP; 2845ad8b1aafSjsg 2846ad8b1aafSjsg if (smu->ppt_funcs->display_disable_memory_clock_switch) 2847ad8b1aafSjsg ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); 2848ad8b1aafSjsg 2849ad8b1aafSjsg return ret; 2850ad8b1aafSjsg } 2851ad8b1aafSjsg 28525ca02815Sjsg static int smu_set_xgmi_pstate(void *handle, 2853ad8b1aafSjsg uint32_t pstate) 2854ad8b1aafSjsg { 28555ca02815Sjsg struct smu_context *smu = handle; 2856ad8b1aafSjsg int ret = 0; 2857ad8b1aafSjsg 2858ad8b1aafSjsg if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) 2859ad8b1aafSjsg return -EOPNOTSUPP; 2860ad8b1aafSjsg 2861ad8b1aafSjsg if (smu->ppt_funcs->set_xgmi_pstate) 2862ad8b1aafSjsg ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); 2863ad8b1aafSjsg 2864ad8b1aafSjsg if (ret) 2865ad8b1aafSjsg dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); 2866ad8b1aafSjsg 2867ad8b1aafSjsg return ret; 2868ad8b1aafSjsg } 2869ad8b1aafSjsg 28705ca02815Sjsg static int smu_get_baco_capability(void *handle, bool *cap) 2871ad8b1aafSjsg { 28725ca02815Sjsg struct smu_context *smu = handle; 2873ad8b1aafSjsg 28745ca02815Sjsg *cap = false; 2875ad8b1aafSjsg 2876ad8b1aafSjsg if (!smu->pm_enabled) 28775ca02815Sjsg return 0; 2878ad8b1aafSjsg 2879ad8b1aafSjsg if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) 28805ca02815Sjsg *cap = smu->ppt_funcs->baco_is_support(smu); 2881ad8b1aafSjsg 28821bb76ff1Sjsg return 0; 2883ad8b1aafSjsg } 2884ad8b1aafSjsg 28855ca02815Sjsg static int smu_baco_set_state(void *handle, int state) 2886ad8b1aafSjsg { 28875ca02815Sjsg struct smu_context *smu = handle; 2888ad8b1aafSjsg int ret = 0; 2889ad8b1aafSjsg 2890ad8b1aafSjsg if (!smu->pm_enabled) 2891ad8b1aafSjsg return -EOPNOTSUPP; 2892ad8b1aafSjsg 28935ca02815Sjsg if (state == 0) { 28945ca02815Sjsg if (smu->ppt_funcs->baco_exit) 28955ca02815Sjsg ret = smu->ppt_funcs->baco_exit(smu); 28965ca02815Sjsg } else if (state == 1) { 2897ad8b1aafSjsg if (smu->ppt_funcs->baco_enter) 2898ad8b1aafSjsg ret = smu->ppt_funcs->baco_enter(smu); 28995ca02815Sjsg } else { 29005ca02815Sjsg return -EINVAL; 2901ad8b1aafSjsg } 2902ad8b1aafSjsg 2903ad8b1aafSjsg if (ret) 29045ca02815Sjsg dev_err(smu->adev->dev, "Failed to %s BACO state!\n", 29055ca02815Sjsg (state)?"enter":"exit"); 2906ad8b1aafSjsg 2907ad8b1aafSjsg return ret; 2908ad8b1aafSjsg } 2909ad8b1aafSjsg 2910ad8b1aafSjsg bool smu_mode1_reset_is_support(struct smu_context *smu) 2911ad8b1aafSjsg { 2912ad8b1aafSjsg bool ret = false; 2913ad8b1aafSjsg 2914ad8b1aafSjsg if (!smu->pm_enabled) 2915ad8b1aafSjsg return false; 2916ad8b1aafSjsg 2917ad8b1aafSjsg if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) 2918ad8b1aafSjsg ret = smu->ppt_funcs->mode1_reset_is_support(smu); 2919ad8b1aafSjsg 2920ad8b1aafSjsg return ret; 2921ad8b1aafSjsg } 2922ad8b1aafSjsg 29235ca02815Sjsg bool smu_mode2_reset_is_support(struct smu_context *smu) 29245ca02815Sjsg { 29255ca02815Sjsg bool ret = false; 29265ca02815Sjsg 29275ca02815Sjsg if (!smu->pm_enabled) 29285ca02815Sjsg return false; 29295ca02815Sjsg 29305ca02815Sjsg if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) 

int smu_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode1_reset)
		ret = smu->ppt_funcs->mode1_reset(smu);

	return ret;
}

static int smu_mode2_reset(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	if (ret)
		dev_err(smu->adev->dev, "Mode2 reset failed!\n");

	return ret;
}

static int smu_enable_gfx_features(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_gfx_features)
		ret = smu->ppt_funcs->enable_gfx_features(smu);

	if (ret)
		dev_err(smu->adev->dev, "enable gfx features failed!\n");

	return ret;
}

static int smu_get_max_sustainable_clocks_by_dc(void *handle,
						struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	return ret;
}

static int smu_get_uclk_dpm_states(void *handle,
				   unsigned int *clock_values_in_khz,
				   unsigned int *num_states)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	return ret;
}

static enum amd_pm_state_type smu_get_current_power_state(void *handle)
{
	struct smu_context *smu = handle;
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	return pm_state;
}
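
/*
 * Note: like the other wrappers above, smu_get_current_power_state()
 * reports -EOPNOTSUPP when DPM is disabled, even though its return type
 * is enum amd_pm_state_type; callers are expected to treat negative
 * values as errors rather than as a valid power state.
 */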

static int smu_get_dpm_clock_table(void *handle,
				   struct dpm_clocks *clock_table)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	return ret;
}

static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu->ppt_funcs->get_gpu_metrics)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->get_gpu_metrics(smu, table);
}

static int smu_enable_mgpu_fan_boost(void *handle)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (smu->ppt_funcs->enable_mgpu_fan_boost)
		ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);

	return ret;
}

static int smu_gfx_state_change_set(void *handle,
				    uint32_t state)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (smu->ppt_funcs->gfx_state_change_set)
		ret = smu->ppt_funcs->gfx_state_change_set(smu, state);

	return ret;
}

int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (smu->ppt_funcs->smu_handle_passthrough_sbr)
		ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);

	return ret;
}

int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
	int ret = -EOPNOTSUPP;

	if (smu->ppt_funcs &&
	    smu->ppt_funcs->get_ecc_info)
		ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);

	return ret;
}

static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (memory_pool->bo) {
		*addr = memory_pool->cpu_addr;
		*size = memory_pool->size;
	}

	return 0;
}
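
/*
 * swsmu_pm_funcs is the amd_pm_funcs vtable that the swSMU driver
 * exposes to the rest of amdgpu; sysfs handlers, the core driver and
 * display code dispatch power-management requests through this table
 * rather than calling the static wrappers above directly.
 */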

static const struct amd_pm_funcs swsmu_pm_funcs = {
	/* export for sysfs */
	.set_fan_control_mode = smu_set_fan_control_mode,
	.get_fan_control_mode = smu_get_fan_control_mode,
	.set_fan_speed_pwm = smu_set_fan_speed_pwm,
	.get_fan_speed_pwm = smu_get_fan_speed_pwm,
	.force_clock_level = smu_force_ppclk_levels,
	.print_clock_levels = smu_print_ppclk_levels,
	.emit_clock_levels = smu_emit_ppclk_levels,
	.force_performance_level = smu_force_performance_level,
	.read_sensor = smu_read_sensor,
	.get_apu_thermal_limit = smu_get_apu_thermal_limit,
	.set_apu_thermal_limit = smu_set_apu_thermal_limit,
	.get_performance_level = smu_get_performance_level,
	.get_current_power_state = smu_get_current_power_state,
	.get_fan_speed_rpm = smu_get_fan_speed_rpm,
	.set_fan_speed_rpm = smu_set_fan_speed_rpm,
	.get_pp_num_states = smu_get_power_num_states,
	.get_pp_table = smu_sys_get_pp_table,
	.set_pp_table = smu_sys_set_pp_table,
	.switch_power_profile = smu_switch_power_profile,
	/* export to amdgpu */
	.dispatch_tasks = smu_handle_dpm_task,
	.load_firmware = smu_load_microcode,
	.set_powergating_by_smu = smu_dpm_set_power_gate,
	.set_power_limit = smu_set_power_limit,
	.get_power_limit = smu_get_power_limit,
	.get_power_profile_mode = smu_get_power_profile_mode,
	.set_power_profile_mode = smu_set_power_profile_mode,
	.odn_edit_dpm_table = smu_od_edit_dpm_table,
	.set_mp1_state = smu_set_mp1_state,
	.gfx_state_change_set = smu_gfx_state_change_set,
	/* export to DC */
	.get_sclk = smu_get_sclk,
	.get_mclk = smu_get_mclk,
	.display_configuration_change = smu_display_configuration_change,
	.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
	.display_clock_voltage_request = smu_display_clock_voltage_request,
	.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
	.set_active_display_count = smu_set_display_count,
	.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
	.get_asic_baco_capability = smu_get_baco_capability,
	.set_asic_baco_state = smu_baco_set_state,
	.get_ppfeature_status = smu_sys_get_pp_feature_mask,
	.set_ppfeature_status = smu_sys_set_pp_feature_mask,
	.asic_reset_mode_2 = smu_mode2_reset,
	.asic_reset_enable_gfx_features = smu_enable_gfx_features,
	.set_df_cstate = smu_set_df_cstate,
	.set_xgmi_pstate = smu_set_xgmi_pstate,
	.get_gpu_metrics = smu_sys_get_gpu_metrics,
	.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
	.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
	.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
	.get_uclk_dpm_states = smu_get_uclk_dpm_states,
	.get_dpm_clock_table = smu_get_dpm_clock_table,
	.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};
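
/*
 * Calling-convention sketch (illustrative; mirrors how the amdgpu DPM
 * layer consumes the table, with field names assumed from
 * kgd_pp_interface.h):
 *
 *	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 *	void *pp_handle = adev->powerplay.pp_handle;
 *	bool baco_cap = false;
 *
 *	if (pp_funcs && pp_funcs->get_asic_baco_capability)
 *		pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap);
 */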

int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
		       uint64_t event_arg)
{
	int ret = -EINVAL;

	if (smu->ppt_funcs->wait_for_event)
		ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);

	return ret;
}

int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
{
	if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
		return -EOPNOTSUPP;

	/* Confirm the buffer allocated is of correct size */
	if (size != smu->stb_context.stb_buf_size)
		return -EINVAL;

	/*
	 * There is no need to take the smu mutex here: the STB is read
	 * directly through MMIO and does not go through the SMU messaging
	 * route (for now at least). Register access relies on the
	 * implementation's internal locking.
	 */
	return smu->ppt_funcs->stb_collect_info(smu, buf, size);
}

#if defined(CONFIG_DEBUG_FS)

static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;
	unsigned char *buf;
	int r;

	buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
	if (r)
		goto out;

	filp->private_data = buf;

	return 0;

out:
	kvfree(buf);
	return r;
}

static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
				    loff_t *pos)
{
	struct amdgpu_device *adev = filp->f_inode->i_private;
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!filp->private_data)
		return -EINVAL;

	return simple_read_from_buffer(buf,
				       size,
				       pos, filp->private_data,
				       smu->stb_context.stb_buf_size);
}

static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
{
	kvfree(filp->private_data);
	filp->private_data = NULL;

	return 0;
}
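
/*
 * Userspace usage sketch (assuming debugfs is mounted at
 * /sys/kernel/debug and the card is DRM minor 0):
 *
 *	# cat /sys/kernel/debug/dri/0/amdgpu_smu_stb_dump > stb.bin
 *
 * Each open of the file snapshots the STB into a kernel buffer; reads
 * then page through that snapshot until release frees it.
 */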

/*
 * We have to define not only a read method but also open and release
 * methods, because .read returns at most PAGE_SIZE of data per call and
 * may therefore be invoked multiple times. We allocate the STB buffer
 * in .open and release it in .release.
 */
static const struct file_operations smu_stb_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = smu_stb_debugfs_open,
	.read = smu_stb_debugfs_read,
	.release = smu_stb_debugfs_release,
	.llseek = default_llseek,
};

#endif

void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu || (!smu->stb_context.stb_buf_size))
		return;

	debugfs_create_file_size("amdgpu_smu_stb_dump",
				 S_IRUSR,
				 adev_to_drm(adev)->primary->debugfs_root,
				 adev,
				 &smu_stb_debugfs_fops,
				 smu->stb_context.stb_buf_size);
#endif
}

int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
		ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);

	return ret;
}

int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
{
	int ret = 0;

	if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
		ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);

	return ret;
}
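
/*
 * smu_send_hbm_bad_pages_num() and smu_send_hbm_bad_channel_flag() are
 * intended for the amdgpu RAS/UMC path after bad-page retirement, so the
 * SMU firmware can account for retired HBM pages. A sketch of the
 * expected call site (structure and field names assumed, for
 * illustration only):
 *
 *	if (con->eeprom_control.ras_num_recs)
 *		smu_send_hbm_bad_pages_num(smu, con->eeprom_control.ras_num_recs);
 */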