/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>

static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};

const char * const amdgpu_pp_profile_name[] = {
	"BOOTUP_DEFAULT",
	"3D_FULL_SCREEN",
	"POWER_SAVING",
	"VIDEO",
	"VR",
	"COMPUTE",
	"CUSTOM",
	"WINDOW_3D",
	"CAPPED",
	"UNCAPPED",
};

#ifdef __linux__

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
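 * As a usage sketch (card0 below is only an example; substitute the DRM
 * card corresponding to the GPU), the current state can be read and a new
 * one selected from a shell:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	echo battery > /sys/class/drm/card0/device/power_dpm_state
 *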
 */

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_get_current_power_state(adev, &pm);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%s\n",
			  (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			  (pm == POWER_STATE_TYPE_BALANCED) ?
"balanced" : "performance"); 124ad8b1aafSjsg } 125ad8b1aafSjsg 126ad8b1aafSjsg static ssize_t amdgpu_set_power_dpm_state(struct device *dev, 127ad8b1aafSjsg struct device_attribute *attr, 128ad8b1aafSjsg const char *buf, 129ad8b1aafSjsg size_t count) 130ad8b1aafSjsg { 131ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 132ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 133ad8b1aafSjsg enum amd_pm_state_type state; 134ad8b1aafSjsg int ret; 135ad8b1aafSjsg 136ad8b1aafSjsg if (amdgpu_in_reset(adev)) 137ad8b1aafSjsg return -EPERM; 1385ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 1395ca02815Sjsg return -EPERM; 140ad8b1aafSjsg 141ad8b1aafSjsg if (strncmp("battery", buf, strlen("battery")) == 0) 142ad8b1aafSjsg state = POWER_STATE_TYPE_BATTERY; 143ad8b1aafSjsg else if (strncmp("balanced", buf, strlen("balanced")) == 0) 144ad8b1aafSjsg state = POWER_STATE_TYPE_BALANCED; 145ad8b1aafSjsg else if (strncmp("performance", buf, strlen("performance")) == 0) 146ad8b1aafSjsg state = POWER_STATE_TYPE_PERFORMANCE; 147ad8b1aafSjsg else 148ad8b1aafSjsg return -EINVAL; 149ad8b1aafSjsg 150ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 151ad8b1aafSjsg if (ret < 0) { 152ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 153ad8b1aafSjsg return ret; 154ad8b1aafSjsg } 155ad8b1aafSjsg 1561bb76ff1Sjsg amdgpu_dpm_set_power_state(adev, state); 157ad8b1aafSjsg 158ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 159ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 160ad8b1aafSjsg 161ad8b1aafSjsg return count; 162ad8b1aafSjsg } 163ad8b1aafSjsg 164ad8b1aafSjsg 165ad8b1aafSjsg /** 166ad8b1aafSjsg * DOC: power_dpm_force_performance_level 167ad8b1aafSjsg * 168ad8b1aafSjsg * The amdgpu driver provides a sysfs API for adjusting certain power 169ad8b1aafSjsg * related parameters. The file power_dpm_force_performance_level is 170ad8b1aafSjsg * used for this. It accepts the following arguments: 171ad8b1aafSjsg * 172ad8b1aafSjsg * - auto 173ad8b1aafSjsg * 174ad8b1aafSjsg * - low 175ad8b1aafSjsg * 176ad8b1aafSjsg * - high 177ad8b1aafSjsg * 178ad8b1aafSjsg * - manual 179ad8b1aafSjsg * 180ad8b1aafSjsg * - profile_standard 181ad8b1aafSjsg * 182ad8b1aafSjsg * - profile_min_sclk 183ad8b1aafSjsg * 184ad8b1aafSjsg * - profile_min_mclk 185ad8b1aafSjsg * 186ad8b1aafSjsg * - profile_peak 187ad8b1aafSjsg * 188ad8b1aafSjsg * auto 189ad8b1aafSjsg * 190ad8b1aafSjsg * When auto is selected, the driver will attempt to dynamically select 191ad8b1aafSjsg * the optimal power profile for current conditions in the driver. 192ad8b1aafSjsg * 193ad8b1aafSjsg * low 194ad8b1aafSjsg * 195ad8b1aafSjsg * When low is selected, the clocks are forced to the lowest power state. 196ad8b1aafSjsg * 197ad8b1aafSjsg * high 198ad8b1aafSjsg * 199ad8b1aafSjsg * When high is selected, the clocks are forced to the highest power state. 200ad8b1aafSjsg * 201ad8b1aafSjsg * manual 202ad8b1aafSjsg * 203ad8b1aafSjsg * When manual is selected, the user can manually adjust which power states 204ad8b1aafSjsg * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, 205ad8b1aafSjsg * and pp_dpm_pcie files and adjust the power state transition heuristics 206ad8b1aafSjsg * via the pp_power_profile_mode sysfs file. 
207ad8b1aafSjsg * 208ad8b1aafSjsg * profile_standard 209ad8b1aafSjsg * profile_min_sclk 210ad8b1aafSjsg * profile_min_mclk 211ad8b1aafSjsg * profile_peak 212ad8b1aafSjsg * 213ad8b1aafSjsg * When the profiling modes are selected, clock and power gating are 214ad8b1aafSjsg * disabled and the clocks are set for different profiling cases. This 215ad8b1aafSjsg * mode is recommended for profiling specific work loads where you do 216ad8b1aafSjsg * not want clock or power gating for clock fluctuation to interfere 217ad8b1aafSjsg * with your results. profile_standard sets the clocks to a fixed clock 218ad8b1aafSjsg * level which varies from asic to asic. profile_min_sclk forces the sclk 219ad8b1aafSjsg * to the lowest level. profile_min_mclk forces the mclk to the lowest level. 220ad8b1aafSjsg * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. 221ad8b1aafSjsg * 222ad8b1aafSjsg */ 223ad8b1aafSjsg 224ad8b1aafSjsg static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, 225ad8b1aafSjsg struct device_attribute *attr, 226ad8b1aafSjsg char *buf) 227ad8b1aafSjsg { 228ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 229ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 230ad8b1aafSjsg enum amd_dpm_forced_level level = 0xff; 231ad8b1aafSjsg int ret; 232ad8b1aafSjsg 233ad8b1aafSjsg if (amdgpu_in_reset(adev)) 234ad8b1aafSjsg return -EPERM; 2355ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 2365ca02815Sjsg return -EPERM; 237ad8b1aafSjsg 238ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 239ad8b1aafSjsg if (ret < 0) { 240ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 241ad8b1aafSjsg return ret; 242ad8b1aafSjsg } 243ad8b1aafSjsg 244ad8b1aafSjsg level = amdgpu_dpm_get_performance_level(adev); 245ad8b1aafSjsg 246ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 247ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 248ad8b1aafSjsg 2495ca02815Sjsg return sysfs_emit(buf, "%s\n", 250ad8b1aafSjsg (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 251ad8b1aafSjsg (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 252ad8b1aafSjsg (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 253ad8b1aafSjsg (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 254ad8b1aafSjsg (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 255ad8b1aafSjsg (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 256ad8b1aafSjsg (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 257ad8b1aafSjsg (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" : 2585ca02815Sjsg (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? 
"perf_determinism" : 259ad8b1aafSjsg "unknown"); 260ad8b1aafSjsg } 261ad8b1aafSjsg 262ad8b1aafSjsg static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, 263ad8b1aafSjsg struct device_attribute *attr, 264ad8b1aafSjsg const char *buf, 265ad8b1aafSjsg size_t count) 266ad8b1aafSjsg { 267ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 268ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 269ad8b1aafSjsg enum amd_dpm_forced_level level; 270ad8b1aafSjsg int ret = 0; 271ad8b1aafSjsg 272ad8b1aafSjsg if (amdgpu_in_reset(adev)) 273ad8b1aafSjsg return -EPERM; 2745ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 2755ca02815Sjsg return -EPERM; 276ad8b1aafSjsg 277ad8b1aafSjsg if (strncmp("low", buf, strlen("low")) == 0) { 278ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_LOW; 279ad8b1aafSjsg } else if (strncmp("high", buf, strlen("high")) == 0) { 280ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_HIGH; 281ad8b1aafSjsg } else if (strncmp("auto", buf, strlen("auto")) == 0) { 282ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_AUTO; 283ad8b1aafSjsg } else if (strncmp("manual", buf, strlen("manual")) == 0) { 284ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_MANUAL; 285ad8b1aafSjsg } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 286ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 287ad8b1aafSjsg } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 288ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 289ad8b1aafSjsg } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 290ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 291ad8b1aafSjsg } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 292ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 293ad8b1aafSjsg } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 294ad8b1aafSjsg level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 2955ca02815Sjsg } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) { 2965ca02815Sjsg level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM; 297ad8b1aafSjsg } else { 298ad8b1aafSjsg return -EINVAL; 299ad8b1aafSjsg } 300ad8b1aafSjsg 301ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 302ad8b1aafSjsg if (ret < 0) { 303ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 304ad8b1aafSjsg return ret; 305ad8b1aafSjsg } 306ad8b1aafSjsg 3071bb76ff1Sjsg mutex_lock(&adev->pm.stable_pstate_ctx_lock); 3081bb76ff1Sjsg if (amdgpu_dpm_force_performance_level(adev, level)) { 309ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 310ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 3111bb76ff1Sjsg mutex_unlock(&adev->pm.stable_pstate_ctx_lock); 312ad8b1aafSjsg return -EINVAL; 313ad8b1aafSjsg } 3141bb76ff1Sjsg /* override whatever a user ctx may have set */ 3151bb76ff1Sjsg adev->pm.stable_pstate_ctx = NULL; 3161bb76ff1Sjsg mutex_unlock(&adev->pm.stable_pstate_ctx_lock); 317ad8b1aafSjsg 318ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 319ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 320ad8b1aafSjsg 321ad8b1aafSjsg return count; 322ad8b1aafSjsg } 323ad8b1aafSjsg 324ad8b1aafSjsg static ssize_t amdgpu_get_pp_num_states(struct device *dev, 325ad8b1aafSjsg struct device_attribute *attr, 326ad8b1aafSjsg char *buf) 327ad8b1aafSjsg { 328ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 329ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 330ad8b1aafSjsg struct pp_states_info data; 3315ca02815Sjsg uint32_t i; 3325ca02815Sjsg int buf_len, 
ret; 333ad8b1aafSjsg 334ad8b1aafSjsg if (amdgpu_in_reset(adev)) 335ad8b1aafSjsg return -EPERM; 3365ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 3375ca02815Sjsg return -EPERM; 338ad8b1aafSjsg 339ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 340ad8b1aafSjsg if (ret < 0) { 341ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 342ad8b1aafSjsg return ret; 343ad8b1aafSjsg } 344ad8b1aafSjsg 3451bb76ff1Sjsg if (amdgpu_dpm_get_pp_num_states(adev, &data)) 346ad8b1aafSjsg memset(&data, 0, sizeof(data)); 347ad8b1aafSjsg 348ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 349ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 350ad8b1aafSjsg 3515ca02815Sjsg buf_len = sysfs_emit(buf, "states: %d\n", data.nums); 352ad8b1aafSjsg for (i = 0; i < data.nums; i++) 3535ca02815Sjsg buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i, 354ad8b1aafSjsg (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 355ad8b1aafSjsg (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 356ad8b1aafSjsg (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 357ad8b1aafSjsg (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 358ad8b1aafSjsg 359ad8b1aafSjsg return buf_len; 360ad8b1aafSjsg } 361ad8b1aafSjsg 362ad8b1aafSjsg static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 363ad8b1aafSjsg struct device_attribute *attr, 364ad8b1aafSjsg char *buf) 365ad8b1aafSjsg { 366ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 367ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 3685ca02815Sjsg struct pp_states_info data = {0}; 369ad8b1aafSjsg enum amd_pm_state_type pm = 0; 370ad8b1aafSjsg int i = 0, ret = 0; 371ad8b1aafSjsg 372ad8b1aafSjsg if (amdgpu_in_reset(adev)) 373ad8b1aafSjsg return -EPERM; 3745ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 3755ca02815Sjsg return -EPERM; 376ad8b1aafSjsg 377ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 378ad8b1aafSjsg if (ret < 0) { 379ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 380ad8b1aafSjsg return ret; 381ad8b1aafSjsg } 382ad8b1aafSjsg 3831bb76ff1Sjsg amdgpu_dpm_get_current_power_state(adev, &pm); 3841bb76ff1Sjsg 3851bb76ff1Sjsg ret = amdgpu_dpm_get_pp_num_states(adev, &data); 386ad8b1aafSjsg 387ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 388ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 389ad8b1aafSjsg 3901bb76ff1Sjsg if (ret) 3911bb76ff1Sjsg return ret; 3921bb76ff1Sjsg 393ad8b1aafSjsg for (i = 0; i < data.nums; i++) { 394ad8b1aafSjsg if (pm == data.states[i]) 395ad8b1aafSjsg break; 396ad8b1aafSjsg } 397ad8b1aafSjsg 398ad8b1aafSjsg if (i == data.nums) 399ad8b1aafSjsg i = -EINVAL; 400ad8b1aafSjsg 4015ca02815Sjsg return sysfs_emit(buf, "%d\n", i); 402ad8b1aafSjsg } 403ad8b1aafSjsg 404ad8b1aafSjsg static ssize_t amdgpu_get_pp_force_state(struct device *dev, 405ad8b1aafSjsg struct device_attribute *attr, 406ad8b1aafSjsg char *buf) 407ad8b1aafSjsg { 408ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 409ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 410ad8b1aafSjsg 411ad8b1aafSjsg if (amdgpu_in_reset(adev)) 412ad8b1aafSjsg return -EPERM; 4135ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 4145ca02815Sjsg return -EPERM; 415ad8b1aafSjsg 4161bb76ff1Sjsg if (adev->pm.pp_force_state_enabled) 417ad8b1aafSjsg return amdgpu_get_pp_cur_state(dev, attr, buf); 418ad8b1aafSjsg else 4195ca02815Sjsg return sysfs_emit(buf, "\n"); 420ad8b1aafSjsg } 421ad8b1aafSjsg 422ad8b1aafSjsg static ssize_t amdgpu_set_pp_force_state(struct device *dev, 423ad8b1aafSjsg struct 
device_attribute *attr, 424ad8b1aafSjsg const char *buf, 425ad8b1aafSjsg size_t count) 426ad8b1aafSjsg { 427ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 428ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 429ad8b1aafSjsg enum amd_pm_state_type state = 0; 4301bb76ff1Sjsg struct pp_states_info data; 431ad8b1aafSjsg unsigned long idx; 432ad8b1aafSjsg int ret; 433ad8b1aafSjsg 434ad8b1aafSjsg if (amdgpu_in_reset(adev)) 435ad8b1aafSjsg return -EPERM; 4365ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 4375ca02815Sjsg return -EPERM; 438ad8b1aafSjsg 4391bb76ff1Sjsg adev->pm.pp_force_state_enabled = false; 4401bb76ff1Sjsg 441ad8b1aafSjsg if (strlen(buf) == 1) 4421bb76ff1Sjsg return count; 443ad8b1aafSjsg 444ad8b1aafSjsg ret = kstrtoul(buf, 0, &idx); 445ad8b1aafSjsg if (ret || idx >= ARRAY_SIZE(data.states)) 446ad8b1aafSjsg return -EINVAL; 447ad8b1aafSjsg 448ad8b1aafSjsg idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); 449ad8b1aafSjsg 450ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 451ad8b1aafSjsg if (ret < 0) { 452ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 453ad8b1aafSjsg return ret; 454ad8b1aafSjsg } 455ad8b1aafSjsg 4561bb76ff1Sjsg ret = amdgpu_dpm_get_pp_num_states(adev, &data); 4571bb76ff1Sjsg if (ret) 4581bb76ff1Sjsg goto err_out; 4591bb76ff1Sjsg 4601bb76ff1Sjsg state = data.states[idx]; 4611bb76ff1Sjsg 462ad8b1aafSjsg /* only set user selected power states */ 463ad8b1aafSjsg if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 464ad8b1aafSjsg state != POWER_STATE_TYPE_DEFAULT) { 4651bb76ff1Sjsg ret = amdgpu_dpm_dispatch_task(adev, 466ad8b1aafSjsg AMD_PP_TASK_ENABLE_USER_STATE, &state); 4671bb76ff1Sjsg if (ret) 4681bb76ff1Sjsg goto err_out; 4691bb76ff1Sjsg 4701bb76ff1Sjsg adev->pm.pp_force_state_enabled = true; 471ad8b1aafSjsg } 472ad8b1aafSjsg 4731bb76ff1Sjsg pm_runtime_mark_last_busy(ddev->dev); 4741bb76ff1Sjsg pm_runtime_put_autosuspend(ddev->dev); 4751bb76ff1Sjsg 476ad8b1aafSjsg return count; 4771bb76ff1Sjsg 4781bb76ff1Sjsg err_out: 4791bb76ff1Sjsg pm_runtime_mark_last_busy(ddev->dev); 4801bb76ff1Sjsg pm_runtime_put_autosuspend(ddev->dev); 4811bb76ff1Sjsg return ret; 482ad8b1aafSjsg } 483ad8b1aafSjsg 484ad8b1aafSjsg /** 485ad8b1aafSjsg * DOC: pp_table 486ad8b1aafSjsg * 487ad8b1aafSjsg * The amdgpu driver provides a sysfs API for uploading new powerplay 488ad8b1aafSjsg * tables. The file pp_table is used for this. Reading the file 489ad8b1aafSjsg * will dump the current power play table. Writing to the file 490ad8b1aafSjsg * will attempt to upload a new powerplay table and re-initialize 491ad8b1aafSjsg * powerplay using that new table. 
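 *
 * A rough usage sketch (paths are illustrative, and the uploaded file must
 * be a powerplay table valid for the specific board, which is not shown
 * here): the current table can be saved and written back from a shell:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bin
 *	cat /tmp/pp_table.bin > /sys/class/drm/card0/device/pp_table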
492ad8b1aafSjsg * 493ad8b1aafSjsg */ 494ad8b1aafSjsg 495ad8b1aafSjsg static ssize_t amdgpu_get_pp_table(struct device *dev, 496ad8b1aafSjsg struct device_attribute *attr, 497ad8b1aafSjsg char *buf) 498ad8b1aafSjsg { 499ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 500ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 501ad8b1aafSjsg char *table = NULL; 502ad8b1aafSjsg int size, ret; 503ad8b1aafSjsg 504ad8b1aafSjsg if (amdgpu_in_reset(adev)) 505ad8b1aafSjsg return -EPERM; 5065ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 5075ca02815Sjsg return -EPERM; 508ad8b1aafSjsg 509ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 510ad8b1aafSjsg if (ret < 0) { 511ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 512ad8b1aafSjsg return ret; 513ad8b1aafSjsg } 514ad8b1aafSjsg 515ad8b1aafSjsg size = amdgpu_dpm_get_pp_table(adev, &table); 5161bb76ff1Sjsg 517ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 518ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 5191bb76ff1Sjsg 5201bb76ff1Sjsg if (size <= 0) 521ad8b1aafSjsg return size; 522ad8b1aafSjsg 523ad8b1aafSjsg if (size >= PAGE_SIZE) 524ad8b1aafSjsg size = PAGE_SIZE - 1; 525ad8b1aafSjsg 526ad8b1aafSjsg memcpy(buf, table, size); 527ad8b1aafSjsg 528ad8b1aafSjsg return size; 529ad8b1aafSjsg } 530ad8b1aafSjsg 531ad8b1aafSjsg static ssize_t amdgpu_set_pp_table(struct device *dev, 532ad8b1aafSjsg struct device_attribute *attr, 533ad8b1aafSjsg const char *buf, 534ad8b1aafSjsg size_t count) 535ad8b1aafSjsg { 536ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 537ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 538ad8b1aafSjsg int ret = 0; 539ad8b1aafSjsg 540ad8b1aafSjsg if (amdgpu_in_reset(adev)) 541ad8b1aafSjsg return -EPERM; 5425ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 5435ca02815Sjsg return -EPERM; 544ad8b1aafSjsg 545ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 546ad8b1aafSjsg if (ret < 0) { 547ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 548ad8b1aafSjsg return ret; 549ad8b1aafSjsg } 550ad8b1aafSjsg 5515ca02815Sjsg ret = amdgpu_dpm_set_pp_table(adev, buf, count); 552ad8b1aafSjsg 553ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 554ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 555ad8b1aafSjsg 5561bb76ff1Sjsg if (ret) 5571bb76ff1Sjsg return ret; 5581bb76ff1Sjsg 559ad8b1aafSjsg return count; 560ad8b1aafSjsg } 561ad8b1aafSjsg 562ad8b1aafSjsg /** 563ad8b1aafSjsg * DOC: pp_od_clk_voltage 564ad8b1aafSjsg * 565ad8b1aafSjsg * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages 566ad8b1aafSjsg * in each power level within a power state. The pp_od_clk_voltage is used for 567ad8b1aafSjsg * this. 568ad8b1aafSjsg * 569ad8b1aafSjsg * Note that the actual memory controller clock rate are exposed, not 570ad8b1aafSjsg * the effective memory clock of the DRAMs. 
To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve. This is
 *   available for Vega20 and NV1X.
 *
 * - voltage offset for the six anchor points of the v/f curve labeled
 *   OD_VDDC_CURVE. They can be used to calibrate the v/f curve. This
 *   is only available for some SMU13 ASICs.
 *
 * - voltage offset (in mV) applied on target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder and Dimgrey
 *   Cavefish. For these ASICs, the target voltage calculation can be
 *   illustrated by "voltage = voltage calculated from v/f curve +
 *   overdrive vddgfx offset"
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * < For APUs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - a list of valid ranges for sclk labeled OD_RANGE
 *
 * < For VanGogh >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum and maximum core clocks labeled OD_CCLK
 *
 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz.
 *   "m 1 800" will update the maximum mclk to be 800 MHz. For core
 *   clocks on VanGogh, the string contains "p core index clock".
 *   E.g., "p 2 0 800" would set the minimum core clock on core
 *   2 to 800 MHz.
 *
 * For the sclk voltage curve:
 * - For NV1X, enter the new values by writing a string that
 *   contains "vc point clock voltage" to the file. The points
 *   are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will update
 *   point1 with clock set as 300 MHz and voltage as 600 mV. "vc 2
 *   1000 1000" will update point3 with clock set as 1000 MHz and
 *   voltage 1000 mV.
 * - For SMU13 ASICs, enter the new values by writing a string that
 *   contains "vc anchor_point_index voltage_offset" to the file.
 *   There are six anchor points defined on the v/f curve in total,
 *   indexed 0 - 5.
 *   - "vc 0 10" will update the voltage offset for point1 as 10 mV.
 *   - "vc 5 -10" will update the voltage offset for point6 as -10 mV.
 *
 * To update the voltage offset applied for gfxclk/voltage calculation,
 * enter the new value by writing a string that contains "vo offset".
 * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey
 * Cavefish; the offset can be a positive or negative value.
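 *
 * As a hedged illustration (the card index and the clock value below are
 * placeholders; valid values must be taken from the OD_RANGE output of the
 * GPU being tuned), a minimal overdrive sequence could look like:
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo "s 0 500" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *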
7095ca02815Sjsg * 710ad8b1aafSjsg * - When you have edited all of the states as needed, write "c" (commit) 711ad8b1aafSjsg * to the file to commit your changes 712ad8b1aafSjsg * 713ad8b1aafSjsg * - If you want to reset to the default power levels, write "r" (reset) 714ad8b1aafSjsg * to the file to reset them 715ad8b1aafSjsg * 716ad8b1aafSjsg */ 717ad8b1aafSjsg 718ad8b1aafSjsg static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 719ad8b1aafSjsg struct device_attribute *attr, 720ad8b1aafSjsg const char *buf, 721ad8b1aafSjsg size_t count) 722ad8b1aafSjsg { 723ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 724ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 725ad8b1aafSjsg int ret; 726ad8b1aafSjsg uint32_t parameter_size = 0; 727ad8b1aafSjsg long parameter[64]; 728ad8b1aafSjsg char buf_cpy[128]; 729ad8b1aafSjsg char *tmp_str; 730ad8b1aafSjsg char *sub_str; 731ad8b1aafSjsg const char delimiter[3] = {' ', '\n', '\0'}; 732ad8b1aafSjsg uint32_t type; 733ad8b1aafSjsg 734ad8b1aafSjsg if (amdgpu_in_reset(adev)) 735ad8b1aafSjsg return -EPERM; 7365ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 7375ca02815Sjsg return -EPERM; 738ad8b1aafSjsg 739385c2c27Sjsg if (count > 127 || count == 0) 740ad8b1aafSjsg return -EINVAL; 741ad8b1aafSjsg 742ad8b1aafSjsg if (*buf == 's') 743ad8b1aafSjsg type = PP_OD_EDIT_SCLK_VDDC_TABLE; 7445ca02815Sjsg else if (*buf == 'p') 7455ca02815Sjsg type = PP_OD_EDIT_CCLK_VDDC_TABLE; 746ad8b1aafSjsg else if (*buf == 'm') 747ad8b1aafSjsg type = PP_OD_EDIT_MCLK_VDDC_TABLE; 748ad8b1aafSjsg else if (*buf == 'r') 749ad8b1aafSjsg type = PP_OD_RESTORE_DEFAULT_TABLE; 750ad8b1aafSjsg else if (*buf == 'c') 751ad8b1aafSjsg type = PP_OD_COMMIT_DPM_TABLE; 752ad8b1aafSjsg else if (!strncmp(buf, "vc", 2)) 753ad8b1aafSjsg type = PP_OD_EDIT_VDDC_CURVE; 7545ca02815Sjsg else if (!strncmp(buf, "vo", 2)) 7555ca02815Sjsg type = PP_OD_EDIT_VDDGFX_OFFSET; 756ad8b1aafSjsg else 757ad8b1aafSjsg return -EINVAL; 758ad8b1aafSjsg 759385c2c27Sjsg memcpy(buf_cpy, buf, count); 760385c2c27Sjsg buf_cpy[count] = 0; 761ad8b1aafSjsg 762ad8b1aafSjsg tmp_str = buf_cpy; 763ad8b1aafSjsg 7645ca02815Sjsg if ((type == PP_OD_EDIT_VDDC_CURVE) || 7655ca02815Sjsg (type == PP_OD_EDIT_VDDGFX_OFFSET)) 766ad8b1aafSjsg tmp_str++; 767ad8b1aafSjsg while (isspace(*++tmp_str)); 768ad8b1aafSjsg 7695ca02815Sjsg while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) { 7705ca02815Sjsg if (strlen(sub_str) == 0) 7715ca02815Sjsg continue; 772ad8b1aafSjsg ret = kstrtol(sub_str, 0, ¶meter[parameter_size]); 773ad8b1aafSjsg if (ret) 774ad8b1aafSjsg return -EINVAL; 775ad8b1aafSjsg parameter_size++; 776ad8b1aafSjsg 777385c2c27Sjsg if (!tmp_str) 778385c2c27Sjsg break; 779385c2c27Sjsg 780ad8b1aafSjsg while (isspace(*tmp_str)) 781ad8b1aafSjsg tmp_str++; 782ad8b1aafSjsg } 783ad8b1aafSjsg 784ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 785ad8b1aafSjsg if (ret < 0) { 786ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 787ad8b1aafSjsg return ret; 788ad8b1aafSjsg } 789ad8b1aafSjsg 7901bb76ff1Sjsg if (amdgpu_dpm_set_fine_grain_clk_vol(adev, 7911bb76ff1Sjsg type, 792ad8b1aafSjsg parameter, 7931bb76ff1Sjsg parameter_size)) 7941bb76ff1Sjsg goto err_out; 795ad8b1aafSjsg 7961bb76ff1Sjsg if (amdgpu_dpm_odn_edit_dpm_table(adev, type, 7971bb76ff1Sjsg parameter, parameter_size)) 7981bb76ff1Sjsg goto err_out; 799ad8b1aafSjsg 800ad8b1aafSjsg if (type == PP_OD_COMMIT_DPM_TABLE) { 8011bb76ff1Sjsg if (amdgpu_dpm_dispatch_task(adev, 802ad8b1aafSjsg AMD_PP_TASK_READJUST_POWER_STATE, 8031bb76ff1Sjsg NULL)) 8041bb76ff1Sjsg goto 
err_out;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;

err_out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return -EINVAL;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret;
	enum pp_clock_type od_clocks[6] = {
		OD_SCLK,
		OD_MCLK,
		OD_VDDC_CURVE,
		OD_RANGE,
		OD_VDDGFX_OFFSET,
		OD_CCLK,
	};
	uint clk_index;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
		if (ret)
			break;
	}
	if (ret == -ENOENT) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
	}

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this. It is
 * only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show the following:
 * - Current ppfeature masks
 * - List of all supported powerplay features with their naming,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature mask and write the
 * new ppfeature mask back.
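 *
 * For instance (the mask below is purely illustrative; a real value should
 * be derived from the masks this file reports for the GPU at hand):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x7fffffff > /sys/class/drm/card0/device/pp_features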
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ssize_t size;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	if (size <= 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enable the desired levels by writing a string of space-separated
 * level indices to the file ("echo xx xx xx > pp_dpm_sclk/mclk/pcie").
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported
 */

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
		enum pp_clock_type type,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int size = 0;
	int ret = 0;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
	if (ret == -ENOENT)
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);

	if (size == 0)
		size = sysfs_emit(buf, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
1018ad8b1aafSjsg */ 1019ad8b1aafSjsg #define AMDGPU_MASK_BUF_MAX (32 * 13) 1020ad8b1aafSjsg 1021ad8b1aafSjsg static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) 1022ad8b1aafSjsg { 1023ad8b1aafSjsg int ret; 1024ad8b1aafSjsg unsigned long level; 1025ad8b1aafSjsg char *sub_str = NULL; 1026ad8b1aafSjsg char *tmp; 1027ad8b1aafSjsg char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; 1028ad8b1aafSjsg const char delimiter[3] = {' ', '\n', '\0'}; 1029ad8b1aafSjsg size_t bytes; 1030ad8b1aafSjsg 1031ad8b1aafSjsg *mask = 0; 1032ad8b1aafSjsg 1033ad8b1aafSjsg bytes = min(count, sizeof(buf_cpy) - 1); 1034ad8b1aafSjsg memcpy(buf_cpy, buf, bytes); 1035ad8b1aafSjsg buf_cpy[bytes] = '\0'; 1036ad8b1aafSjsg tmp = buf_cpy; 10375ca02815Sjsg while ((sub_str = strsep(&tmp, delimiter)) != NULL) { 1038ad8b1aafSjsg if (strlen(sub_str)) { 1039ad8b1aafSjsg ret = kstrtoul(sub_str, 0, &level); 1040ad8b1aafSjsg if (ret || level > 31) 1041ad8b1aafSjsg return -EINVAL; 1042ad8b1aafSjsg *mask |= 1 << level; 1043ad8b1aafSjsg } else 1044ad8b1aafSjsg break; 1045ad8b1aafSjsg } 1046ad8b1aafSjsg 1047ad8b1aafSjsg return 0; 1048ad8b1aafSjsg } 1049ad8b1aafSjsg 10505ca02815Sjsg static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, 10515ca02815Sjsg enum pp_clock_type type, 1052ad8b1aafSjsg const char *buf, 1053ad8b1aafSjsg size_t count) 1054ad8b1aafSjsg { 1055ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 1056ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 1057ad8b1aafSjsg int ret; 1058ad8b1aafSjsg uint32_t mask = 0; 1059ad8b1aafSjsg 1060ad8b1aafSjsg if (amdgpu_in_reset(adev)) 1061ad8b1aafSjsg return -EPERM; 10625ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 10635ca02815Sjsg return -EPERM; 1064ad8b1aafSjsg 1065ad8b1aafSjsg ret = amdgpu_read_mask(buf, count, &mask); 1066ad8b1aafSjsg if (ret) 1067ad8b1aafSjsg return ret; 1068ad8b1aafSjsg 1069ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 1070ad8b1aafSjsg if (ret < 0) { 1071ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 1072ad8b1aafSjsg return ret; 1073ad8b1aafSjsg } 1074ad8b1aafSjsg 10755ca02815Sjsg ret = amdgpu_dpm_force_clock_level(adev, type, mask); 1076ad8b1aafSjsg 1077ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 1078ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 1079ad8b1aafSjsg 1080ad8b1aafSjsg if (ret) 1081ad8b1aafSjsg return -EINVAL; 1082ad8b1aafSjsg 1083ad8b1aafSjsg return count; 1084ad8b1aafSjsg } 1085ad8b1aafSjsg 10865ca02815Sjsg static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 10875ca02815Sjsg struct device_attribute *attr, 10885ca02815Sjsg char *buf) 10895ca02815Sjsg { 10905ca02815Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf); 10915ca02815Sjsg } 10925ca02815Sjsg 10935ca02815Sjsg static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 10945ca02815Sjsg struct device_attribute *attr, 10955ca02815Sjsg const char *buf, 10965ca02815Sjsg size_t count) 10975ca02815Sjsg { 10985ca02815Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count); 10995ca02815Sjsg } 11005ca02815Sjsg 1101ad8b1aafSjsg static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 1102ad8b1aafSjsg struct device_attribute *attr, 1103ad8b1aafSjsg char *buf) 1104ad8b1aafSjsg { 11055ca02815Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf); 1106ad8b1aafSjsg } 1107ad8b1aafSjsg 1108ad8b1aafSjsg static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 1109ad8b1aafSjsg struct device_attribute *attr, 1110ad8b1aafSjsg const char *buf, 1111ad8b1aafSjsg size_t count) 1112ad8b1aafSjsg { 11135ca02815Sjsg return 
amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count); 1114ad8b1aafSjsg } 1115ad8b1aafSjsg 1116ad8b1aafSjsg static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, 1117ad8b1aafSjsg struct device_attribute *attr, 1118ad8b1aafSjsg char *buf) 1119ad8b1aafSjsg { 11205ca02815Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf); 1121ad8b1aafSjsg } 1122ad8b1aafSjsg 1123ad8b1aafSjsg static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, 1124ad8b1aafSjsg struct device_attribute *attr, 1125ad8b1aafSjsg const char *buf, 1126ad8b1aafSjsg size_t count) 1127ad8b1aafSjsg { 11285ca02815Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count); 1129ad8b1aafSjsg } 1130ad8b1aafSjsg 1131ad8b1aafSjsg static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, 1132ad8b1aafSjsg struct device_attribute *attr, 1133ad8b1aafSjsg char *buf) 1134ad8b1aafSjsg { 11355ca02815Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf); 1136ad8b1aafSjsg } 1137ad8b1aafSjsg 1138ad8b1aafSjsg static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, 1139ad8b1aafSjsg struct device_attribute *attr, 1140ad8b1aafSjsg const char *buf, 1141ad8b1aafSjsg size_t count) 1142ad8b1aafSjsg { 11435ca02815Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count); 1144ad8b1aafSjsg } 1145ad8b1aafSjsg 11465ca02815Sjsg static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev, 11475ca02815Sjsg struct device_attribute *attr, 11485ca02815Sjsg char *buf) 11495ca02815Sjsg { 11505ca02815Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf); 11515ca02815Sjsg } 1152ad8b1aafSjsg 11535ca02815Sjsg static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev, 11545ca02815Sjsg struct device_attribute *attr, 11555ca02815Sjsg const char *buf, 11565ca02815Sjsg size_t count) 11575ca02815Sjsg { 11585ca02815Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count); 11595ca02815Sjsg } 1160ad8b1aafSjsg 1161f005ef32Sjsg static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev, 1162f005ef32Sjsg struct device_attribute *attr, 1163f005ef32Sjsg char *buf) 1164f005ef32Sjsg { 1165f005ef32Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf); 1166f005ef32Sjsg } 1167f005ef32Sjsg 1168f005ef32Sjsg static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev, 1169f005ef32Sjsg struct device_attribute *attr, 1170f005ef32Sjsg const char *buf, 1171f005ef32Sjsg size_t count) 1172f005ef32Sjsg { 1173f005ef32Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count); 1174f005ef32Sjsg } 1175f005ef32Sjsg 11765ca02815Sjsg static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev, 11775ca02815Sjsg struct device_attribute *attr, 11785ca02815Sjsg char *buf) 11795ca02815Sjsg { 11805ca02815Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf); 11815ca02815Sjsg } 1182ad8b1aafSjsg 11835ca02815Sjsg static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev, 11845ca02815Sjsg struct device_attribute *attr, 11855ca02815Sjsg const char *buf, 11865ca02815Sjsg size_t count) 11875ca02815Sjsg { 11885ca02815Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count); 1189ad8b1aafSjsg } 1190ad8b1aafSjsg 1191f005ef32Sjsg static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev, 1192f005ef32Sjsg struct device_attribute *attr, 1193f005ef32Sjsg char *buf) 1194f005ef32Sjsg { 1195f005ef32Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf); 1196f005ef32Sjsg } 1197f005ef32Sjsg 1198f005ef32Sjsg static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev, 1199f005ef32Sjsg struct device_attribute *attr, 1200f005ef32Sjsg const char *buf, 1201f005ef32Sjsg size_t count) 1202f005ef32Sjsg { 
1203f005ef32Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count); 1204f005ef32Sjsg } 1205f005ef32Sjsg 1206ad8b1aafSjsg static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, 1207ad8b1aafSjsg struct device_attribute *attr, 1208ad8b1aafSjsg char *buf) 1209ad8b1aafSjsg { 12105ca02815Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf); 1211ad8b1aafSjsg } 1212ad8b1aafSjsg 1213ad8b1aafSjsg static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, 1214ad8b1aafSjsg struct device_attribute *attr, 1215ad8b1aafSjsg const char *buf, 1216ad8b1aafSjsg size_t count) 1217ad8b1aafSjsg { 12185ca02815Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count); 1219ad8b1aafSjsg } 1220ad8b1aafSjsg 1221ad8b1aafSjsg static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 1222ad8b1aafSjsg struct device_attribute *attr, 1223ad8b1aafSjsg char *buf) 1224ad8b1aafSjsg { 12255ca02815Sjsg return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf); 1226ad8b1aafSjsg } 1227ad8b1aafSjsg 1228ad8b1aafSjsg static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 1229ad8b1aafSjsg struct device_attribute *attr, 1230ad8b1aafSjsg const char *buf, 1231ad8b1aafSjsg size_t count) 1232ad8b1aafSjsg { 12335ca02815Sjsg return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count); 1234ad8b1aafSjsg } 1235ad8b1aafSjsg 1236ad8b1aafSjsg static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 1237ad8b1aafSjsg struct device_attribute *attr, 1238ad8b1aafSjsg char *buf) 1239ad8b1aafSjsg { 1240ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 1241ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 1242ad8b1aafSjsg uint32_t value = 0; 1243ad8b1aafSjsg int ret; 1244ad8b1aafSjsg 1245ad8b1aafSjsg if (amdgpu_in_reset(adev)) 1246ad8b1aafSjsg return -EPERM; 12475ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 12485ca02815Sjsg return -EPERM; 1249ad8b1aafSjsg 1250ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 1251ad8b1aafSjsg if (ret < 0) { 1252ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 1253ad8b1aafSjsg return ret; 1254ad8b1aafSjsg } 1255ad8b1aafSjsg 1256ad8b1aafSjsg value = amdgpu_dpm_get_sclk_od(adev); 1257ad8b1aafSjsg 1258ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 1259ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 1260ad8b1aafSjsg 12615ca02815Sjsg return sysfs_emit(buf, "%d\n", value); 1262ad8b1aafSjsg } 1263ad8b1aafSjsg 1264ad8b1aafSjsg static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 1265ad8b1aafSjsg struct device_attribute *attr, 1266ad8b1aafSjsg const char *buf, 1267ad8b1aafSjsg size_t count) 1268ad8b1aafSjsg { 1269ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 1270ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 1271ad8b1aafSjsg int ret; 1272ad8b1aafSjsg long int value; 1273ad8b1aafSjsg 1274ad8b1aafSjsg if (amdgpu_in_reset(adev)) 1275ad8b1aafSjsg return -EPERM; 12765ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 12775ca02815Sjsg return -EPERM; 1278ad8b1aafSjsg 1279ad8b1aafSjsg ret = kstrtol(buf, 0, &value); 1280ad8b1aafSjsg 1281ad8b1aafSjsg if (ret) 1282ad8b1aafSjsg return -EINVAL; 1283ad8b1aafSjsg 1284ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 1285ad8b1aafSjsg if (ret < 0) { 1286ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 1287ad8b1aafSjsg return ret; 1288ad8b1aafSjsg } 1289ad8b1aafSjsg 1290ad8b1aafSjsg amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 1291ad8b1aafSjsg 1292ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 1293ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 1294ad8b1aafSjsg 1295ad8b1aafSjsg return count; 
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t value = 0;
	int ret;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int ret;
	long int value;

	if (amdgpu_in_reset(adev))
		return -EPERM;
	if (adev->in_suspend && !adev->in_runpm)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);

	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family.
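 *
 * For example (profile numbers differ between ASIC families, so the index
 * below is only illustrative and should be taken from the file's output):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	cat /sys/class/drm/card0/device/pp_power_profile_mode
 *	echo 1 > /sys/class/drm/card0/device/pp_power_profile_mode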
1377ad8b1aafSjsg *
1378ad8b1aafSjsg */
1379ad8b1aafSjsg
1380ad8b1aafSjsg static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1381ad8b1aafSjsg struct device_attribute *attr,
1382ad8b1aafSjsg char *buf)
1383ad8b1aafSjsg {
1384ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev);
1385ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev);
1386ad8b1aafSjsg ssize_t size;
1387ad8b1aafSjsg int ret;
1388ad8b1aafSjsg
1389ad8b1aafSjsg if (amdgpu_in_reset(adev))
1390ad8b1aafSjsg return -EPERM;
13915ca02815Sjsg if (adev->in_suspend && !adev->in_runpm)
13925ca02815Sjsg return -EPERM;
1393ad8b1aafSjsg
1394ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev);
1395ad8b1aafSjsg if (ret < 0) {
1396ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev);
1397ad8b1aafSjsg return ret;
1398ad8b1aafSjsg }
1399ad8b1aafSjsg
1400ad8b1aafSjsg size = amdgpu_dpm_get_power_profile_mode(adev, buf);
14011bb76ff1Sjsg if (size <= 0)
14025ca02815Sjsg size = sysfs_emit(buf, "\n");
1403ad8b1aafSjsg
1404ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev);
1405ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev);
1406ad8b1aafSjsg
1407ad8b1aafSjsg return size;
1408ad8b1aafSjsg }
1409ad8b1aafSjsg
1410ad8b1aafSjsg
1411ad8b1aafSjsg static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1412ad8b1aafSjsg struct device_attribute *attr,
1413ad8b1aafSjsg const char *buf,
1414ad8b1aafSjsg size_t count)
1415ad8b1aafSjsg {
1416ad8b1aafSjsg int ret;
1417ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev);
1418ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev);
1419ad8b1aafSjsg uint32_t parameter_size = 0;
1420ad8b1aafSjsg long parameter[64];
1421ad8b1aafSjsg char *sub_str, buf_cpy[128];
1422ad8b1aafSjsg char *tmp_str;
1423ad8b1aafSjsg uint32_t i = 0;
1424ad8b1aafSjsg char tmp[2];
1425ad8b1aafSjsg long int profile_mode = 0;
1426ad8b1aafSjsg const char delimiter[3] = {' ', '\n', '\0'};
1427ad8b1aafSjsg
1428ad8b1aafSjsg if (amdgpu_in_reset(adev))
1429ad8b1aafSjsg return -EPERM;
14305ca02815Sjsg if (adev->in_suspend && !adev->in_runpm)
14315ca02815Sjsg return -EPERM;
1432ad8b1aafSjsg
1433ad8b1aafSjsg tmp[0] = *(buf);
1434ad8b1aafSjsg tmp[1] = '\0';
1435ad8b1aafSjsg ret = kstrtol(tmp, 0, &profile_mode);
1436ad8b1aafSjsg if (ret)
1437ad8b1aafSjsg return -EINVAL;
1438ad8b1aafSjsg
1439ad8b1aafSjsg if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1440ad8b1aafSjsg if (count < 2 || count > 127)
1441ad8b1aafSjsg return -EINVAL;
1442ad8b1aafSjsg while (isspace(*++buf))
1443ad8b1aafSjsg i++;
1444ad8b1aafSjsg memcpy(buf_cpy, buf, count-i);
1445ad8b1aafSjsg tmp_str = buf_cpy;
14465ca02815Sjsg while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
14475ca02815Sjsg if (strlen(sub_str) == 0)
14485ca02815Sjsg continue;
1449ad8b1aafSjsg ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1450ad8b1aafSjsg if (ret)
1451ad8b1aafSjsg return -EINVAL;
1452ad8b1aafSjsg parameter_size++;
1453ad8b1aafSjsg while (isspace(*tmp_str))
1454ad8b1aafSjsg tmp_str++;
1455ad8b1aafSjsg }
1456ad8b1aafSjsg }
1457ad8b1aafSjsg parameter[parameter_size] = profile_mode;
1458ad8b1aafSjsg
1459ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev);
1460ad8b1aafSjsg if (ret < 0) {
1461ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev);
1462ad8b1aafSjsg return ret;
1463ad8b1aafSjsg }
1464ad8b1aafSjsg
1465ad8b1aafSjsg ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1466ad8b1aafSjsg
1467ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev);
1468ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev);
1469ad8b1aafSjsg
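	/* amdgpu_dpm_set_power_profile_mode() returns 0 on success; a successful
	 * update consumes the whole write, any other result is reported as -EINVAL.
	 */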
1470ad8b1aafSjsg if (!ret) 1471ad8b1aafSjsg return count; 1472ad8b1aafSjsg 1473ad8b1aafSjsg return -EINVAL; 1474ad8b1aafSjsg } 1475ad8b1aafSjsg 1476*ce3b705aSjsg static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, 1477f005ef32Sjsg enum amd_pp_sensors sensor, 1478f005ef32Sjsg void *query) 1479f005ef32Sjsg { 1480f005ef32Sjsg int r, size = sizeof(uint32_t); 1481f005ef32Sjsg 1482f005ef32Sjsg if (amdgpu_in_reset(adev)) 1483f005ef32Sjsg return -EPERM; 1484f005ef32Sjsg if (adev->in_suspend && !adev->in_runpm) 1485f005ef32Sjsg return -EPERM; 1486f005ef32Sjsg 1487f005ef32Sjsg r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 1488f005ef32Sjsg if (r < 0) { 1489f005ef32Sjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1490f005ef32Sjsg return r; 1491f005ef32Sjsg } 1492f005ef32Sjsg 1493f005ef32Sjsg /* get the sensor value */ 1494f005ef32Sjsg r = amdgpu_dpm_read_sensor(adev, sensor, query, &size); 1495f005ef32Sjsg 1496f005ef32Sjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 1497f005ef32Sjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 1498f005ef32Sjsg 1499f005ef32Sjsg return r; 1500f005ef32Sjsg } 1501f005ef32Sjsg 1502ad8b1aafSjsg /** 1503ad8b1aafSjsg * DOC: gpu_busy_percent 1504ad8b1aafSjsg * 1505ad8b1aafSjsg * The amdgpu driver provides a sysfs API for reading how busy the GPU 1506ad8b1aafSjsg * is as a percentage. The file gpu_busy_percent is used for this. 1507ad8b1aafSjsg * The SMU firmware computes a percentage of load based on the 1508ad8b1aafSjsg * aggregate activity level in the IP cores. 1509ad8b1aafSjsg */ 1510ad8b1aafSjsg static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev, 1511ad8b1aafSjsg struct device_attribute *attr, 1512ad8b1aafSjsg char *buf) 1513ad8b1aafSjsg { 1514ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 1515ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 1516f005ef32Sjsg unsigned int value; 1517f005ef32Sjsg int r; 1518ad8b1aafSjsg 1519f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); 1520ad8b1aafSjsg if (r) 1521ad8b1aafSjsg return r; 1522ad8b1aafSjsg 15235ca02815Sjsg return sysfs_emit(buf, "%d\n", value); 1524ad8b1aafSjsg } 1525ad8b1aafSjsg 1526ad8b1aafSjsg /** 1527ad8b1aafSjsg * DOC: mem_busy_percent 1528ad8b1aafSjsg * 1529ad8b1aafSjsg * The amdgpu driver provides a sysfs API for reading how busy the VRAM 1530ad8b1aafSjsg * is as a percentage. The file mem_busy_percent is used for this. 1531ad8b1aafSjsg * The SMU firmware computes a percentage of load based on the 1532ad8b1aafSjsg * aggregate activity level in the IP cores. 1533ad8b1aafSjsg */ 1534ad8b1aafSjsg static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, 1535ad8b1aafSjsg struct device_attribute *attr, 1536ad8b1aafSjsg char *buf) 1537ad8b1aafSjsg { 1538ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 1539ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 1540f005ef32Sjsg unsigned int value; 1541f005ef32Sjsg int r; 1542ad8b1aafSjsg 1543f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); 1544ad8b1aafSjsg if (r) 1545ad8b1aafSjsg return r; 1546ad8b1aafSjsg 15475ca02815Sjsg return sysfs_emit(buf, "%d\n", value); 1548ad8b1aafSjsg } 1549ad8b1aafSjsg 1550ad8b1aafSjsg /** 1551ad8b1aafSjsg * DOC: pcie_bw 1552ad8b1aafSjsg * 1553ad8b1aafSjsg * The amdgpu driver provides a sysfs API for estimating how much data 1554ad8b1aafSjsg * has been received and sent by the GPU in the last second through PCIe. 
1555ad8b1aafSjsg * The file pcie_bw is used for this.
1556ad8b1aafSjsg * The Perf counters count the number of received and sent messages and return
1557ad8b1aafSjsg * those values, as well as the maximum payload size of a PCIe packet (mps).
1558ad8b1aafSjsg * Note that it is not possible to easily and quickly obtain the size of each
1559ad8b1aafSjsg * packet transmitted, so we output the max payload size (mps) to allow for
1560ad8b1aafSjsg * quick estimation of the PCIe bandwidth usage.
1561ad8b1aafSjsg */
1562ad8b1aafSjsg static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1563ad8b1aafSjsg struct device_attribute *attr,
1564ad8b1aafSjsg char *buf)
1565ad8b1aafSjsg {
1566ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev);
1567ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev);
1568ad8b1aafSjsg uint64_t count0 = 0, count1 = 0;
1569ad8b1aafSjsg int ret;
1570ad8b1aafSjsg
1571ad8b1aafSjsg if (amdgpu_in_reset(adev))
1572ad8b1aafSjsg return -EPERM;
15735ca02815Sjsg if (adev->in_suspend && !adev->in_runpm)
15745ca02815Sjsg return -EPERM;
1575ad8b1aafSjsg
1576ad8b1aafSjsg if (adev->flags & AMD_IS_APU)
1577ad8b1aafSjsg return -ENODATA;
1578ad8b1aafSjsg
1579ad8b1aafSjsg if (!adev->asic_funcs->get_pcie_usage)
1580ad8b1aafSjsg return -ENODATA;
1581ad8b1aafSjsg
1582ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev);
1583ad8b1aafSjsg if (ret < 0) {
1584ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev);
1585ad8b1aafSjsg return ret;
1586ad8b1aafSjsg }
1587ad8b1aafSjsg
1588ad8b1aafSjsg amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1589ad8b1aafSjsg
1590ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev);
1591ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev);
1592ad8b1aafSjsg
15935ca02815Sjsg return sysfs_emit(buf, "%llu %llu %i\n",
1594ad8b1aafSjsg count0, count1, pcie_get_mps(adev->pdev));
1595ad8b1aafSjsg }
1596ad8b1aafSjsg
1597ad8b1aafSjsg /**
1598ad8b1aafSjsg * DOC: unique_id
1599ad8b1aafSjsg *
1600ad8b1aafSjsg * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
1601ad8b1aafSjsg * The file unique_id is used for this.
1602ad8b1aafSjsg * This will provide a unique ID that persists from machine to machine.
1603ad8b1aafSjsg *
1604ad8b1aafSjsg * NOTE: This will only work for GFX9 and newer. This file will be absent
1605ad8b1aafSjsg * on unsupported ASICs (GFX8 and older).
1606ad8b1aafSjsg */
1607ad8b1aafSjsg static ssize_t amdgpu_get_unique_id(struct device *dev,
1608ad8b1aafSjsg struct device_attribute *attr,
1609ad8b1aafSjsg char *buf)
1610ad8b1aafSjsg {
1611ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev);
1612ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev);
1613ad8b1aafSjsg
1614ad8b1aafSjsg if (amdgpu_in_reset(adev))
1615ad8b1aafSjsg return -EPERM;
16165ca02815Sjsg if (adev->in_suspend && !adev->in_runpm)
16175ca02815Sjsg return -EPERM;
1618ad8b1aafSjsg
1619ad8b1aafSjsg if (adev->unique_id)
16205ca02815Sjsg return sysfs_emit(buf, "%016llx\n", adev->unique_id);
1621ad8b1aafSjsg
1622ad8b1aafSjsg return 0;
1623ad8b1aafSjsg }
1624ad8b1aafSjsg
1625ad8b1aafSjsg /**
1626ad8b1aafSjsg * DOC: thermal_throttling_logging
1627ad8b1aafSjsg *
1628ad8b1aafSjsg * Thermal throttling pulls down the clock frequency and thus the performance.
1629ad8b1aafSjsg * It's a useful mechanism to protect the chip from overheating. Since it
1630ad8b1aafSjsg * impacts performance, the user controls whether it is enabled and if so,
1631ad8b1aafSjsg * the log frequency.
1632ad8b1aafSjsg *
1633ad8b1aafSjsg * Reading back the file shows you the status (enabled or disabled) and
1634ad8b1aafSjsg * the interval (in seconds) between each thermal logging.
1635ad8b1aafSjsg *
1636ad8b1aafSjsg * Writing an integer to the file sets a new logging interval, in seconds.
1637ad8b1aafSjsg * The value should be between 1 and 3600. If the value is less than 1,
1638ad8b1aafSjsg * thermal logging is disabled. Values greater than 3600 are ignored.
1639ad8b1aafSjsg */
1640ad8b1aafSjsg static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
1641ad8b1aafSjsg struct device_attribute *attr,
1642ad8b1aafSjsg char *buf)
1643ad8b1aafSjsg {
1644ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev);
1645ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev);
1646ad8b1aafSjsg
16475ca02815Sjsg return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
1648ad8b1aafSjsg adev_to_drm(adev)->unique,
1649ad8b1aafSjsg atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
1650ad8b1aafSjsg adev->throttling_logging_rs.interval / HZ + 1);
1651ad8b1aafSjsg }
1652ad8b1aafSjsg
1653ad8b1aafSjsg static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
1654ad8b1aafSjsg struct device_attribute *attr,
1655ad8b1aafSjsg const char *buf,
1656ad8b1aafSjsg size_t count)
1657ad8b1aafSjsg {
1658ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev);
1659ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev);
1660ad8b1aafSjsg long throttling_logging_interval;
1661ad8b1aafSjsg unsigned long flags;
1662ad8b1aafSjsg int ret = 0;
1663ad8b1aafSjsg
1664ad8b1aafSjsg ret = kstrtol(buf, 0, &throttling_logging_interval);
1665ad8b1aafSjsg if (ret)
1666ad8b1aafSjsg return ret;
1667ad8b1aafSjsg
1668ad8b1aafSjsg if (throttling_logging_interval > 3600)
1669ad8b1aafSjsg return -EINVAL;
1670ad8b1aafSjsg
1671ad8b1aafSjsg if (throttling_logging_interval > 0) {
1672ad8b1aafSjsg raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
1673ad8b1aafSjsg /*
1674ad8b1aafSjsg * Reset the ratelimit timer internals.
1675ad8b1aafSjsg * This can effectively restart the timer.
1676ad8b1aafSjsg */
1677ad8b1aafSjsg adev->throttling_logging_rs.interval =
1678ad8b1aafSjsg (throttling_logging_interval - 1) * HZ;
1679ad8b1aafSjsg adev->throttling_logging_rs.begin = 0;
1680ad8b1aafSjsg adev->throttling_logging_rs.printed = 0;
1681ad8b1aafSjsg adev->throttling_logging_rs.missed = 0;
1682ad8b1aafSjsg raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
1683ad8b1aafSjsg
1684ad8b1aafSjsg atomic_set(&adev->throttling_logging_enabled, 1);
1685ad8b1aafSjsg } else {
1686ad8b1aafSjsg atomic_set(&adev->throttling_logging_enabled, 0);
1687ad8b1aafSjsg }
1688ad8b1aafSjsg
1689ad8b1aafSjsg return count;
1690ad8b1aafSjsg }
1691ad8b1aafSjsg
1692ad8b1aafSjsg /**
1693f005ef32Sjsg * DOC: apu_thermal_cap
1694f005ef32Sjsg *
1695f005ef32Sjsg * The amdgpu driver provides a sysfs API for retrieving/updating the thermal
1696f005ef32Sjsg * limit temperature in millidegrees Celsius.
1697f005ef32Sjsg *
1698f005ef32Sjsg * Reading back the file shows you the core limit value.
1699f005ef32Sjsg *
1700f005ef32Sjsg * Writing an integer to the file sets a new thermal limit. The value
1701f005ef32Sjsg * should be between 0 and 100. If the value is less than 0 or greater
1702f005ef32Sjsg * than 100, then the write request will be ignored.
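 *
 * Illustrative usage only (the DRM card index varies per system): a new limit
 * of 75 could be requested with
 * "echo 75 > /sys/class/drm/card0/device/apu_thermal_cap" and the currently
 * programmed value read back with "cat" on the same file.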
1703f005ef32Sjsg */
1704f005ef32Sjsg static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
1705f005ef32Sjsg struct device_attribute *attr,
1706f005ef32Sjsg char *buf)
1707f005ef32Sjsg {
1708f005ef32Sjsg int ret, size;
1709f005ef32Sjsg u32 limit;
1710f005ef32Sjsg struct drm_device *ddev = dev_get_drvdata(dev);
1711f005ef32Sjsg struct amdgpu_device *adev = drm_to_adev(ddev);
1712f005ef32Sjsg
1713f005ef32Sjsg ret = pm_runtime_get_sync(ddev->dev);
1714f005ef32Sjsg if (ret < 0) {
1715f005ef32Sjsg pm_runtime_put_autosuspend(ddev->dev);
1716f005ef32Sjsg return ret;
1717f005ef32Sjsg }
1718f005ef32Sjsg
1719f005ef32Sjsg ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
1720f005ef32Sjsg if (!ret)
1721f005ef32Sjsg size = sysfs_emit(buf, "%u\n", limit);
1722f005ef32Sjsg else
1723f005ef32Sjsg size = sysfs_emit(buf, "failed to get thermal limit\n");
1724f005ef32Sjsg
1725f005ef32Sjsg pm_runtime_mark_last_busy(ddev->dev);
1726f005ef32Sjsg pm_runtime_put_autosuspend(ddev->dev);
1727f005ef32Sjsg
1728f005ef32Sjsg return size;
1729f005ef32Sjsg }
1730f005ef32Sjsg
1731f005ef32Sjsg static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
1732f005ef32Sjsg struct device_attribute *attr,
1733f005ef32Sjsg const char *buf,
1734f005ef32Sjsg size_t count)
1735f005ef32Sjsg {
1736f005ef32Sjsg int ret;
1737f005ef32Sjsg u32 value;
1738f005ef32Sjsg struct drm_device *ddev = dev_get_drvdata(dev);
1739f005ef32Sjsg struct amdgpu_device *adev = drm_to_adev(ddev);
1740f005ef32Sjsg
1741f005ef32Sjsg ret = kstrtou32(buf, 10, &value);
1742f005ef32Sjsg if (ret)
1743f005ef32Sjsg return ret;
1744f005ef32Sjsg
1745f005ef32Sjsg if (value > 100) {
1746f005ef32Sjsg dev_err(dev, "Invalid argument !\n");
1747f005ef32Sjsg return -EINVAL;
1748f005ef32Sjsg }
1749f005ef32Sjsg
1750f005ef32Sjsg ret = pm_runtime_get_sync(ddev->dev);
1751f005ef32Sjsg if (ret < 0) {
1752f005ef32Sjsg pm_runtime_put_autosuspend(ddev->dev);
1753f005ef32Sjsg return ret;
1754f005ef32Sjsg }
1755f005ef32Sjsg
1756f005ef32Sjsg ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
1757f005ef32Sjsg if (ret) {
1758f005ef32Sjsg dev_err(dev, "failed to update thermal limit\n");
1759f005ef32Sjsg return ret;
1760f005ef32Sjsg }
1761f005ef32Sjsg
1762f005ef32Sjsg pm_runtime_mark_last_busy(ddev->dev);
1763f005ef32Sjsg pm_runtime_put_autosuspend(ddev->dev);
1764f005ef32Sjsg
1765f005ef32Sjsg return count;
1766f005ef32Sjsg }
1767f005ef32Sjsg
1768f005ef32Sjsg /**
1769ad8b1aafSjsg * DOC: gpu_metrics
1770ad8b1aafSjsg *
1771ad8b1aafSjsg * The amdgpu driver provides a sysfs API for retrieving current gpu
1772ad8b1aafSjsg * metrics data. The file gpu_metrics is used for this. Reading the
1773ad8b1aafSjsg * file will dump all the current gpu metrics data.
1774ad8b1aafSjsg *
1775ad8b1aafSjsg * These data include temperature, frequency, engine utilization,
1776ad8b1aafSjsg * power consumption, throttler status, fan speed and CPU core statistics
1777ad8b1aafSjsg * (available for APUs only). That is, it gives a snapshot of all sensors
1778ad8b1aafSjsg * at the same time.
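 *
 * Note that, unlike most of the other power files, the content is a binary
 * metrics table rather than plain text. A rough way to inspect it from user
 * space (card index varies) is "hexdump -C /sys/class/drm/card0/device/gpu_metrics";
 * the layout follows the gpu_metrics_v* structures in the driver's power
 * interface headers.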
1779ad8b1aafSjsg */ 1780ad8b1aafSjsg static ssize_t amdgpu_get_gpu_metrics(struct device *dev, 1781ad8b1aafSjsg struct device_attribute *attr, 1782ad8b1aafSjsg char *buf) 1783ad8b1aafSjsg { 1784ad8b1aafSjsg struct drm_device *ddev = dev_get_drvdata(dev); 1785ad8b1aafSjsg struct amdgpu_device *adev = drm_to_adev(ddev); 1786ad8b1aafSjsg void *gpu_metrics; 1787ad8b1aafSjsg ssize_t size = 0; 1788ad8b1aafSjsg int ret; 1789ad8b1aafSjsg 1790ad8b1aafSjsg if (amdgpu_in_reset(adev)) 1791ad8b1aafSjsg return -EPERM; 17925ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 17935ca02815Sjsg return -EPERM; 1794ad8b1aafSjsg 1795ad8b1aafSjsg ret = pm_runtime_get_sync(ddev->dev); 1796ad8b1aafSjsg if (ret < 0) { 1797ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 1798ad8b1aafSjsg return ret; 1799ad8b1aafSjsg } 1800ad8b1aafSjsg 1801ad8b1aafSjsg size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); 1802ad8b1aafSjsg if (size <= 0) 1803ad8b1aafSjsg goto out; 1804ad8b1aafSjsg 1805ad8b1aafSjsg if (size >= PAGE_SIZE) 1806ad8b1aafSjsg size = PAGE_SIZE - 1; 1807ad8b1aafSjsg 1808ad8b1aafSjsg memcpy(buf, gpu_metrics, size); 1809ad8b1aafSjsg 1810ad8b1aafSjsg out: 1811ad8b1aafSjsg pm_runtime_mark_last_busy(ddev->dev); 1812ad8b1aafSjsg pm_runtime_put_autosuspend(ddev->dev); 1813ad8b1aafSjsg 1814ad8b1aafSjsg return size; 1815ad8b1aafSjsg } 1816ad8b1aafSjsg 18171bb76ff1Sjsg static int amdgpu_show_powershift_percent(struct device *dev, 1818f005ef32Sjsg char *buf, enum amd_pp_sensors sensor) 18191bb76ff1Sjsg { 18201bb76ff1Sjsg struct drm_device *ddev = dev_get_drvdata(dev); 18211bb76ff1Sjsg struct amdgpu_device *adev = drm_to_adev(ddev); 18221bb76ff1Sjsg uint32_t ss_power; 18231bb76ff1Sjsg int r = 0, i; 18241bb76ff1Sjsg 1825f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); 18261bb76ff1Sjsg if (r == -EOPNOTSUPP) { 18271bb76ff1Sjsg /* sensor not available on dGPU, try to read from APU */ 18281bb76ff1Sjsg adev = NULL; 18291bb76ff1Sjsg mutex_lock(&mgpu_info.mutex); 18301bb76ff1Sjsg for (i = 0; i < mgpu_info.num_gpu; i++) { 18311bb76ff1Sjsg if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) { 18321bb76ff1Sjsg adev = mgpu_info.gpu_ins[i].adev; 18331bb76ff1Sjsg break; 18341bb76ff1Sjsg } 18351bb76ff1Sjsg } 18361bb76ff1Sjsg mutex_unlock(&mgpu_info.mutex); 18371bb76ff1Sjsg if (adev) 1838f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); 18391bb76ff1Sjsg } 18401bb76ff1Sjsg 1841f005ef32Sjsg if (r) 18421bb76ff1Sjsg return r; 1843f005ef32Sjsg 1844f005ef32Sjsg return sysfs_emit(buf, "%u%%\n", ss_power); 18451bb76ff1Sjsg } 1846f005ef32Sjsg 18475ca02815Sjsg /** 18485ca02815Sjsg * DOC: smartshift_apu_power 18495ca02815Sjsg * 18505ca02815Sjsg * The amdgpu driver provides a sysfs API for reporting APU power 18511bb76ff1Sjsg * shift in percentage if platform supports smartshift. Value 0 means that 18521bb76ff1Sjsg * there is no powershift and values between [1-100] means that the power 18531bb76ff1Sjsg * is shifted to APU, the percentage of boost is with respect to APU power 18541bb76ff1Sjsg * limit on the platform. 
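 *
 * For example, a read that returns "20%" indicates that power is currently
 * shifted to the APU by roughly 20% of the APU power limit, while "0%" means
 * no shifting is in effect (the DRM card index in the sysfs path varies per
 * system).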
18555ca02815Sjsg */
18565ca02815Sjsg
18575ca02815Sjsg static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
18585ca02815Sjsg char *buf)
18595ca02815Sjsg {
1860f005ef32Sjsg return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
18615ca02815Sjsg }
18625ca02815Sjsg
18635ca02815Sjsg /**
18645ca02815Sjsg * DOC: smartshift_dgpu_power
18655ca02815Sjsg *
18661bb76ff1Sjsg * The amdgpu driver provides a sysfs API for reporting dGPU power
18671bb76ff1Sjsg * shift in percentage if platform supports smartshift. Value 0 means that
18681bb76ff1Sjsg * there is no powershift and values between [1-100] means that the power is
18691bb76ff1Sjsg * shifted to dGPU, the percentage of boost is with respect to dGPU power
18701bb76ff1Sjsg * limit on the platform.
18715ca02815Sjsg */
18725ca02815Sjsg
18735ca02815Sjsg static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
18745ca02815Sjsg char *buf)
18755ca02815Sjsg {
1876f005ef32Sjsg return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
18775ca02815Sjsg }
18785ca02815Sjsg
18795ca02815Sjsg /**
18805ca02815Sjsg * DOC: smartshift_bias
18815ca02815Sjsg *
18825ca02815Sjsg * The amdgpu driver provides a sysfs API for reporting the
18835ca02815Sjsg * smartshift (SS2.0) bias level. The value ranges from -100 to 100
18845ca02815Sjsg * and the default is 0. -100 sets maximum preference to the APU
18855ca02815Sjsg * and 100 sets maximum preference to the dGPU.
18865ca02815Sjsg */
18875ca02815Sjsg
18885ca02815Sjsg static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
18895ca02815Sjsg struct device_attribute *attr,
18905ca02815Sjsg char *buf)
18915ca02815Sjsg {
18925ca02815Sjsg int r = 0;
18935ca02815Sjsg
18945ca02815Sjsg r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
18955ca02815Sjsg
18965ca02815Sjsg return r;
18975ca02815Sjsg }
18985ca02815Sjsg
18995ca02815Sjsg static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
19005ca02815Sjsg struct device_attribute *attr,
19015ca02815Sjsg const char *buf, size_t count)
19025ca02815Sjsg {
19035ca02815Sjsg struct drm_device *ddev = dev_get_drvdata(dev);
19045ca02815Sjsg struct amdgpu_device *adev = drm_to_adev(ddev);
19055ca02815Sjsg int r = 0;
19065ca02815Sjsg int bias = 0;
19075ca02815Sjsg
19085ca02815Sjsg if (amdgpu_in_reset(adev))
19095ca02815Sjsg return -EPERM;
19105ca02815Sjsg if (adev->in_suspend && !adev->in_runpm)
19115ca02815Sjsg return -EPERM;
19125ca02815Sjsg
19135ca02815Sjsg r = pm_runtime_get_sync(ddev->dev);
19145ca02815Sjsg if (r < 0) {
19155ca02815Sjsg pm_runtime_put_autosuspend(ddev->dev);
19165ca02815Sjsg return r;
19175ca02815Sjsg }
19185ca02815Sjsg
19195ca02815Sjsg r = kstrtoint(buf, 10, &bias);
19205ca02815Sjsg if (r)
19215ca02815Sjsg goto out;
19225ca02815Sjsg
19235ca02815Sjsg if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
19245ca02815Sjsg bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
19255ca02815Sjsg else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
19265ca02815Sjsg bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
19275ca02815Sjsg
19285ca02815Sjsg amdgpu_smartshift_bias = bias;
19295ca02815Sjsg r = count;
19305ca02815Sjsg
19311bb76ff1Sjsg /* TODO: update bias level with SMU message */
19325ca02815Sjsg
19335ca02815Sjsg out:
19345ca02815Sjsg pm_runtime_mark_last_busy(ddev->dev);
19355ca02815Sjsg pm_runtime_put_autosuspend(ddev->dev);
19365ca02815Sjsg return r;
19375ca02815Sjsg }
19385ca02815Sjsg
19395ca02815Sjsg static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
19405ca02815Sjsg uint32_t mask, enum amdgpu_device_attr_states *states) 19415ca02815Sjsg { 19421bb76ff1Sjsg if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) 19435ca02815Sjsg *states = ATTR_STATE_UNSUPPORTED; 19445ca02815Sjsg 19455ca02815Sjsg return 0; 19465ca02815Sjsg } 19475ca02815Sjsg 19485ca02815Sjsg static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 19495ca02815Sjsg uint32_t mask, enum amdgpu_device_attr_states *states) 19505ca02815Sjsg { 1951f005ef32Sjsg uint32_t ss_power; 19525ca02815Sjsg 19535ca02815Sjsg if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) 19545ca02815Sjsg *states = ATTR_STATE_UNSUPPORTED; 1955f005ef32Sjsg else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, 1956f005ef32Sjsg (void *)&ss_power)) 19575ca02815Sjsg *states = ATTR_STATE_UNSUPPORTED; 1958f005ef32Sjsg else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, 1959f005ef32Sjsg (void *)&ss_power)) 19605ca02815Sjsg *states = ATTR_STATE_UNSUPPORTED; 19615ca02815Sjsg 19625ca02815Sjsg return 0; 19635ca02815Sjsg } 19645ca02815Sjsg 1965ad8b1aafSjsg static struct amdgpu_device_attr amdgpu_device_attrs[] = { 1966ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19675ca02815Sjsg AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19685ca02815Sjsg AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19695ca02815Sjsg AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19705ca02815Sjsg AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19715ca02815Sjsg AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1972ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1973ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1974ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1975ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19765ca02815Sjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1977f005ef32Sjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19785ca02815Sjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1979f005ef32Sjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19801bb76ff1Sjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19811bb76ff1Sjsg AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1982ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), 1983ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), 19841bb76ff1Sjsg AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1985ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC), 19861bb76ff1Sjsg AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19871bb76ff1Sjsg AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1988ad8b1aafSjsg AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), 19891bb76ff1Sjsg AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19901bb76ff1Sjsg AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19911bb76ff1Sjsg AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 1992f005ef32Sjsg AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19931bb76ff1Sjsg 
AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), 19945ca02815Sjsg AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC, 19955ca02815Sjsg .attr_update = ss_power_attr_update), 19965ca02815Sjsg AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC, 19975ca02815Sjsg .attr_update = ss_power_attr_update), 19985ca02815Sjsg AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC, 19995ca02815Sjsg .attr_update = ss_bias_attr_update), 2000ad8b1aafSjsg }; 2001ad8b1aafSjsg 2002ad8b1aafSjsg static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 2003ad8b1aafSjsg uint32_t mask, enum amdgpu_device_attr_states *states) 2004ad8b1aafSjsg { 2005ad8b1aafSjsg struct device_attribute *dev_attr = &attr->dev_attr; 20061bb76ff1Sjsg uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0]; 20071bb76ff1Sjsg uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 2008ad8b1aafSjsg const char *attr_name = dev_attr->attr.name; 2009ad8b1aafSjsg 2010ad8b1aafSjsg if (!(attr->flags & mask)) { 2011ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 2012ad8b1aafSjsg return 0; 2013ad8b1aafSjsg } 2014ad8b1aafSjsg 2015ad8b1aafSjsg #define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name)) 2016ad8b1aafSjsg 2017ad8b1aafSjsg if (DEVICE_ATTR_IS(pp_dpm_socclk)) { 20181bb76ff1Sjsg if (gc_ver < IP_VERSION(9, 0, 0)) 2019ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 2020ad8b1aafSjsg } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { 20211bb76ff1Sjsg if (gc_ver < IP_VERSION(9, 0, 0) || 2022f005ef32Sjsg !amdgpu_device_has_display_hardware(adev)) 2023ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 2024ad8b1aafSjsg } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { 20251bb76ff1Sjsg if (mp1_ver < IP_VERSION(10, 0, 0)) 2026ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 2027ad8b1aafSjsg } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) { 2028ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 20291bb76ff1Sjsg if (amdgpu_dpm_is_overdrive_supported(adev)) 2030ad8b1aafSjsg *states = ATTR_STATE_SUPPORTED; 2031ad8b1aafSjsg } else if (DEVICE_ATTR_IS(mem_busy_percent)) { 20321bb76ff1Sjsg if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1)) 2033ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 2034ad8b1aafSjsg } else if (DEVICE_ATTR_IS(pcie_bw)) { 2035ad8b1aafSjsg /* PCIe Perf counters won't work on APU nodes */ 2036ad8b1aafSjsg if (adev->flags & AMD_IS_APU) 2037ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 2038ad8b1aafSjsg } else if (DEVICE_ATTR_IS(unique_id)) { 20391bb76ff1Sjsg switch (gc_ver) { 20401bb76ff1Sjsg case IP_VERSION(9, 0, 1): 20411bb76ff1Sjsg case IP_VERSION(9, 4, 0): 20421bb76ff1Sjsg case IP_VERSION(9, 4, 1): 20431bb76ff1Sjsg case IP_VERSION(9, 4, 2): 2044f005ef32Sjsg case IP_VERSION(9, 4, 3): 20451bb76ff1Sjsg case IP_VERSION(10, 3, 0): 20461bb76ff1Sjsg case IP_VERSION(11, 0, 0): 2047f9ed5a91Sjsg case IP_VERSION(11, 0, 1): 2048f9ed5a91Sjsg case IP_VERSION(11, 0, 2): 2049a19ee134Sjsg case IP_VERSION(11, 0, 3): 20501bb76ff1Sjsg *states = ATTR_STATE_SUPPORTED; 20511bb76ff1Sjsg break; 20521bb76ff1Sjsg default: 2053ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 20541bb76ff1Sjsg } 2055ad8b1aafSjsg } else if (DEVICE_ATTR_IS(pp_features)) { 2056f005ef32Sjsg if ((adev->flags & AMD_IS_APU && 2057f005ef32Sjsg gc_ver != IP_VERSION(9, 4, 3)) || 2058f005ef32Sjsg gc_ver < IP_VERSION(9, 0, 0)) 2059ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 2060ad8b1aafSjsg } else if (DEVICE_ATTR_IS(gpu_metrics)) { 20611bb76ff1Sjsg if (gc_ver < IP_VERSION(9, 1, 0)) 2062ad8b1aafSjsg *states = ATTR_STATE_UNSUPPORTED; 20635ca02815Sjsg 
} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { 20641bb76ff1Sjsg if (!(gc_ver == IP_VERSION(10, 3, 1) || 20651bb76ff1Sjsg gc_ver == IP_VERSION(10, 3, 0) || 20661bb76ff1Sjsg gc_ver == IP_VERSION(10, 1, 2) || 20671bb76ff1Sjsg gc_ver == IP_VERSION(11, 0, 0) || 2068c8811de4Sjsg gc_ver == IP_VERSION(11, 0, 2) || 2069f005ef32Sjsg gc_ver == IP_VERSION(11, 0, 3) || 2070f005ef32Sjsg gc_ver == IP_VERSION(9, 4, 3))) 2071f005ef32Sjsg *states = ATTR_STATE_UNSUPPORTED; 2072f005ef32Sjsg } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { 2073f005ef32Sjsg if (!((gc_ver == IP_VERSION(10, 3, 1) || 2074f005ef32Sjsg gc_ver == IP_VERSION(10, 3, 0) || 2075f005ef32Sjsg gc_ver == IP_VERSION(11, 0, 2) || 2076f005ef32Sjsg gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) 20775ca02815Sjsg *states = ATTR_STATE_UNSUPPORTED; 20785ca02815Sjsg } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { 20791bb76ff1Sjsg if (!(gc_ver == IP_VERSION(10, 3, 1) || 20801bb76ff1Sjsg gc_ver == IP_VERSION(10, 3, 0) || 20811bb76ff1Sjsg gc_ver == IP_VERSION(10, 1, 2) || 20821bb76ff1Sjsg gc_ver == IP_VERSION(11, 0, 0) || 2083c8811de4Sjsg gc_ver == IP_VERSION(11, 0, 2) || 2084f005ef32Sjsg gc_ver == IP_VERSION(11, 0, 3) || 2085f005ef32Sjsg gc_ver == IP_VERSION(9, 4, 3))) 2086f005ef32Sjsg *states = ATTR_STATE_UNSUPPORTED; 2087f005ef32Sjsg } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { 2088f005ef32Sjsg if (!((gc_ver == IP_VERSION(10, 3, 1) || 2089f005ef32Sjsg gc_ver == IP_VERSION(10, 3, 0) || 2090f005ef32Sjsg gc_ver == IP_VERSION(11, 0, 2) || 2091f005ef32Sjsg gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) 20921bb76ff1Sjsg *states = ATTR_STATE_UNSUPPORTED; 20931bb76ff1Sjsg } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { 20941bb76ff1Sjsg if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) 20951bb76ff1Sjsg *states = ATTR_STATE_UNSUPPORTED; 20961bb76ff1Sjsg else if (gc_ver == IP_VERSION(10, 3, 0) && amdgpu_sriov_vf(adev)) 20975ca02815Sjsg *states = ATTR_STATE_UNSUPPORTED; 2098ad8b1aafSjsg } 2099ad8b1aafSjsg 21001bb76ff1Sjsg switch (gc_ver) { 21011bb76ff1Sjsg case IP_VERSION(9, 4, 1): 21021bb76ff1Sjsg case IP_VERSION(9, 4, 2): 21035ca02815Sjsg /* the Mi series card does not support standalone mclk/socclk/fclk level setting */ 2104ad8b1aafSjsg if (DEVICE_ATTR_IS(pp_dpm_mclk) || 2105ad8b1aafSjsg DEVICE_ATTR_IS(pp_dpm_socclk) || 2106ad8b1aafSjsg DEVICE_ATTR_IS(pp_dpm_fclk)) { 2107ad8b1aafSjsg dev_attr->attr.mode &= ~S_IWUGO; 2108ad8b1aafSjsg dev_attr->store = NULL; 2109ad8b1aafSjsg } 21105ca02815Sjsg break; 21111bb76ff1Sjsg case IP_VERSION(10, 3, 0): 21121bb76ff1Sjsg if (DEVICE_ATTR_IS(power_dpm_force_performance_level) && 21131bb76ff1Sjsg amdgpu_sriov_vf(adev)) { 21141bb76ff1Sjsg dev_attr->attr.mode &= ~0222; 21151bb76ff1Sjsg dev_attr->store = NULL; 21161bb76ff1Sjsg } 21171bb76ff1Sjsg break; 21185ca02815Sjsg default: 21195ca02815Sjsg break; 21205ca02815Sjsg } 21215ca02815Sjsg 21225ca02815Sjsg if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) { 21235ca02815Sjsg /* SMU MP1 does not support dcefclk level setting */ 21241bb76ff1Sjsg if (gc_ver >= IP_VERSION(10, 0, 0)) { 21255ca02815Sjsg dev_attr->attr.mode &= ~S_IWUGO; 21265ca02815Sjsg dev_attr->store = NULL; 21275ca02815Sjsg } 2128ad8b1aafSjsg } 2129ad8b1aafSjsg 21306dcce042Sjsg /* setting should not be allowed from VF if not in one VF mode */ 21316dcce042Sjsg if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { 2132370d26e1Sjsg dev_attr->attr.mode &= ~S_IWUGO; 2133370d26e1Sjsg dev_attr->store = NULL; 2134370d26e1Sjsg } 2135370d26e1Sjsg 2136ad8b1aafSjsg #undef DEVICE_ATTR_IS 
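	/* anything not explicitly rejected above is left in the caller's default
	 * ATTR_STATE_SUPPORTED state */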
2137ad8b1aafSjsg 2138ad8b1aafSjsg return 0; 2139ad8b1aafSjsg } 2140ad8b1aafSjsg 2141ad8b1aafSjsg 2142ad8b1aafSjsg static int amdgpu_device_attr_create(struct amdgpu_device *adev, 2143ad8b1aafSjsg struct amdgpu_device_attr *attr, 2144ad8b1aafSjsg uint32_t mask, struct list_head *attr_list) 2145ad8b1aafSjsg { 2146ad8b1aafSjsg int ret = 0; 2147ad8b1aafSjsg enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED; 2148ad8b1aafSjsg struct amdgpu_device_attr_entry *attr_entry; 21490f285a81Sjsg struct device_attribute *dev_attr; 21500f285a81Sjsg const char *name; 2151ad8b1aafSjsg 2152ad8b1aafSjsg int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, 2153ad8b1aafSjsg uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update; 2154ad8b1aafSjsg 21550f285a81Sjsg if (!attr) 21560f285a81Sjsg return -EINVAL; 21570f285a81Sjsg 21580f285a81Sjsg dev_attr = &attr->dev_attr; 21590f285a81Sjsg name = dev_attr->attr.name; 2160ad8b1aafSjsg 21615ca02815Sjsg attr_update = attr->attr_update ? attr->attr_update : default_attr_update; 2162ad8b1aafSjsg 2163ad8b1aafSjsg ret = attr_update(adev, attr, mask, &attr_states); 2164ad8b1aafSjsg if (ret) { 2165ad8b1aafSjsg dev_err(adev->dev, "failed to update device file %s, ret = %d\n", 2166ad8b1aafSjsg name, ret); 2167ad8b1aafSjsg return ret; 2168ad8b1aafSjsg } 2169ad8b1aafSjsg 2170ad8b1aafSjsg if (attr_states == ATTR_STATE_UNSUPPORTED) 2171ad8b1aafSjsg return 0; 2172ad8b1aafSjsg 2173ad8b1aafSjsg ret = device_create_file(adev->dev, dev_attr); 2174ad8b1aafSjsg if (ret) { 2175ad8b1aafSjsg dev_err(adev->dev, "failed to create device file %s, ret = %d\n", 2176ad8b1aafSjsg name, ret); 2177ad8b1aafSjsg } 2178ad8b1aafSjsg 2179ad8b1aafSjsg attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL); 2180ad8b1aafSjsg if (!attr_entry) 2181ad8b1aafSjsg return -ENOMEM; 2182ad8b1aafSjsg 2183ad8b1aafSjsg attr_entry->attr = attr; 2184ad8b1aafSjsg INIT_LIST_HEAD(&attr_entry->entry); 2185ad8b1aafSjsg 2186ad8b1aafSjsg list_add_tail(&attr_entry->entry, attr_list); 2187ad8b1aafSjsg 2188ad8b1aafSjsg return ret; 2189ad8b1aafSjsg } 2190ad8b1aafSjsg 2191ad8b1aafSjsg static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr) 2192ad8b1aafSjsg { 2193ad8b1aafSjsg struct device_attribute *dev_attr = &attr->dev_attr; 2194ad8b1aafSjsg 2195ad8b1aafSjsg device_remove_file(adev->dev, dev_attr); 2196ad8b1aafSjsg } 2197ad8b1aafSjsg 2198ad8b1aafSjsg static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev, 2199ad8b1aafSjsg struct list_head *attr_list); 2200ad8b1aafSjsg 2201ad8b1aafSjsg static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev, 2202ad8b1aafSjsg struct amdgpu_device_attr *attrs, 2203ad8b1aafSjsg uint32_t counts, 2204ad8b1aafSjsg uint32_t mask, 2205ad8b1aafSjsg struct list_head *attr_list) 2206ad8b1aafSjsg { 2207ad8b1aafSjsg int ret = 0; 2208ad8b1aafSjsg uint32_t i = 0; 2209ad8b1aafSjsg 2210ad8b1aafSjsg for (i = 0; i < counts; i++) { 2211ad8b1aafSjsg ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list); 2212ad8b1aafSjsg if (ret) 2213ad8b1aafSjsg goto failed; 2214ad8b1aafSjsg } 2215ad8b1aafSjsg 2216ad8b1aafSjsg return 0; 2217ad8b1aafSjsg 2218ad8b1aafSjsg failed: 2219ad8b1aafSjsg amdgpu_device_attr_remove_groups(adev, attr_list); 2220ad8b1aafSjsg 2221ad8b1aafSjsg return ret; 2222ad8b1aafSjsg } 2223ad8b1aafSjsg 2224ad8b1aafSjsg static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev, 2225ad8b1aafSjsg struct list_head *attr_list) 2226ad8b1aafSjsg { 
2227ad8b1aafSjsg struct amdgpu_device_attr_entry *entry, *entry_tmp; 2228ad8b1aafSjsg 2229ad8b1aafSjsg if (list_empty(attr_list)) 2230ad8b1aafSjsg return ; 2231ad8b1aafSjsg 2232ad8b1aafSjsg list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) { 2233ad8b1aafSjsg amdgpu_device_attr_remove(adev, entry->attr); 2234ad8b1aafSjsg list_del(&entry->entry); 2235ad8b1aafSjsg kfree(entry); 2236ad8b1aafSjsg } 2237ad8b1aafSjsg } 2238ad8b1aafSjsg 2239ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 2240ad8b1aafSjsg struct device_attribute *attr, 2241ad8b1aafSjsg char *buf) 2242ad8b1aafSjsg { 2243ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2244ad8b1aafSjsg int channel = to_sensor_dev_attr(attr)->index; 2245f005ef32Sjsg int r, temp = 0; 2246ad8b1aafSjsg 2247ad8b1aafSjsg if (channel >= PP_TEMP_MAX) 2248ad8b1aafSjsg return -EINVAL; 2249ad8b1aafSjsg 2250ad8b1aafSjsg switch (channel) { 2251ad8b1aafSjsg case PP_TEMP_JUNCTION: 2252ad8b1aafSjsg /* get current junction temperature */ 2253f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 2254f005ef32Sjsg (void *)&temp); 2255ad8b1aafSjsg break; 2256ad8b1aafSjsg case PP_TEMP_EDGE: 2257ad8b1aafSjsg /* get current edge temperature */ 2258f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 2259f005ef32Sjsg (void *)&temp); 2260ad8b1aafSjsg break; 2261ad8b1aafSjsg case PP_TEMP_MEM: 2262ad8b1aafSjsg /* get current memory temperature */ 2263f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 2264f005ef32Sjsg (void *)&temp); 2265ad8b1aafSjsg break; 2266ad8b1aafSjsg default: 2267ad8b1aafSjsg r = -EINVAL; 2268ad8b1aafSjsg break; 2269ad8b1aafSjsg } 2270ad8b1aafSjsg 2271ad8b1aafSjsg if (r) 2272ad8b1aafSjsg return r; 2273ad8b1aafSjsg 22745ca02815Sjsg return sysfs_emit(buf, "%d\n", temp); 2275ad8b1aafSjsg } 2276ad8b1aafSjsg 2277ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 2278ad8b1aafSjsg struct device_attribute *attr, 2279ad8b1aafSjsg char *buf) 2280ad8b1aafSjsg { 2281ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2282ad8b1aafSjsg int hyst = to_sensor_dev_attr(attr)->index; 2283ad8b1aafSjsg int temp; 2284ad8b1aafSjsg 2285ad8b1aafSjsg if (hyst) 2286ad8b1aafSjsg temp = adev->pm.dpm.thermal.min_temp; 2287ad8b1aafSjsg else 2288ad8b1aafSjsg temp = adev->pm.dpm.thermal.max_temp; 2289ad8b1aafSjsg 22905ca02815Sjsg return sysfs_emit(buf, "%d\n", temp); 2291ad8b1aafSjsg } 2292ad8b1aafSjsg 2293ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, 2294ad8b1aafSjsg struct device_attribute *attr, 2295ad8b1aafSjsg char *buf) 2296ad8b1aafSjsg { 2297ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2298ad8b1aafSjsg int hyst = to_sensor_dev_attr(attr)->index; 2299ad8b1aafSjsg int temp; 2300ad8b1aafSjsg 2301ad8b1aafSjsg if (hyst) 2302ad8b1aafSjsg temp = adev->pm.dpm.thermal.min_hotspot_temp; 2303ad8b1aafSjsg else 2304ad8b1aafSjsg temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; 2305ad8b1aafSjsg 23065ca02815Sjsg return sysfs_emit(buf, "%d\n", temp); 2307ad8b1aafSjsg } 2308ad8b1aafSjsg 2309ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, 2310ad8b1aafSjsg struct device_attribute *attr, 2311ad8b1aafSjsg char *buf) 2312ad8b1aafSjsg { 2313ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2314ad8b1aafSjsg int hyst = to_sensor_dev_attr(attr)->index; 2315ad8b1aafSjsg int temp; 2316ad8b1aafSjsg 2317ad8b1aafSjsg if (hyst) 
2318ad8b1aafSjsg temp = adev->pm.dpm.thermal.min_mem_temp; 2319ad8b1aafSjsg else 2320ad8b1aafSjsg temp = adev->pm.dpm.thermal.max_mem_crit_temp; 2321ad8b1aafSjsg 23225ca02815Sjsg return sysfs_emit(buf, "%d\n", temp); 2323ad8b1aafSjsg } 2324ad8b1aafSjsg 2325ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, 2326ad8b1aafSjsg struct device_attribute *attr, 2327ad8b1aafSjsg char *buf) 2328ad8b1aafSjsg { 2329ad8b1aafSjsg int channel = to_sensor_dev_attr(attr)->index; 2330ad8b1aafSjsg 2331ad8b1aafSjsg if (channel >= PP_TEMP_MAX) 2332ad8b1aafSjsg return -EINVAL; 2333ad8b1aafSjsg 23345ca02815Sjsg return sysfs_emit(buf, "%s\n", temp_label[channel].label); 2335ad8b1aafSjsg } 2336ad8b1aafSjsg 2337ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, 2338ad8b1aafSjsg struct device_attribute *attr, 2339ad8b1aafSjsg char *buf) 2340ad8b1aafSjsg { 2341ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2342ad8b1aafSjsg int channel = to_sensor_dev_attr(attr)->index; 2343ad8b1aafSjsg int temp = 0; 2344ad8b1aafSjsg 2345ad8b1aafSjsg if (channel >= PP_TEMP_MAX) 2346ad8b1aafSjsg return -EINVAL; 2347ad8b1aafSjsg 2348ad8b1aafSjsg switch (channel) { 2349ad8b1aafSjsg case PP_TEMP_JUNCTION: 2350ad8b1aafSjsg temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp; 2351ad8b1aafSjsg break; 2352ad8b1aafSjsg case PP_TEMP_EDGE: 2353ad8b1aafSjsg temp = adev->pm.dpm.thermal.max_edge_emergency_temp; 2354ad8b1aafSjsg break; 2355ad8b1aafSjsg case PP_TEMP_MEM: 2356ad8b1aafSjsg temp = adev->pm.dpm.thermal.max_mem_emergency_temp; 2357ad8b1aafSjsg break; 2358ad8b1aafSjsg } 2359ad8b1aafSjsg 23605ca02815Sjsg return sysfs_emit(buf, "%d\n", temp); 2361ad8b1aafSjsg } 2362ad8b1aafSjsg 2363ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 2364ad8b1aafSjsg struct device_attribute *attr, 2365ad8b1aafSjsg char *buf) 2366ad8b1aafSjsg { 2367ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2368ad8b1aafSjsg u32 pwm_mode = 0; 2369ad8b1aafSjsg int ret; 2370ad8b1aafSjsg 2371ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2372ad8b1aafSjsg return -EPERM; 23735ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 23745ca02815Sjsg return -EPERM; 2375ad8b1aafSjsg 2376ad8b1aafSjsg ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2377ad8b1aafSjsg if (ret < 0) { 2378ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2379ad8b1aafSjsg return ret; 2380ad8b1aafSjsg } 2381ad8b1aafSjsg 23821bb76ff1Sjsg ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); 23831bb76ff1Sjsg 2384ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2385ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 23861bb76ff1Sjsg 23871bb76ff1Sjsg if (ret) 2388ad8b1aafSjsg return -EINVAL; 2389ad8b1aafSjsg 23905ca02815Sjsg return sysfs_emit(buf, "%u\n", pwm_mode); 2391ad8b1aafSjsg } 2392ad8b1aafSjsg 2393ad8b1aafSjsg static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 2394ad8b1aafSjsg struct device_attribute *attr, 2395ad8b1aafSjsg const char *buf, 2396ad8b1aafSjsg size_t count) 2397ad8b1aafSjsg { 2398ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2399ad8b1aafSjsg int err, ret; 2400971a55aaSjsg u32 pwm_mode; 2401ad8b1aafSjsg int value; 2402ad8b1aafSjsg 2403ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2404ad8b1aafSjsg return -EPERM; 24055ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 24065ca02815Sjsg return -EPERM; 2407ad8b1aafSjsg 2408ad8b1aafSjsg err = kstrtoint(buf, 10, &value); 2409ad8b1aafSjsg if (err) 2410ad8b1aafSjsg 
return err; 2411ad8b1aafSjsg 2412971a55aaSjsg if (value == 0) 2413971a55aaSjsg pwm_mode = AMD_FAN_CTRL_NONE; 2414971a55aaSjsg else if (value == 1) 2415971a55aaSjsg pwm_mode = AMD_FAN_CTRL_MANUAL; 2416971a55aaSjsg else if (value == 2) 2417971a55aaSjsg pwm_mode = AMD_FAN_CTRL_AUTO; 2418971a55aaSjsg else 2419971a55aaSjsg return -EINVAL; 2420971a55aaSjsg 2421ad8b1aafSjsg ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2422ad8b1aafSjsg if (ret < 0) { 2423ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2424ad8b1aafSjsg return ret; 2425ad8b1aafSjsg } 2426ad8b1aafSjsg 2427971a55aaSjsg ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 24281bb76ff1Sjsg 2429ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2430ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 24311bb76ff1Sjsg 24321bb76ff1Sjsg if (ret) 2433ad8b1aafSjsg return -EINVAL; 2434ad8b1aafSjsg 2435ad8b1aafSjsg return count; 2436ad8b1aafSjsg } 2437ad8b1aafSjsg 2438ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 2439ad8b1aafSjsg struct device_attribute *attr, 2440ad8b1aafSjsg char *buf) 2441ad8b1aafSjsg { 24425ca02815Sjsg return sysfs_emit(buf, "%i\n", 0); 2443ad8b1aafSjsg } 2444ad8b1aafSjsg 2445ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 2446ad8b1aafSjsg struct device_attribute *attr, 2447ad8b1aafSjsg char *buf) 2448ad8b1aafSjsg { 24495ca02815Sjsg return sysfs_emit(buf, "%i\n", 255); 2450ad8b1aafSjsg } 2451ad8b1aafSjsg 2452ad8b1aafSjsg static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 2453ad8b1aafSjsg struct device_attribute *attr, 2454ad8b1aafSjsg const char *buf, size_t count) 2455ad8b1aafSjsg { 2456ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2457ad8b1aafSjsg int err; 2458ad8b1aafSjsg u32 value; 2459ad8b1aafSjsg u32 pwm_mode; 2460ad8b1aafSjsg 2461ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2462ad8b1aafSjsg return -EPERM; 24635ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 24645ca02815Sjsg return -EPERM; 2465ad8b1aafSjsg 24661bb76ff1Sjsg err = kstrtou32(buf, 10, &value); 24671bb76ff1Sjsg if (err) 24681bb76ff1Sjsg return err; 24691bb76ff1Sjsg 2470ad8b1aafSjsg err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2471ad8b1aafSjsg if (err < 0) { 2472ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2473ad8b1aafSjsg return err; 2474ad8b1aafSjsg } 2475ad8b1aafSjsg 24761bb76ff1Sjsg err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); 24771bb76ff1Sjsg if (err) 24781bb76ff1Sjsg goto out; 24791bb76ff1Sjsg 2480ad8b1aafSjsg if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2481ad8b1aafSjsg pr_info("manual fan speed control should be enabled first\n"); 2482ad8b1aafSjsg err = -EINVAL; 24831bb76ff1Sjsg goto out; 24841bb76ff1Sjsg } 2485ad8b1aafSjsg 24861bb76ff1Sjsg err = amdgpu_dpm_set_fan_speed_pwm(adev, value); 24871bb76ff1Sjsg 24881bb76ff1Sjsg out: 2489ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2490ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2491ad8b1aafSjsg 2492ad8b1aafSjsg if (err) 2493ad8b1aafSjsg return err; 2494ad8b1aafSjsg 2495ad8b1aafSjsg return count; 2496ad8b1aafSjsg } 2497ad8b1aafSjsg 2498ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 2499ad8b1aafSjsg struct device_attribute *attr, 2500ad8b1aafSjsg char *buf) 2501ad8b1aafSjsg { 2502ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2503ad8b1aafSjsg int err; 2504ad8b1aafSjsg u32 speed = 0; 2505ad8b1aafSjsg 2506ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2507ad8b1aafSjsg return 
-EPERM; 25085ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 25095ca02815Sjsg return -EPERM; 2510ad8b1aafSjsg 2511ad8b1aafSjsg err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2512ad8b1aafSjsg if (err < 0) { 2513ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2514ad8b1aafSjsg return err; 2515ad8b1aafSjsg } 2516ad8b1aafSjsg 25175ca02815Sjsg err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); 2518ad8b1aafSjsg 2519ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2520ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2521ad8b1aafSjsg 2522ad8b1aafSjsg if (err) 2523ad8b1aafSjsg return err; 2524ad8b1aafSjsg 25255ca02815Sjsg return sysfs_emit(buf, "%i\n", speed); 2526ad8b1aafSjsg } 2527ad8b1aafSjsg 2528ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 2529ad8b1aafSjsg struct device_attribute *attr, 2530ad8b1aafSjsg char *buf) 2531ad8b1aafSjsg { 2532ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2533ad8b1aafSjsg int err; 2534ad8b1aafSjsg u32 speed = 0; 2535ad8b1aafSjsg 2536ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2537ad8b1aafSjsg return -EPERM; 25385ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 25395ca02815Sjsg return -EPERM; 2540ad8b1aafSjsg 2541ad8b1aafSjsg err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2542ad8b1aafSjsg if (err < 0) { 2543ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2544ad8b1aafSjsg return err; 2545ad8b1aafSjsg } 2546ad8b1aafSjsg 2547ad8b1aafSjsg err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 2548ad8b1aafSjsg 2549ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2550ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2551ad8b1aafSjsg 2552ad8b1aafSjsg if (err) 2553ad8b1aafSjsg return err; 2554ad8b1aafSjsg 25555ca02815Sjsg return sysfs_emit(buf, "%i\n", speed); 2556ad8b1aafSjsg } 2557ad8b1aafSjsg 2558ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, 2559ad8b1aafSjsg struct device_attribute *attr, 2560ad8b1aafSjsg char *buf) 2561ad8b1aafSjsg { 2562ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2563ad8b1aafSjsg u32 min_rpm = 0; 2564ad8b1aafSjsg int r; 2565ad8b1aafSjsg 2566f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 2567f005ef32Sjsg (void *)&min_rpm); 2568ad8b1aafSjsg 2569ad8b1aafSjsg if (r) 2570ad8b1aafSjsg return r; 2571ad8b1aafSjsg 25725ca02815Sjsg return sysfs_emit(buf, "%d\n", min_rpm); 2573ad8b1aafSjsg } 2574ad8b1aafSjsg 2575ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, 2576ad8b1aafSjsg struct device_attribute *attr, 2577ad8b1aafSjsg char *buf) 2578ad8b1aafSjsg { 2579ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2580ad8b1aafSjsg u32 max_rpm = 0; 2581ad8b1aafSjsg int r; 2582ad8b1aafSjsg 2583f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 2584f005ef32Sjsg (void *)&max_rpm); 2585ad8b1aafSjsg 2586ad8b1aafSjsg if (r) 2587ad8b1aafSjsg return r; 2588ad8b1aafSjsg 25895ca02815Sjsg return sysfs_emit(buf, "%d\n", max_rpm); 2590ad8b1aafSjsg } 2591ad8b1aafSjsg 2592ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, 2593ad8b1aafSjsg struct device_attribute *attr, 2594ad8b1aafSjsg char *buf) 2595ad8b1aafSjsg { 2596ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2597ad8b1aafSjsg int err; 2598ad8b1aafSjsg u32 rpm = 0; 2599ad8b1aafSjsg 2600ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2601ad8b1aafSjsg return -EPERM; 26025ca02815Sjsg if 
(adev->in_suspend && !adev->in_runpm) 26035ca02815Sjsg return -EPERM; 2604ad8b1aafSjsg 2605ad8b1aafSjsg err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2606ad8b1aafSjsg if (err < 0) { 2607ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2608ad8b1aafSjsg return err; 2609ad8b1aafSjsg } 2610ad8b1aafSjsg 2611ad8b1aafSjsg err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); 2612ad8b1aafSjsg 2613ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2614ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2615ad8b1aafSjsg 2616ad8b1aafSjsg if (err) 2617ad8b1aafSjsg return err; 2618ad8b1aafSjsg 26195ca02815Sjsg return sysfs_emit(buf, "%i\n", rpm); 2620ad8b1aafSjsg } 2621ad8b1aafSjsg 2622ad8b1aafSjsg static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, 2623ad8b1aafSjsg struct device_attribute *attr, 2624ad8b1aafSjsg const char *buf, size_t count) 2625ad8b1aafSjsg { 2626ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2627ad8b1aafSjsg int err; 2628ad8b1aafSjsg u32 value; 2629ad8b1aafSjsg u32 pwm_mode; 2630ad8b1aafSjsg 2631ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2632ad8b1aafSjsg return -EPERM; 26335ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 26345ca02815Sjsg return -EPERM; 2635ad8b1aafSjsg 26361bb76ff1Sjsg err = kstrtou32(buf, 10, &value); 26371bb76ff1Sjsg if (err) 26381bb76ff1Sjsg return err; 26391bb76ff1Sjsg 2640ad8b1aafSjsg err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2641ad8b1aafSjsg if (err < 0) { 2642ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2643ad8b1aafSjsg return err; 2644ad8b1aafSjsg } 2645ad8b1aafSjsg 26461bb76ff1Sjsg err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); 26471bb76ff1Sjsg if (err) 26481bb76ff1Sjsg goto out; 2649ad8b1aafSjsg 2650ad8b1aafSjsg if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 26511bb76ff1Sjsg err = -ENODATA; 26521bb76ff1Sjsg goto out; 2653ad8b1aafSjsg } 2654ad8b1aafSjsg 2655ad8b1aafSjsg err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 2656ad8b1aafSjsg 26571bb76ff1Sjsg out: 2658ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2659ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2660ad8b1aafSjsg 2661ad8b1aafSjsg if (err) 2662ad8b1aafSjsg return err; 2663ad8b1aafSjsg 2664ad8b1aafSjsg return count; 2665ad8b1aafSjsg } 2666ad8b1aafSjsg 2667ad8b1aafSjsg static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, 2668ad8b1aafSjsg struct device_attribute *attr, 2669ad8b1aafSjsg char *buf) 2670ad8b1aafSjsg { 2671ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2672ad8b1aafSjsg u32 pwm_mode = 0; 2673ad8b1aafSjsg int ret; 2674ad8b1aafSjsg 2675ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2676ad8b1aafSjsg return -EPERM; 26775ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 26785ca02815Sjsg return -EPERM; 2679ad8b1aafSjsg 2680ad8b1aafSjsg ret = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2681ad8b1aafSjsg if (ret < 0) { 2682ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2683ad8b1aafSjsg return ret; 2684ad8b1aafSjsg } 2685ad8b1aafSjsg 26861bb76ff1Sjsg ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); 26871bb76ff1Sjsg 2688ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2689ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 26901bb76ff1Sjsg 26911bb76ff1Sjsg if (ret) 2692ad8b1aafSjsg return -EINVAL; 2693ad8b1aafSjsg 26945ca02815Sjsg return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 
0 : 1); 2695ad8b1aafSjsg } 2696ad8b1aafSjsg 2697ad8b1aafSjsg static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, 2698ad8b1aafSjsg struct device_attribute *attr, 2699ad8b1aafSjsg const char *buf, 2700ad8b1aafSjsg size_t count) 2701ad8b1aafSjsg { 2702ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2703ad8b1aafSjsg int err; 2704ad8b1aafSjsg int value; 2705ad8b1aafSjsg u32 pwm_mode; 2706ad8b1aafSjsg 2707ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2708ad8b1aafSjsg return -EPERM; 27095ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 27105ca02815Sjsg return -EPERM; 2711ad8b1aafSjsg 2712ad8b1aafSjsg err = kstrtoint(buf, 10, &value); 2713ad8b1aafSjsg if (err) 2714ad8b1aafSjsg return err; 2715ad8b1aafSjsg 2716ad8b1aafSjsg if (value == 0) 2717ad8b1aafSjsg pwm_mode = AMD_FAN_CTRL_AUTO; 2718ad8b1aafSjsg else if (value == 1) 2719ad8b1aafSjsg pwm_mode = AMD_FAN_CTRL_MANUAL; 2720ad8b1aafSjsg else 2721ad8b1aafSjsg return -EINVAL; 2722ad8b1aafSjsg 2723ad8b1aafSjsg err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2724ad8b1aafSjsg if (err < 0) { 2725ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2726ad8b1aafSjsg return err; 2727ad8b1aafSjsg } 2728ad8b1aafSjsg 27291bb76ff1Sjsg err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 2730ad8b1aafSjsg 2731ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2732ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2733ad8b1aafSjsg 27341bb76ff1Sjsg if (err) 27351bb76ff1Sjsg return -EINVAL; 27361bb76ff1Sjsg 2737ad8b1aafSjsg return count; 2738ad8b1aafSjsg } 2739ad8b1aafSjsg 2740ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, 2741ad8b1aafSjsg struct device_attribute *attr, 2742ad8b1aafSjsg char *buf) 2743ad8b1aafSjsg { 2744ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2745ad8b1aafSjsg u32 vddgfx; 2746f005ef32Sjsg int r; 2747ad8b1aafSjsg 2748ad8b1aafSjsg /* get the voltage */ 2749f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX, 2750f005ef32Sjsg (void *)&vddgfx); 2751ad8b1aafSjsg if (r) 2752ad8b1aafSjsg return r; 2753ad8b1aafSjsg 27545ca02815Sjsg return sysfs_emit(buf, "%d\n", vddgfx); 2755ad8b1aafSjsg } 2756ad8b1aafSjsg 2757ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, 2758ad8b1aafSjsg struct device_attribute *attr, 2759ad8b1aafSjsg char *buf) 2760ad8b1aafSjsg { 27615ca02815Sjsg return sysfs_emit(buf, "vddgfx\n"); 2762ad8b1aafSjsg } 2763ad8b1aafSjsg 2764ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, 2765ad8b1aafSjsg struct device_attribute *attr, 2766ad8b1aafSjsg char *buf) 2767ad8b1aafSjsg { 2768ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2769ad8b1aafSjsg u32 vddnb; 2770f005ef32Sjsg int r; 2771ad8b1aafSjsg 2772ad8b1aafSjsg /* only APUs have vddnb */ 2773ad8b1aafSjsg if (!(adev->flags & AMD_IS_APU)) 2774ad8b1aafSjsg return -EINVAL; 2775ad8b1aafSjsg 2776ad8b1aafSjsg /* get the voltage */ 2777f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB, 2778f005ef32Sjsg (void *)&vddnb); 2779ad8b1aafSjsg if (r) 2780ad8b1aafSjsg return r; 2781ad8b1aafSjsg 27825ca02815Sjsg return sysfs_emit(buf, "%d\n", vddnb); 2783ad8b1aafSjsg } 2784ad8b1aafSjsg 2785ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, 2786ad8b1aafSjsg struct device_attribute *attr, 2787ad8b1aafSjsg char *buf) 2788ad8b1aafSjsg { 27895ca02815Sjsg return sysfs_emit(buf, "vddnb\n"); 2790ad8b1aafSjsg } 2791ad8b1aafSjsg 2792*ce3b705aSjsg static int 
amdgpu_hwmon_get_power(struct device *dev, 2793f005ef32Sjsg enum amd_pp_sensors sensor) 2794ad8b1aafSjsg { 2795ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2796f005ef32Sjsg unsigned int uw; 2797ad8b1aafSjsg u32 query = 0; 2798f005ef32Sjsg int r; 2799ad8b1aafSjsg 2800f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query); 2801ad8b1aafSjsg if (r) 2802ad8b1aafSjsg return r; 2803ad8b1aafSjsg 2804ad8b1aafSjsg /* convert to microwatts */ 2805ad8b1aafSjsg uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; 2806ad8b1aafSjsg 2807f005ef32Sjsg return uw; 2808f005ef32Sjsg } 2809f005ef32Sjsg 2810f005ef32Sjsg static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, 2811f005ef32Sjsg struct device_attribute *attr, 2812f005ef32Sjsg char *buf) 2813f005ef32Sjsg { 2814*ce3b705aSjsg int val; 2815f005ef32Sjsg 2816f005ef32Sjsg val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER); 2817f005ef32Sjsg if (val < 0) 2818f005ef32Sjsg return val; 2819f005ef32Sjsg 2820f005ef32Sjsg return sysfs_emit(buf, "%u\n", val); 2821f005ef32Sjsg } 2822f005ef32Sjsg 2823f005ef32Sjsg static ssize_t amdgpu_hwmon_show_power_input(struct device *dev, 2824f005ef32Sjsg struct device_attribute *attr, 2825f005ef32Sjsg char *buf) 2826f005ef32Sjsg { 2827*ce3b705aSjsg int val; 2828f005ef32Sjsg 2829f005ef32Sjsg val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER); 2830f005ef32Sjsg if (val < 0) 2831f005ef32Sjsg return val; 2832f005ef32Sjsg 2833f005ef32Sjsg return sysfs_emit(buf, "%u\n", val); 2834ad8b1aafSjsg } 2835ad8b1aafSjsg 2836ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, 2837ad8b1aafSjsg struct device_attribute *attr, 2838ad8b1aafSjsg char *buf) 2839ad8b1aafSjsg { 28405ca02815Sjsg return sysfs_emit(buf, "%i\n", 0); 2841ad8b1aafSjsg } 2842ad8b1aafSjsg 28435ca02815Sjsg 28445ca02815Sjsg static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, 2845ad8b1aafSjsg struct device_attribute *attr, 28465ca02815Sjsg char *buf, 28475ca02815Sjsg enum pp_power_limit_level pp_limit_level) 2848ad8b1aafSjsg { 2849ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 28505ca02815Sjsg enum pp_power_type power_type = to_sensor_dev_attr(attr)->index; 28515ca02815Sjsg uint32_t limit; 2852ad8b1aafSjsg ssize_t size; 2853ad8b1aafSjsg int r; 2854ad8b1aafSjsg 2855ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2856ad8b1aafSjsg return -EPERM; 28575ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 28585ca02815Sjsg return -EPERM; 28595ca02815Sjsg 2860ad8b1aafSjsg r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2861ad8b1aafSjsg if (r < 0) { 2862ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2863ad8b1aafSjsg return r; 2864ad8b1aafSjsg } 2865ad8b1aafSjsg 28661bb76ff1Sjsg r = amdgpu_dpm_get_power_limit(adev, &limit, 28675ca02815Sjsg pp_limit_level, power_type); 28685ca02815Sjsg 28695ca02815Sjsg if (!r) 28705ca02815Sjsg size = sysfs_emit(buf, "%u\n", limit * 1000000); 28715ca02815Sjsg else 28725ca02815Sjsg size = sysfs_emit(buf, "\n"); 2873ad8b1aafSjsg 2874ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2875ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2876ad8b1aafSjsg 2877ad8b1aafSjsg return size; 2878ad8b1aafSjsg } 2879ad8b1aafSjsg 28805ca02815Sjsg 28815ca02815Sjsg static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, 28825ca02815Sjsg struct device_attribute *attr, 28835ca02815Sjsg char *buf) 28845ca02815Sjsg { 28855ca02815Sjsg return amdgpu_hwmon_show_power_cap_generic(dev, attr, 
buf, PP_PWR_LIMIT_MAX); 28865ca02815Sjsg 28875ca02815Sjsg } 28885ca02815Sjsg 2889ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, 2890ad8b1aafSjsg struct device_attribute *attr, 2891ad8b1aafSjsg char *buf) 2892ad8b1aafSjsg { 28935ca02815Sjsg return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT); 2894ad8b1aafSjsg 2895ad8b1aafSjsg } 2896ad8b1aafSjsg 28975ca02815Sjsg static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev, 28985ca02815Sjsg struct device_attribute *attr, 28995ca02815Sjsg char *buf) 29005ca02815Sjsg { 29015ca02815Sjsg return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT); 29025ca02815Sjsg 2903ad8b1aafSjsg } 2904ad8b1aafSjsg 29055ca02815Sjsg static ssize_t amdgpu_hwmon_show_power_label(struct device *dev, 29065ca02815Sjsg struct device_attribute *attr, 29075ca02815Sjsg char *buf) 29085ca02815Sjsg { 29091bb76ff1Sjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 29101bb76ff1Sjsg uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 2911ad8b1aafSjsg 29121bb76ff1Sjsg if (gc_ver == IP_VERSION(10, 3, 1)) 29135ca02815Sjsg return sysfs_emit(buf, "%s\n", 29141bb76ff1Sjsg to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ? 29151bb76ff1Sjsg "fastPPT" : "slowPPT"); 29161bb76ff1Sjsg else 29171bb76ff1Sjsg return sysfs_emit(buf, "PPT\n"); 2918ad8b1aafSjsg } 2919ad8b1aafSjsg 2920ad8b1aafSjsg static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, 2921ad8b1aafSjsg struct device_attribute *attr, 2922ad8b1aafSjsg const char *buf, 2923ad8b1aafSjsg size_t count) 2924ad8b1aafSjsg { 2925ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 29265ca02815Sjsg int limit_type = to_sensor_dev_attr(attr)->index; 2927ad8b1aafSjsg int err; 2928ad8b1aafSjsg u32 value; 2929ad8b1aafSjsg 2930ad8b1aafSjsg if (amdgpu_in_reset(adev)) 2931ad8b1aafSjsg return -EPERM; 29325ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 29335ca02815Sjsg return -EPERM; 2934ad8b1aafSjsg 2935ad8b1aafSjsg if (amdgpu_sriov_vf(adev)) 2936ad8b1aafSjsg return -EINVAL; 2937ad8b1aafSjsg 2938ad8b1aafSjsg err = kstrtou32(buf, 10, &value); 2939ad8b1aafSjsg if (err) 2940ad8b1aafSjsg return err; 2941ad8b1aafSjsg 2942ad8b1aafSjsg value = value / 1000000; /* convert to Watt */ 29435ca02815Sjsg value |= limit_type << 24; 2944ad8b1aafSjsg 2945ad8b1aafSjsg err = pm_runtime_get_sync(adev_to_drm(adev)->dev); 2946ad8b1aafSjsg if (err < 0) { 2947ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2948ad8b1aafSjsg return err; 2949ad8b1aafSjsg } 2950ad8b1aafSjsg 29511bb76ff1Sjsg err = amdgpu_dpm_set_power_limit(adev, value); 2952ad8b1aafSjsg 2953ad8b1aafSjsg pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); 2954ad8b1aafSjsg pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); 2955ad8b1aafSjsg 2956ad8b1aafSjsg if (err) 2957ad8b1aafSjsg return err; 2958ad8b1aafSjsg 2959ad8b1aafSjsg return count; 2960ad8b1aafSjsg } 2961ad8b1aafSjsg 2962ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, 2963ad8b1aafSjsg struct device_attribute *attr, 2964ad8b1aafSjsg char *buf) 2965ad8b1aafSjsg { 2966ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2967ad8b1aafSjsg uint32_t sclk; 2968f005ef32Sjsg int r; 2969ad8b1aafSjsg 2970ad8b1aafSjsg /* get the sclk */ 2971f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 2972f005ef32Sjsg (void *)&sclk); 2973ad8b1aafSjsg if (r) 2974ad8b1aafSjsg return r; 2975ad8b1aafSjsg 29765ca02815Sjsg return sysfs_emit(buf, "%u\n", sclk * 10 * 1000); 
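	/*
	 * Note: the GFX_SCLK sensor reports the clock in units of 10 kHz
	 * (amdgpu_debugfs_pm_info_pp() later in this file divides the same
	 * reading by 100 to print MHz), so the expression above scales the
	 * value to hertz for freq1_input.
	 */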
2977ad8b1aafSjsg } 2978ad8b1aafSjsg 2979ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, 2980ad8b1aafSjsg struct device_attribute *attr, 2981ad8b1aafSjsg char *buf) 2982ad8b1aafSjsg { 29835ca02815Sjsg return sysfs_emit(buf, "sclk\n"); 2984ad8b1aafSjsg } 2985ad8b1aafSjsg 2986ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, 2987ad8b1aafSjsg struct device_attribute *attr, 2988ad8b1aafSjsg char *buf) 2989ad8b1aafSjsg { 2990ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 2991ad8b1aafSjsg uint32_t mclk; 2992f005ef32Sjsg int r; 2993ad8b1aafSjsg 2994ad8b1aafSjsg /* get the sclk */ 2995f005ef32Sjsg r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 2996f005ef32Sjsg (void *)&mclk); 2997ad8b1aafSjsg if (r) 2998ad8b1aafSjsg return r; 2999ad8b1aafSjsg 30005ca02815Sjsg return sysfs_emit(buf, "%u\n", mclk * 10 * 1000); 3001ad8b1aafSjsg } 3002ad8b1aafSjsg 3003ad8b1aafSjsg static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, 3004ad8b1aafSjsg struct device_attribute *attr, 3005ad8b1aafSjsg char *buf) 3006ad8b1aafSjsg { 30075ca02815Sjsg return sysfs_emit(buf, "mclk\n"); 3008ad8b1aafSjsg } 3009ad8b1aafSjsg 3010ad8b1aafSjsg /** 3011ad8b1aafSjsg * DOC: hwmon 3012ad8b1aafSjsg * 3013ad8b1aafSjsg * The amdgpu driver exposes the following sensor interfaces: 3014ad8b1aafSjsg * 3015ad8b1aafSjsg * - GPU temperature (via the on-die sensor) 3016ad8b1aafSjsg * 3017ad8b1aafSjsg * - GPU voltage 3018ad8b1aafSjsg * 3019ad8b1aafSjsg * - Northbridge voltage (APUs only) 3020ad8b1aafSjsg * 3021ad8b1aafSjsg * - GPU power 3022ad8b1aafSjsg * 3023ad8b1aafSjsg * - GPU fan 3024ad8b1aafSjsg * 3025ad8b1aafSjsg * - GPU gfx/compute engine clock 3026ad8b1aafSjsg * 3027ad8b1aafSjsg * - GPU memory clock (dGPU only) 3028ad8b1aafSjsg * 3029ad8b1aafSjsg * hwmon interfaces for GPU temperature: 3030ad8b1aafSjsg * 3031ad8b1aafSjsg * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius 3032ad8b1aafSjsg * - temp2_input and temp3_input are supported on SOC15 dGPUs only 3033ad8b1aafSjsg * 3034ad8b1aafSjsg * - temp[1-3]_label: temperature channel label 3035ad8b1aafSjsg * - temp2_label and temp3_label are supported on SOC15 dGPUs only 3036ad8b1aafSjsg * 3037ad8b1aafSjsg * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius 3038ad8b1aafSjsg * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only 3039ad8b1aafSjsg * 3040ad8b1aafSjsg * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius 3041ad8b1aafSjsg * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only 3042ad8b1aafSjsg * 3043ad8b1aafSjsg * - temp[1-3]_emergency: temperature emergency max value(asic shutdown) in millidegrees Celsius 3044ad8b1aafSjsg * - these are supported on SOC15 dGPUs only 3045ad8b1aafSjsg * 3046ad8b1aafSjsg * hwmon interfaces for GPU voltage: 3047ad8b1aafSjsg * 3048ad8b1aafSjsg * - in0_input: the voltage on the GPU in millivolts 3049ad8b1aafSjsg * 3050ad8b1aafSjsg * - in1_input: the voltage on the Northbridge in millivolts 3051ad8b1aafSjsg * 3052ad8b1aafSjsg * hwmon interfaces for GPU power: 3053ad8b1aafSjsg * 3054f005ef32Sjsg * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU. 3055f005ef32Sjsg * 3056f005ef32Sjsg * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU. 
3057ad8b1aafSjsg  *
3058ad8b1aafSjsg  * - power1_cap_min: minimum cap supported in microWatts
3059ad8b1aafSjsg  *
3060ad8b1aafSjsg  * - power1_cap_max: maximum cap supported in microWatts
3061ad8b1aafSjsg  *
3062ad8b1aafSjsg  * - power1_cap: selected power cap in microWatts
3063ad8b1aafSjsg  *
3064ad8b1aafSjsg  * hwmon interfaces for GPU fan:
3065ad8b1aafSjsg  *
3066ad8b1aafSjsg  * - pwm1: pulse width modulation fan level (0-255)
3067ad8b1aafSjsg  *
3068ad8b1aafSjsg  * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3069ad8b1aafSjsg  *
3070ad8b1aafSjsg  * - pwm1_min: pulse width modulation fan control minimum level (0)
3071ad8b1aafSjsg  *
3072ad8b1aafSjsg  * - pwm1_max: pulse width modulation fan control maximum level (255)
3073ad8b1aafSjsg  *
30745ca02815Sjsg  * - fan1_min: minimum fan speed. Unit: revolution/min (RPM)
3075ad8b1aafSjsg  *
30765ca02815Sjsg  * - fan1_max: maximum fan speed. Unit: revolution/min (RPM)
3077ad8b1aafSjsg  *
3078ad8b1aafSjsg  * - fan1_input: fan speed in RPM
3079ad8b1aafSjsg  *
3080ad8b1aafSjsg  * - fan[1-\*]_target: desired fan speed. Unit: revolution/min (RPM)
3081ad8b1aafSjsg  *
3082ad8b1aafSjsg  * - fan[1-\*]_enable: enable or disable the sensors. 1: Enable 0: Disable
3083ad8b1aafSjsg  *
30845ca02815Sjsg  * NOTE: DO NOT set the fan speed via the "pwm1" and "fan[1-\*]_target" interfaces at the same time.
30855ca02815Sjsg  * If both are written, the former one (pwm1) will be overridden.
30865ca02815Sjsg  *
3087ad8b1aafSjsg  * hwmon interfaces for GPU clocks:
3088ad8b1aafSjsg  *
3089ad8b1aafSjsg  * - freq1_input: the gfx/compute clock in hertz
3090ad8b1aafSjsg  *
3091ad8b1aafSjsg  * - freq2_input: the memory clock in hertz
3092ad8b1aafSjsg  *
3093ad8b1aafSjsg  * You can use hwmon tools like sensors to view this information on your system.
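 *
 * As an illustrative example (the numbers and the hwmon index below are
 * hypothetical and vary by system), reading
 * /sys/class/hwmon/hwmon<N>/power1_average might return 42000000, i.e. 42 W,
 * and freq1_input might return 1800000000, i.e. a 1.8 GHz gfx clock, since
 * power values are reported in microWatts and clocks in hertz as noted above.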
3094ad8b1aafSjsg * 3095ad8b1aafSjsg */ 3096ad8b1aafSjsg 3097ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE); 3098ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 3099ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 3100ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE); 3101ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION); 3102ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0); 3103ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1); 3104ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION); 3105ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM); 3106ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0); 3107ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1); 3108ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM); 3109ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE); 3110ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION); 3111ad8b1aafSjsg static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM); 3112ad8b1aafSjsg static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 3113ad8b1aafSjsg static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 3114ad8b1aafSjsg static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 3115ad8b1aafSjsg static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 3116ad8b1aafSjsg static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 3117ad8b1aafSjsg static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0); 3118ad8b1aafSjsg static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0); 3119ad8b1aafSjsg static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0); 3120ad8b1aafSjsg static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0); 3121ad8b1aafSjsg static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0); 3122ad8b1aafSjsg static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); 3123ad8b1aafSjsg static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); 3124ad8b1aafSjsg static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); 3125ad8b1aafSjsg static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); 3126f005ef32Sjsg static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0); 3127ad8b1aafSjsg static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); 
3128ad8b1aafSjsg static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); 3129ad8b1aafSjsg static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); 31305ca02815Sjsg static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0); 31315ca02815Sjsg static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0); 31325ca02815Sjsg static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1); 31335ca02815Sjsg static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1); 31345ca02815Sjsg static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1); 31355ca02815Sjsg static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1); 31365ca02815Sjsg static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1); 31375ca02815Sjsg static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1); 3138ad8b1aafSjsg static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); 3139ad8b1aafSjsg static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); 3140ad8b1aafSjsg static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); 3141ad8b1aafSjsg static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); 3142ad8b1aafSjsg 3143ad8b1aafSjsg static struct attribute *hwmon_attributes[] = { 3144ad8b1aafSjsg &sensor_dev_attr_temp1_input.dev_attr.attr, 3145ad8b1aafSjsg &sensor_dev_attr_temp1_crit.dev_attr.attr, 3146ad8b1aafSjsg &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 3147ad8b1aafSjsg &sensor_dev_attr_temp2_input.dev_attr.attr, 3148ad8b1aafSjsg &sensor_dev_attr_temp2_crit.dev_attr.attr, 3149ad8b1aafSjsg &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, 3150ad8b1aafSjsg &sensor_dev_attr_temp3_input.dev_attr.attr, 3151ad8b1aafSjsg &sensor_dev_attr_temp3_crit.dev_attr.attr, 3152ad8b1aafSjsg &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, 3153ad8b1aafSjsg &sensor_dev_attr_temp1_emergency.dev_attr.attr, 3154ad8b1aafSjsg &sensor_dev_attr_temp2_emergency.dev_attr.attr, 3155ad8b1aafSjsg &sensor_dev_attr_temp3_emergency.dev_attr.attr, 3156ad8b1aafSjsg &sensor_dev_attr_temp1_label.dev_attr.attr, 3157ad8b1aafSjsg &sensor_dev_attr_temp2_label.dev_attr.attr, 3158ad8b1aafSjsg &sensor_dev_attr_temp3_label.dev_attr.attr, 3159ad8b1aafSjsg &sensor_dev_attr_pwm1.dev_attr.attr, 3160ad8b1aafSjsg &sensor_dev_attr_pwm1_enable.dev_attr.attr, 3161ad8b1aafSjsg &sensor_dev_attr_pwm1_min.dev_attr.attr, 3162ad8b1aafSjsg &sensor_dev_attr_pwm1_max.dev_attr.attr, 3163ad8b1aafSjsg &sensor_dev_attr_fan1_input.dev_attr.attr, 3164ad8b1aafSjsg &sensor_dev_attr_fan1_min.dev_attr.attr, 3165ad8b1aafSjsg &sensor_dev_attr_fan1_max.dev_attr.attr, 3166ad8b1aafSjsg &sensor_dev_attr_fan1_target.dev_attr.attr, 3167ad8b1aafSjsg &sensor_dev_attr_fan1_enable.dev_attr.attr, 3168ad8b1aafSjsg &sensor_dev_attr_in0_input.dev_attr.attr, 3169ad8b1aafSjsg &sensor_dev_attr_in0_label.dev_attr.attr, 3170ad8b1aafSjsg &sensor_dev_attr_in1_input.dev_attr.attr, 3171ad8b1aafSjsg &sensor_dev_attr_in1_label.dev_attr.attr, 3172ad8b1aafSjsg &sensor_dev_attr_power1_average.dev_attr.attr, 3173f005ef32Sjsg &sensor_dev_attr_power1_input.dev_attr.attr, 3174ad8b1aafSjsg &sensor_dev_attr_power1_cap_max.dev_attr.attr, 
3175ad8b1aafSjsg &sensor_dev_attr_power1_cap_min.dev_attr.attr, 3176ad8b1aafSjsg &sensor_dev_attr_power1_cap.dev_attr.attr, 31775ca02815Sjsg &sensor_dev_attr_power1_cap_default.dev_attr.attr, 31785ca02815Sjsg &sensor_dev_attr_power1_label.dev_attr.attr, 31795ca02815Sjsg &sensor_dev_attr_power2_average.dev_attr.attr, 31805ca02815Sjsg &sensor_dev_attr_power2_cap_max.dev_attr.attr, 31815ca02815Sjsg &sensor_dev_attr_power2_cap_min.dev_attr.attr, 31825ca02815Sjsg &sensor_dev_attr_power2_cap.dev_attr.attr, 31835ca02815Sjsg &sensor_dev_attr_power2_cap_default.dev_attr.attr, 31845ca02815Sjsg &sensor_dev_attr_power2_label.dev_attr.attr, 3185ad8b1aafSjsg &sensor_dev_attr_freq1_input.dev_attr.attr, 3186ad8b1aafSjsg &sensor_dev_attr_freq1_label.dev_attr.attr, 3187ad8b1aafSjsg &sensor_dev_attr_freq2_input.dev_attr.attr, 3188ad8b1aafSjsg &sensor_dev_attr_freq2_label.dev_attr.attr, 3189ad8b1aafSjsg NULL 3190ad8b1aafSjsg }; 3191ad8b1aafSjsg 3192ad8b1aafSjsg static umode_t hwmon_attributes_visible(struct kobject *kobj, 3193ad8b1aafSjsg struct attribute *attr, int index) 3194ad8b1aafSjsg { 3195ad8b1aafSjsg struct device *dev = kobj_to_dev(kobj); 3196ad8b1aafSjsg struct amdgpu_device *adev = dev_get_drvdata(dev); 3197ad8b1aafSjsg umode_t effective_mode = attr->mode; 31981bb76ff1Sjsg uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 3199f005ef32Sjsg uint32_t tmp; 3200ad8b1aafSjsg 3201ad8b1aafSjsg /* under multi-vf mode, the hwmon attributes are all not supported */ 3202ad8b1aafSjsg if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 3203ad8b1aafSjsg return 0; 3204ad8b1aafSjsg 32051bb76ff1Sjsg /* under pp one vf mode manage of hwmon attributes is not supported */ 32061bb76ff1Sjsg if (amdgpu_sriov_is_pp_one_vf(adev)) 32071bb76ff1Sjsg effective_mode &= ~S_IWUSR; 3208ad8b1aafSjsg 3209ad8b1aafSjsg /* Skip fan attributes if fan is not present */ 3210ad8b1aafSjsg if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3211ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3212ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3213ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3214ad8b1aafSjsg attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3215ad8b1aafSjsg attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3216ad8b1aafSjsg attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3217ad8b1aafSjsg attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3218ad8b1aafSjsg attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3219ad8b1aafSjsg return 0; 3220ad8b1aafSjsg 3221ad8b1aafSjsg /* Skip fan attributes on APU */ 3222ad8b1aafSjsg if ((adev->flags & AMD_IS_APU) && 3223ad8b1aafSjsg (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3224ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3225ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3226ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3227ad8b1aafSjsg attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3228ad8b1aafSjsg attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3229ad8b1aafSjsg attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3230ad8b1aafSjsg attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3231ad8b1aafSjsg attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3232ad8b1aafSjsg return 0; 3233ad8b1aafSjsg 3234ad8b1aafSjsg /* Skip crit temp on APU */ 3235f005ef32Sjsg if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) || 3236f005ef32Sjsg (gc_ver == IP_VERSION(9, 4, 3))) && 3237ad8b1aafSjsg (attr == 
&sensor_dev_attr_temp1_crit.dev_attr.attr || 3238ad8b1aafSjsg attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) 3239ad8b1aafSjsg return 0; 3240ad8b1aafSjsg 3241ad8b1aafSjsg /* Skip limit attributes if DPM is not enabled */ 3242ad8b1aafSjsg if (!adev->pm.dpm_enabled && 3243ad8b1aafSjsg (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 3244ad8b1aafSjsg attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 3245ad8b1aafSjsg attr == &sensor_dev_attr_pwm1.dev_attr.attr || 3246ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 3247ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3248ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 3249ad8b1aafSjsg attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 3250ad8b1aafSjsg attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 3251ad8b1aafSjsg attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3252ad8b1aafSjsg attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 3253ad8b1aafSjsg attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 3254ad8b1aafSjsg return 0; 3255ad8b1aafSjsg 3256ad8b1aafSjsg /* mask fan attributes if we have no bindings for this asic to expose */ 32571bb76ff1Sjsg if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) && 3258ad8b1aafSjsg attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 32591bb76ff1Sjsg ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) && 3260ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 3261ad8b1aafSjsg effective_mode &= ~S_IRUGO; 3262ad8b1aafSjsg 32631bb76ff1Sjsg if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) && 3264ad8b1aafSjsg attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 32651bb76ff1Sjsg ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) && 3266ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 3267ad8b1aafSjsg effective_mode &= ~S_IWUSR; 3268ad8b1aafSjsg 3269f005ef32Sjsg /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */ 32705ca02815Sjsg if (((adev->family == AMDGPU_FAMILY_SI) || 3271f005ef32Sjsg ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) && 3272f005ef32Sjsg (gc_ver != IP_VERSION(9, 4, 3)))) && 3273ad8b1aafSjsg (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 3274ad8b1aafSjsg attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr || 32755ca02815Sjsg attr == &sensor_dev_attr_power1_cap.dev_attr.attr || 32765ca02815Sjsg attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr)) 3277ad8b1aafSjsg return 0; 3278ad8b1aafSjsg 3279f005ef32Sjsg /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */ 3280ad8b1aafSjsg if (((adev->family == AMDGPU_FAMILY_SI) || 32811bb76ff1Sjsg ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) && 3282ad8b1aafSjsg (attr == &sensor_dev_attr_power1_average.dev_attr.attr)) 3283ad8b1aafSjsg return 0; 3284ad8b1aafSjsg 3285f005ef32Sjsg /* not all products support both average and instantaneous */ 3286f005ef32Sjsg if (attr == &sensor_dev_attr_power1_average.dev_attr.attr && 3287f005ef32Sjsg amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP) 3288f005ef32Sjsg return 0; 3289f005ef32Sjsg if (attr == &sensor_dev_attr_power1_input.dev_attr.attr && 3290f005ef32Sjsg amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP) 3291f005ef32Sjsg return 0; 3292f005ef32Sjsg 3293ad8b1aafSjsg /* hide max/min values if 
we can't both query and manage the fan */ 32941bb76ff1Sjsg if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) && 32951bb76ff1Sjsg (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) && 32961bb76ff1Sjsg (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) && 32971bb76ff1Sjsg (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) && 3298ad8b1aafSjsg (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 3299ad8b1aafSjsg attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 3300ad8b1aafSjsg return 0; 3301ad8b1aafSjsg 33021bb76ff1Sjsg if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) && 33031bb76ff1Sjsg (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) && 3304ad8b1aafSjsg (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 3305ad8b1aafSjsg attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) 3306ad8b1aafSjsg return 0; 3307ad8b1aafSjsg 3308ad8b1aafSjsg if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 3309f005ef32Sjsg adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */ 3310f005ef32Sjsg (gc_ver == IP_VERSION(9, 4, 3))) && 3311ad8b1aafSjsg (attr == &sensor_dev_attr_in0_input.dev_attr.attr || 3312ad8b1aafSjsg attr == &sensor_dev_attr_in0_label.dev_attr.attr)) 3313ad8b1aafSjsg return 0; 3314ad8b1aafSjsg 3315f005ef32Sjsg /* only APUs other than gc 9,4,3 have vddnb */ 3316f005ef32Sjsg if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) && 3317ad8b1aafSjsg (attr == &sensor_dev_attr_in1_input.dev_attr.attr || 3318ad8b1aafSjsg attr == &sensor_dev_attr_in1_label.dev_attr.attr)) 3319ad8b1aafSjsg return 0; 3320ad8b1aafSjsg 3321f005ef32Sjsg /* no mclk on APUs other than gc 9,4,3*/ 3322f005ef32Sjsg if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) && 3323ad8b1aafSjsg (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || 3324ad8b1aafSjsg attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) 3325ad8b1aafSjsg return 0; 3326ad8b1aafSjsg 33271bb76ff1Sjsg if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) && 3328f005ef32Sjsg (gc_ver != IP_VERSION(9, 4, 3)) && 3329f005ef32Sjsg (attr == &sensor_dev_attr_temp2_input.dev_attr.attr || 3330f005ef32Sjsg attr == &sensor_dev_attr_temp2_label.dev_attr.attr || 3331f005ef32Sjsg attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || 3332f005ef32Sjsg attr == &sensor_dev_attr_temp3_input.dev_attr.attr || 3333f005ef32Sjsg attr == &sensor_dev_attr_temp3_label.dev_attr.attr || 3334f005ef32Sjsg attr == &sensor_dev_attr_temp3_crit.dev_attr.attr)) 3335f005ef32Sjsg return 0; 3336f005ef32Sjsg 3337f005ef32Sjsg /* hotspot temperature for gc 9,4,3*/ 3338f005ef32Sjsg if ((gc_ver == IP_VERSION(9, 4, 3)) && 3339f005ef32Sjsg (attr == &sensor_dev_attr_temp1_input.dev_attr.attr || 3340f005ef32Sjsg attr == &sensor_dev_attr_temp1_label.dev_attr.attr)) 3341f005ef32Sjsg return 0; 3342f005ef32Sjsg 3343f005ef32Sjsg /* only SOC15 dGPUs support hotspot and mem temperatures */ 3344f005ef32Sjsg if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) || 3345f005ef32Sjsg (gc_ver == IP_VERSION(9, 4, 3))) && 3346f005ef32Sjsg (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || 3347ad8b1aafSjsg attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || 3348ad8b1aafSjsg attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || 3349ad8b1aafSjsg attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || 3350f005ef32Sjsg attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)) 3351ad8b1aafSjsg return 0; 3352ad8b1aafSjsg 33535ca02815Sjsg /* only Vangogh has fast PPT limit and power 
labels */ 33541bb76ff1Sjsg if (!(gc_ver == IP_VERSION(10, 3, 1)) && 33555ca02815Sjsg (attr == &sensor_dev_attr_power2_average.dev_attr.attr || 33565ca02815Sjsg attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr || 33575ca02815Sjsg attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr || 33585ca02815Sjsg attr == &sensor_dev_attr_power2_cap.dev_attr.attr || 33595ca02815Sjsg attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr || 3360ecd3ea8dSjsg attr == &sensor_dev_attr_power2_label.dev_attr.attr)) 33615ca02815Sjsg return 0; 33625ca02815Sjsg 3363ad8b1aafSjsg return effective_mode; 3364ad8b1aafSjsg } 3365ad8b1aafSjsg 3366ad8b1aafSjsg static const struct attribute_group hwmon_attrgroup = { 3367ad8b1aafSjsg .attrs = hwmon_attributes, 3368ad8b1aafSjsg .is_visible = hwmon_attributes_visible, 3369ad8b1aafSjsg }; 3370ad8b1aafSjsg 3371ad8b1aafSjsg static const struct attribute_group *hwmon_groups[] = { 3372ad8b1aafSjsg &hwmon_attrgroup, 3373ad8b1aafSjsg NULL 3374ad8b1aafSjsg }; 3375ad8b1aafSjsg 3376ad8b1aafSjsg #endif /* __linux__ */ 3377ad8b1aafSjsg 3378ad8b1aafSjsg int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 3379ad8b1aafSjsg { 3380ad8b1aafSjsg return 0; 3381ad8b1aafSjsg #ifdef __linux__ 3382ad8b1aafSjsg int ret; 3383ad8b1aafSjsg uint32_t mask = 0; 3384ad8b1aafSjsg 3385ad8b1aafSjsg if (adev->pm.sysfs_initialized) 3386ad8b1aafSjsg return 0; 3387ad8b1aafSjsg 33881bb76ff1Sjsg INIT_LIST_HEAD(&adev->pm.pm_attr_list); 33891bb76ff1Sjsg 3390ad8b1aafSjsg if (adev->pm.dpm_enabled == 0) 3391ad8b1aafSjsg return 0; 3392ad8b1aafSjsg 3393ad8b1aafSjsg adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 3394ad8b1aafSjsg DRIVER_NAME, adev, 3395ad8b1aafSjsg hwmon_groups); 3396ad8b1aafSjsg if (IS_ERR(adev->pm.int_hwmon_dev)) { 3397ad8b1aafSjsg ret = PTR_ERR(adev->pm.int_hwmon_dev); 3398ad8b1aafSjsg dev_err(adev->dev, 3399ad8b1aafSjsg "Unable to register hwmon device: %d\n", ret); 3400ad8b1aafSjsg return ret; 3401ad8b1aafSjsg } 3402ad8b1aafSjsg 3403ad8b1aafSjsg switch (amdgpu_virt_get_sriov_vf_mode(adev)) { 3404ad8b1aafSjsg case SRIOV_VF_MODE_ONE_VF: 3405ad8b1aafSjsg mask = ATTR_FLAG_ONEVF; 3406ad8b1aafSjsg break; 3407ad8b1aafSjsg case SRIOV_VF_MODE_MULTI_VF: 3408ad8b1aafSjsg mask = 0; 3409ad8b1aafSjsg break; 3410ad8b1aafSjsg case SRIOV_VF_MODE_BARE_METAL: 3411ad8b1aafSjsg default: 3412ad8b1aafSjsg mask = ATTR_FLAG_MASK_ALL; 3413ad8b1aafSjsg break; 3414ad8b1aafSjsg } 3415ad8b1aafSjsg 3416ad8b1aafSjsg ret = amdgpu_device_attr_create_groups(adev, 3417ad8b1aafSjsg amdgpu_device_attrs, 3418ad8b1aafSjsg ARRAY_SIZE(amdgpu_device_attrs), 3419ad8b1aafSjsg mask, 3420ad8b1aafSjsg &adev->pm.pm_attr_list); 3421ad8b1aafSjsg if (ret) 3422ad8b1aafSjsg return ret; 3423ad8b1aafSjsg 3424ad8b1aafSjsg adev->pm.sysfs_initialized = true; 3425ad8b1aafSjsg 3426ad8b1aafSjsg return 0; 3427ad8b1aafSjsg #endif 3428ad8b1aafSjsg } 3429ad8b1aafSjsg 3430ad8b1aafSjsg void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 3431ad8b1aafSjsg { 3432ad8b1aafSjsg #ifdef __linux__ 3433ad8b1aafSjsg if (adev->pm.int_hwmon_dev) 3434ad8b1aafSjsg hwmon_device_unregister(adev->pm.int_hwmon_dev); 3435ad8b1aafSjsg 3436ad8b1aafSjsg amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list); 3437ad8b1aafSjsg #endif 3438ad8b1aafSjsg } 3439ad8b1aafSjsg 3440ad8b1aafSjsg /* 3441ad8b1aafSjsg * Debugfs info 3442ad8b1aafSjsg */ 3443ad8b1aafSjsg #if defined(CONFIG_DEBUG_FS) 3444ad8b1aafSjsg 34455ca02815Sjsg static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m, 3446f005ef32Sjsg struct amdgpu_device *adev) 3447f005ef32Sjsg { 
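	/*
	 * When cclk DPM is supported, read the per-core CPU clock sensor
	 * into a temporary per-core array and emit one "<clk> MHz (CPUn)"
	 * line per core as part of the amdgpu_pm_info output.
	 */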
34485ca02815Sjsg uint16_t *p_val; 34495ca02815Sjsg uint32_t size; 34505ca02815Sjsg int i; 34511bb76ff1Sjsg uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev); 34525ca02815Sjsg 34531bb76ff1Sjsg if (amdgpu_dpm_is_cclk_dpm_supported(adev)) { 34541bb76ff1Sjsg p_val = kcalloc(num_cpu_cores, sizeof(uint16_t), 34555ca02815Sjsg GFP_KERNEL); 34565ca02815Sjsg 34575ca02815Sjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK, 34585ca02815Sjsg (void *)p_val, &size)) { 34591bb76ff1Sjsg for (i = 0; i < num_cpu_cores; i++) 34605ca02815Sjsg seq_printf(m, "\t%u MHz (CPU%d)\n", 34615ca02815Sjsg *(p_val + i), i); 34625ca02815Sjsg } 34635ca02815Sjsg 34645ca02815Sjsg kfree(p_val); 34655ca02815Sjsg } 34665ca02815Sjsg } 34675ca02815Sjsg 3468ad8b1aafSjsg static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 3469ad8b1aafSjsg { 34701bb76ff1Sjsg uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0]; 34711bb76ff1Sjsg uint32_t gc_ver = adev->ip_versions[GC_HWIP][0]; 3472ad8b1aafSjsg uint32_t value; 34735ca02815Sjsg uint64_t value64 = 0; 3474ad8b1aafSjsg uint32_t query = 0; 3475ad8b1aafSjsg int size; 3476ad8b1aafSjsg 3477ad8b1aafSjsg /* GPU Clocks */ 3478ad8b1aafSjsg size = sizeof(value); 3479ad8b1aafSjsg seq_printf(m, "GFX Clocks and Power:\n"); 34805ca02815Sjsg 34815ca02815Sjsg amdgpu_debugfs_prints_cpu_info(m, adev); 34825ca02815Sjsg 3483ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 3484ad8b1aafSjsg seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 3485ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 3486ad8b1aafSjsg seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 3487ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size)) 3488ad8b1aafSjsg seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100); 3489ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size)) 3490ad8b1aafSjsg seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100); 3491ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 3492ad8b1aafSjsg seq_printf(m, "\t%u mV (VDDGFX)\n", value); 3493ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 3494ad8b1aafSjsg seq_printf(m, "\t%u mV (VDDNB)\n", value); 3495ad8b1aafSjsg size = sizeof(uint32_t); 3496f005ef32Sjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) 3497ad8b1aafSjsg seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); 3498f005ef32Sjsg size = sizeof(uint32_t); 3499f005ef32Sjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) 3500f005ef32Sjsg seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff); 3501ad8b1aafSjsg size = sizeof(value); 3502ad8b1aafSjsg seq_printf(m, "\n"); 3503ad8b1aafSjsg 3504ad8b1aafSjsg /* GPU Temp */ 3505ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 3506ad8b1aafSjsg seq_printf(m, "GPU Temperature: %u C\n", value/1000); 3507ad8b1aafSjsg 3508ad8b1aafSjsg /* GPU Load */ 3509ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 3510ad8b1aafSjsg seq_printf(m, "GPU Load: %u %%\n", value); 3511ad8b1aafSjsg /* MEM Load */ 3512ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) 3513ad8b1aafSjsg seq_printf(m, "MEM Load: 
%u %%\n", value); 3514ad8b1aafSjsg 3515ad8b1aafSjsg seq_printf(m, "\n"); 3516ad8b1aafSjsg 3517ad8b1aafSjsg /* SMC feature mask */ 3518ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) 3519ad8b1aafSjsg seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); 3520ad8b1aafSjsg 35211bb76ff1Sjsg /* ASICs greater than CHIP_VEGA20 supports these sensors */ 35221bb76ff1Sjsg if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) { 3523ad8b1aafSjsg /* VCN clocks */ 3524ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { 3525ad8b1aafSjsg if (!value) { 3526ad8b1aafSjsg seq_printf(m, "VCN: Disabled\n"); 3527ad8b1aafSjsg } else { 3528ad8b1aafSjsg seq_printf(m, "VCN: Enabled\n"); 3529ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3530ad8b1aafSjsg seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3531ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3532ad8b1aafSjsg seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3533ad8b1aafSjsg } 3534ad8b1aafSjsg } 3535ad8b1aafSjsg seq_printf(m, "\n"); 3536ad8b1aafSjsg } else { 3537ad8b1aafSjsg /* UVD clocks */ 3538ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 3539ad8b1aafSjsg if (!value) { 3540ad8b1aafSjsg seq_printf(m, "UVD: Disabled\n"); 3541ad8b1aafSjsg } else { 3542ad8b1aafSjsg seq_printf(m, "UVD: Enabled\n"); 3543ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3544ad8b1aafSjsg seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3545ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3546ad8b1aafSjsg seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3547ad8b1aafSjsg } 3548ad8b1aafSjsg } 3549ad8b1aafSjsg seq_printf(m, "\n"); 3550ad8b1aafSjsg 3551ad8b1aafSjsg /* VCE clocks */ 3552ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 3553ad8b1aafSjsg if (!value) { 3554ad8b1aafSjsg seq_printf(m, "VCE: Disabled\n"); 3555ad8b1aafSjsg } else { 3556ad8b1aafSjsg seq_printf(m, "VCE: Enabled\n"); 3557ad8b1aafSjsg if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 3558ad8b1aafSjsg seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 3559ad8b1aafSjsg } 3560ad8b1aafSjsg } 3561ad8b1aafSjsg } 3562ad8b1aafSjsg 3563ad8b1aafSjsg return 0; 3564ad8b1aafSjsg } 3565ad8b1aafSjsg 3566f005ef32Sjsg static const struct cg_flag_name clocks[] = { 3567f005ef32Sjsg {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"}, 3568f005ef32Sjsg {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 3569f005ef32Sjsg {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 3570f005ef32Sjsg {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 3571f005ef32Sjsg {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 3572f005ef32Sjsg {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 3573f005ef32Sjsg {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 3574f005ef32Sjsg {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 3575f005ef32Sjsg {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 3576f005ef32Sjsg {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 3577f005ef32Sjsg {AMD_CG_SUPPORT_GFX_3D_CGLS, 
"Graphics 3D Coarse Grain memory Light Sleep"}, 3578f005ef32Sjsg {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 3579f005ef32Sjsg {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 3580f005ef32Sjsg {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 3581f005ef32Sjsg {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 3582f005ef32Sjsg {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 3583f005ef32Sjsg {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 3584f005ef32Sjsg {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 3585f005ef32Sjsg {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 3586f005ef32Sjsg {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 3587f005ef32Sjsg {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 3588f005ef32Sjsg {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"}, 3589f005ef32Sjsg {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 3590f005ef32Sjsg {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 3591f005ef32Sjsg {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 3592f005ef32Sjsg {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"}, 3593f005ef32Sjsg {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"}, 3594f005ef32Sjsg {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"}, 3595f005ef32Sjsg {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"}, 3596f005ef32Sjsg {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"}, 3597f005ef32Sjsg {AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"}, 3598f005ef32Sjsg {AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"}, 3599f005ef32Sjsg {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, 3600f005ef32Sjsg {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, 3601f005ef32Sjsg {0, NULL}, 3602f005ef32Sjsg }; 3603f005ef32Sjsg 36041bb76ff1Sjsg static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags) 3605ad8b1aafSjsg { 3606ad8b1aafSjsg int i; 3607ad8b1aafSjsg 3608ad8b1aafSjsg for (i = 0; clocks[i].flag; i++) 3609ad8b1aafSjsg seq_printf(m, "\t%s: %s\n", clocks[i].name, 3610ad8b1aafSjsg (flags & clocks[i].flag) ? 
"On" : "Off"); 3611ad8b1aafSjsg } 3612ad8b1aafSjsg 36135ca02815Sjsg static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) 3614ad8b1aafSjsg { 36155ca02815Sjsg struct amdgpu_device *adev = (struct amdgpu_device *)m->private; 36165ca02815Sjsg struct drm_device *dev = adev_to_drm(adev); 36171bb76ff1Sjsg u64 flags = 0; 3618ad8b1aafSjsg int r; 3619ad8b1aafSjsg 3620ad8b1aafSjsg if (amdgpu_in_reset(adev)) 3621ad8b1aafSjsg return -EPERM; 36225ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 36235ca02815Sjsg return -EPERM; 3624ad8b1aafSjsg 3625ad8b1aafSjsg r = pm_runtime_get_sync(dev->dev); 3626ad8b1aafSjsg if (r < 0) { 3627ad8b1aafSjsg pm_runtime_put_autosuspend(dev->dev); 3628ad8b1aafSjsg return r; 3629ad8b1aafSjsg } 3630ad8b1aafSjsg 36311bb76ff1Sjsg if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) { 3632ad8b1aafSjsg r = amdgpu_debugfs_pm_info_pp(m, adev); 3633ad8b1aafSjsg if (r) 3634ad8b1aafSjsg goto out; 36351bb76ff1Sjsg } 3636ad8b1aafSjsg 3637ad8b1aafSjsg amdgpu_device_ip_get_clockgating_state(adev, &flags); 3638ad8b1aafSjsg 36391bb76ff1Sjsg seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags); 3640ad8b1aafSjsg amdgpu_parse_cg_state(m, flags); 3641ad8b1aafSjsg seq_printf(m, "\n"); 3642ad8b1aafSjsg 3643ad8b1aafSjsg out: 3644ad8b1aafSjsg pm_runtime_mark_last_busy(dev->dev); 3645ad8b1aafSjsg pm_runtime_put_autosuspend(dev->dev); 3646ad8b1aafSjsg 3647ad8b1aafSjsg return r; 3648ad8b1aafSjsg } 3649ad8b1aafSjsg 36505ca02815Sjsg DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info); 36515ca02815Sjsg 36525ca02815Sjsg /* 36535ca02815Sjsg * amdgpu_pm_priv_buffer_read - Read memory region allocated to FW 36545ca02815Sjsg * 36555ca02815Sjsg * Reads debug memory region allocated to PMFW 36565ca02815Sjsg */ 36575ca02815Sjsg static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf, 36585ca02815Sjsg size_t size, loff_t *pos) 36595ca02815Sjsg { 36605ca02815Sjsg struct amdgpu_device *adev = file_inode(f)->i_private; 36615ca02815Sjsg size_t smu_prv_buf_size; 36625ca02815Sjsg void *smu_prv_buf; 36631bb76ff1Sjsg int ret = 0; 36645ca02815Sjsg 36655ca02815Sjsg if (amdgpu_in_reset(adev)) 36665ca02815Sjsg return -EPERM; 36675ca02815Sjsg if (adev->in_suspend && !adev->in_runpm) 36685ca02815Sjsg return -EPERM; 36695ca02815Sjsg 36701bb76ff1Sjsg ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size); 36711bb76ff1Sjsg if (ret) 36721bb76ff1Sjsg return ret; 36735ca02815Sjsg 36745ca02815Sjsg if (!smu_prv_buf || !smu_prv_buf_size) 36755ca02815Sjsg return -EINVAL; 36765ca02815Sjsg 36775ca02815Sjsg return simple_read_from_buffer(buf, size, pos, smu_prv_buf, 36785ca02815Sjsg smu_prv_buf_size); 36795ca02815Sjsg } 36805ca02815Sjsg 36815ca02815Sjsg static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = { 36825ca02815Sjsg .owner = THIS_MODULE, 36835ca02815Sjsg .open = simple_open, 36845ca02815Sjsg .read = amdgpu_pm_prv_buffer_read, 36855ca02815Sjsg .llseek = default_llseek, 3686ad8b1aafSjsg }; 36875ca02815Sjsg 3688ad8b1aafSjsg #endif 3689ad8b1aafSjsg 36905ca02815Sjsg void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 3691ad8b1aafSjsg { 3692ad8b1aafSjsg #if defined(CONFIG_DEBUG_FS) 36935ca02815Sjsg struct drm_minor *minor = adev_to_drm(adev)->primary; 36945ca02815Sjsg struct dentry *root = minor->debugfs_root; 36955ca02815Sjsg 36961bb76ff1Sjsg if (!adev->pm.dpm_enabled) 36971bb76ff1Sjsg return; 36981bb76ff1Sjsg 36995ca02815Sjsg debugfs_create_file("amdgpu_pm_info", 0444, root, adev, 37005ca02815Sjsg &amdgpu_debugfs_pm_info_fops); 
37015ca02815Sjsg 37025ca02815Sjsg if (adev->pm.smu_prv_buffer_size > 0) 37035ca02815Sjsg debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root, 37045ca02815Sjsg adev, 37055ca02815Sjsg &amdgpu_debugfs_pm_prv_buffer_fops, 37065ca02815Sjsg adev->pm.smu_prv_buffer_size); 37071bb76ff1Sjsg 37081bb76ff1Sjsg amdgpu_dpm_stb_debug_fs_init(adev); 3709ad8b1aafSjsg #endif 3710ad8b1aafSjsg } 3711