1 /* 2 * Copyright 2011 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Authors: Alex Deucher 23 */ 24 25 #include "amdgpu.h" 26 #include "amdgpu_atombios.h" 27 #include "amdgpu_i2c.h" 28 #include "amdgpu_dpm.h" 29 #include "atom.h" 30 #include "amd_pcie.h" 31 #include "amdgpu_display.h" 32 #include "hwmgr.h" 33 #include <linux/power_supply.h> 34 35 #define WIDTH_4K 3840 36 37 void amdgpu_dpm_print_class_info(u32 class, u32 class2) 38 { 39 const char *s; 40 41 switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { 42 case ATOM_PPLIB_CLASSIFICATION_UI_NONE: 43 default: 44 s = "none"; 45 break; 46 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: 47 s = "battery"; 48 break; 49 case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: 50 s = "balanced"; 51 break; 52 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: 53 s = "performance"; 54 break; 55 } 56 printk("\tui class: %s\n", s); 57 printk("\tinternal class:"); 58 if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && 59 (class2 == 0)) 60 pr_cont(" none"); 61 else { 62 if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) 63 pr_cont(" boot"); 64 if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) 65 pr_cont(" thermal"); 66 if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) 67 pr_cont(" limited_pwr"); 68 if (class & ATOM_PPLIB_CLASSIFICATION_REST) 69 pr_cont(" rest"); 70 if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) 71 pr_cont(" forced"); 72 if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 73 pr_cont(" 3d_perf"); 74 if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) 75 pr_cont(" ovrdrv"); 76 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) 77 pr_cont(" uvd"); 78 if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) 79 pr_cont(" 3d_low"); 80 if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) 81 pr_cont(" acpi"); 82 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 83 pr_cont(" uvd_hd2"); 84 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 85 pr_cont(" uvd_hd"); 86 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 87 pr_cont(" uvd_sd"); 88 if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) 89 pr_cont(" limited_pwr2"); 90 if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 91 pr_cont(" ulv"); 92 if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 93 pr_cont(" uvd_mvc"); 94 } 95 pr_cont("\n"); 96 } 97 98 void amdgpu_dpm_print_cap_info(u32 caps) 99 { 100 printk("\tcaps:"); 101 if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) 102 pr_cont(" single_disp"); 103 if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) 104 pr_cont(" 
video"); 105 if (caps & ATOM_PPLIB_DISALLOW_ON_DC) 106 pr_cont(" no_dc"); 107 pr_cont("\n"); 108 } 109 110 void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, 111 struct amdgpu_ps *rps) 112 { 113 printk("\tstatus:"); 114 if (rps == adev->pm.dpm.current_ps) 115 pr_cont(" c"); 116 if (rps == adev->pm.dpm.requested_ps) 117 pr_cont(" r"); 118 if (rps == adev->pm.dpm.boot_ps) 119 pr_cont(" b"); 120 pr_cont("\n"); 121 } 122 123 void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) 124 { 125 struct drm_device *ddev = adev_to_drm(adev); 126 struct drm_crtc *crtc; 127 struct amdgpu_crtc *amdgpu_crtc; 128 129 adev->pm.dpm.new_active_crtcs = 0; 130 adev->pm.dpm.new_active_crtc_count = 0; 131 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 132 list_for_each_entry(crtc, 133 &ddev->mode_config.crtc_list, head) { 134 amdgpu_crtc = to_amdgpu_crtc(crtc); 135 if (amdgpu_crtc->enabled) { 136 adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 137 adev->pm.dpm.new_active_crtc_count++; 138 } 139 } 140 } 141 } 142 143 144 u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) 145 { 146 struct drm_device *dev = adev_to_drm(adev); 147 struct drm_crtc *crtc; 148 struct amdgpu_crtc *amdgpu_crtc; 149 u32 vblank_in_pixels; 150 u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ 151 152 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 153 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 154 amdgpu_crtc = to_amdgpu_crtc(crtc); 155 if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 156 vblank_in_pixels = 157 amdgpu_crtc->hw_mode.crtc_htotal * 158 (amdgpu_crtc->hw_mode.crtc_vblank_end - 159 amdgpu_crtc->hw_mode.crtc_vdisplay + 160 (amdgpu_crtc->v_border * 2)); 161 162 vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; 163 break; 164 } 165 } 166 } 167 168 return vblank_time_us; 169 } 170 171 u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) 172 { 173 struct drm_device *dev = adev_to_drm(adev); 174 struct drm_crtc *crtc; 175 struct amdgpu_crtc *amdgpu_crtc; 176 u32 vrefresh = 0; 177 178 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 179 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 180 amdgpu_crtc = to_amdgpu_crtc(crtc); 181 if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 182 vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 183 break; 184 } 185 } 186 } 187 188 return vrefresh; 189 } 190 191 bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) 192 { 193 switch (sensor) { 194 case THERMAL_TYPE_RV6XX: 195 case THERMAL_TYPE_RV770: 196 case THERMAL_TYPE_EVERGREEN: 197 case THERMAL_TYPE_SUMO: 198 case THERMAL_TYPE_NI: 199 case THERMAL_TYPE_SI: 200 case THERMAL_TYPE_CI: 201 case THERMAL_TYPE_KV: 202 return true; 203 case THERMAL_TYPE_ADT7473_WITH_INTERNAL: 204 case THERMAL_TYPE_EMC2103_WITH_INTERNAL: 205 return false; /* need special handling */ 206 case THERMAL_TYPE_NONE: 207 case THERMAL_TYPE_EXTERNAL: 208 case THERMAL_TYPE_EXTERNAL_GPIO: 209 default: 210 return false; 211 } 212 } 213 214 union power_info { 215 struct _ATOM_POWERPLAY_INFO info; 216 struct _ATOM_POWERPLAY_INFO_V2 info_2; 217 struct _ATOM_POWERPLAY_INFO_V3 info_3; 218 struct _ATOM_PPLIB_POWERPLAYTABLE pplib; 219 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; 220 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; 221 struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; 222 struct _ATOM_PPLIB_POWERPLAYTABLE5 
pplib5; 223 }; 224 225 union fan_info { 226 struct _ATOM_PPLIB_FANTABLE fan; 227 struct _ATOM_PPLIB_FANTABLE2 fan2; 228 struct _ATOM_PPLIB_FANTABLE3 fan3; 229 }; 230 231 static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, 232 ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) 233 { 234 u32 size = atom_table->ucNumEntries * 235 sizeof(struct amdgpu_clock_voltage_dependency_entry); 236 int i; 237 ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; 238 239 amdgpu_table->entries = kzalloc(size, GFP_KERNEL); 240 if (!amdgpu_table->entries) 241 return -ENOMEM; 242 243 entry = &atom_table->entries[0]; 244 for (i = 0; i < atom_table->ucNumEntries; i++) { 245 amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | 246 (entry->ucClockHigh << 16); 247 amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); 248 entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) 249 ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); 250 } 251 amdgpu_table->count = atom_table->ucNumEntries; 252 253 return 0; 254 } 255 256 int amdgpu_get_platform_caps(struct amdgpu_device *adev) 257 { 258 struct amdgpu_mode_info *mode_info = &adev->mode_info; 259 union power_info *power_info; 260 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 261 u16 data_offset; 262 u8 frev, crev; 263 264 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 265 &frev, &crev, &data_offset)) 266 return -EINVAL; 267 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 268 269 adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); 270 adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); 271 adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); 272 273 return 0; 274 } 275 276 /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ 277 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 278 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 279 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 280 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 281 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 282 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 283 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 284 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 285 286 int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) 287 { 288 struct amdgpu_mode_info *mode_info = &adev->mode_info; 289 union power_info *power_info; 290 union fan_info *fan_info; 291 ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; 292 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 293 u16 data_offset; 294 u8 frev, crev; 295 int ret, i; 296 297 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 298 &frev, &crev, &data_offset)) 299 return -EINVAL; 300 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); 301 302 /* fan table */ 303 if (le16_to_cpu(power_info->pplib.usTableSize) >= 304 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 305 if (power_info->pplib3.usFanTableOffset) { 306 fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + 307 le16_to_cpu(power_info->pplib3.usFanTableOffset)); 308 adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; 309 adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); 310 adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); 311 adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); 312 adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); 313 
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
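			/* per-entry sclk/mclk/voltage triples describing the VDDC phase shedding limits */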
ATOM_PPLIB_PhaseSheddingLimits_Table *psl = 399 (ATOM_PPLIB_PhaseSheddingLimits_Table *) 400 (mode_info->atom_context->bios + data_offset + 401 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); 402 ATOM_PPLIB_PhaseSheddingLimits_Record *entry; 403 404 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = 405 kcalloc(psl->ucNumEntries, 406 sizeof(struct amdgpu_phase_shedding_limits_entry), 407 GFP_KERNEL); 408 if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { 409 amdgpu_free_extended_power_table(adev); 410 return -ENOMEM; 411 } 412 413 entry = &psl->entries[0]; 414 for (i = 0; i < psl->ucNumEntries; i++) { 415 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = 416 le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); 417 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = 418 le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); 419 adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = 420 le16_to_cpu(entry->usVoltage); 421 entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) 422 ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); 423 } 424 adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = 425 psl->ucNumEntries; 426 } 427 } 428 429 /* cac data */ 430 if (le16_to_cpu(power_info->pplib.usTableSize) >= 431 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { 432 adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); 433 adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); 434 adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; 435 adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); 436 if (adev->pm.dpm.tdp_od_limit) 437 adev->pm.dpm.power_control = true; 438 else 439 adev->pm.dpm.power_control = false; 440 adev->pm.dpm.tdp_adjustment = 0; 441 adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); 442 adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); 443 adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); 444 if (power_info->pplib5.usCACLeakageTableOffset) { 445 ATOM_PPLIB_CAC_Leakage_Table *cac_table = 446 (ATOM_PPLIB_CAC_Leakage_Table *) 447 (mode_info->atom_context->bios + data_offset + 448 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); 449 ATOM_PPLIB_CAC_Leakage_Record *entry; 450 u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); 451 adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); 452 if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { 453 amdgpu_free_extended_power_table(adev); 454 return -ENOMEM; 455 } 456 entry = &cac_table->entries[0]; 457 for (i = 0; i < cac_table->ucNumEntries; i++) { 458 if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { 459 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = 460 le16_to_cpu(entry->usVddc1); 461 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = 462 le16_to_cpu(entry->usVddc2); 463 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = 464 le16_to_cpu(entry->usVddc3); 465 } else { 466 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = 467 le16_to_cpu(entry->usVddc); 468 adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = 469 le32_to_cpu(entry->ulLeakageValue); 470 } 471 entry = (ATOM_PPLIB_CAC_Leakage_Record *) 472 ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); 473 } 474 adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; 475 } 
476 } 477 478 /* ext tables */ 479 if (le16_to_cpu(power_info->pplib.usTableSize) >= 480 sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { 481 ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) 482 (mode_info->atom_context->bios + data_offset + 483 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); 484 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && 485 ext_hdr->usVCETableOffset) { 486 VCEClockInfoArray *array = (VCEClockInfoArray *) 487 (mode_info->atom_context->bios + data_offset + 488 le16_to_cpu(ext_hdr->usVCETableOffset) + 1); 489 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = 490 (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) 491 (mode_info->atom_context->bios + data_offset + 492 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 493 1 + array->ucNumEntries * sizeof(VCEClockInfo)); 494 ATOM_PPLIB_VCE_State_Table *states = 495 (ATOM_PPLIB_VCE_State_Table *) 496 (mode_info->atom_context->bios + data_offset + 497 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + 498 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + 499 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); 500 ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; 501 ATOM_PPLIB_VCE_State_Record *state_entry; 502 VCEClockInfo *vce_clk; 503 u32 size = limits->numEntries * 504 sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); 505 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = 506 kzalloc(size, GFP_KERNEL); 507 if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { 508 amdgpu_free_extended_power_table(adev); 509 return -ENOMEM; 510 } 511 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = 512 limits->numEntries; 513 entry = &limits->entries[0]; 514 state_entry = &states->entries[0]; 515 for (i = 0; i < limits->numEntries; i++) { 516 vce_clk = (VCEClockInfo *) 517 ((u8 *)&array->entries[0] + 518 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); 519 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = 520 le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); 521 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = 522 le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); 523 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = 524 le16_to_cpu(entry->usVoltage); 525 entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) 526 ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); 527 } 528 adev->pm.dpm.num_of_vce_states = 529 states->numEntries > AMD_MAX_VCE_LEVELS ? 
530 AMD_MAX_VCE_LEVELS : states->numEntries; 531 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { 532 vce_clk = (VCEClockInfo *) 533 ((u8 *)&array->entries[0] + 534 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); 535 adev->pm.dpm.vce_states[i].evclk = 536 le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); 537 adev->pm.dpm.vce_states[i].ecclk = 538 le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); 539 adev->pm.dpm.vce_states[i].clk_idx = 540 state_entry->ucClockInfoIndex & 0x3f; 541 adev->pm.dpm.vce_states[i].pstate = 542 (state_entry->ucClockInfoIndex & 0xc0) >> 6; 543 state_entry = (ATOM_PPLIB_VCE_State_Record *) 544 ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); 545 } 546 } 547 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && 548 ext_hdr->usUVDTableOffset) { 549 UVDClockInfoArray *array = (UVDClockInfoArray *) 550 (mode_info->atom_context->bios + data_offset + 551 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); 552 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = 553 (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) 554 (mode_info->atom_context->bios + data_offset + 555 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + 556 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); 557 ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; 558 u32 size = limits->numEntries * 559 sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); 560 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = 561 kzalloc(size, GFP_KERNEL); 562 if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { 563 amdgpu_free_extended_power_table(adev); 564 return -ENOMEM; 565 } 566 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = 567 limits->numEntries; 568 entry = &limits->entries[0]; 569 for (i = 0; i < limits->numEntries; i++) { 570 UVDClockInfo *uvd_clk = (UVDClockInfo *) 571 ((u8 *)&array->entries[0] + 572 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); 573 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = 574 le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); 575 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = 576 le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); 577 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = 578 le16_to_cpu(entry->usVoltage); 579 entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) 580 ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); 581 } 582 } 583 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && 584 ext_hdr->usSAMUTableOffset) { 585 ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = 586 (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) 587 (mode_info->atom_context->bios + data_offset + 588 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); 589 ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; 590 u32 size = limits->numEntries * 591 sizeof(struct amdgpu_clock_voltage_dependency_entry); 592 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = 593 kzalloc(size, GFP_KERNEL); 594 if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { 595 amdgpu_free_extended_power_table(adev); 596 return -ENOMEM; 597 } 598 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = 599 limits->numEntries; 600 entry = &limits->entries[0]; 601 for (i = 0; i < limits->numEntries; i++) { 602 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = 603 le16_to_cpu(entry->usSAMClockLow) | 
(entry->ucSAMClockHigh << 16); 604 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = 605 le16_to_cpu(entry->usVoltage); 606 entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) 607 ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); 608 } 609 } 610 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && 611 ext_hdr->usPPMTableOffset) { 612 ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) 613 (mode_info->atom_context->bios + data_offset + 614 le16_to_cpu(ext_hdr->usPPMTableOffset)); 615 adev->pm.dpm.dyn_state.ppm_table = 616 kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); 617 if (!adev->pm.dpm.dyn_state.ppm_table) { 618 amdgpu_free_extended_power_table(adev); 619 return -ENOMEM; 620 } 621 adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; 622 adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = 623 le16_to_cpu(ppm->usCpuCoreNumber); 624 adev->pm.dpm.dyn_state.ppm_table->platform_tdp = 625 le32_to_cpu(ppm->ulPlatformTDP); 626 adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = 627 le32_to_cpu(ppm->ulSmallACPlatformTDP); 628 adev->pm.dpm.dyn_state.ppm_table->platform_tdc = 629 le32_to_cpu(ppm->ulPlatformTDC); 630 adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = 631 le32_to_cpu(ppm->ulSmallACPlatformTDC); 632 adev->pm.dpm.dyn_state.ppm_table->apu_tdp = 633 le32_to_cpu(ppm->ulApuTDP); 634 adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = 635 le32_to_cpu(ppm->ulDGpuTDP); 636 adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = 637 le32_to_cpu(ppm->ulDGpuUlvPower); 638 adev->pm.dpm.dyn_state.ppm_table->tj_max = 639 le32_to_cpu(ppm->ulTjmax); 640 } 641 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && 642 ext_hdr->usACPTableOffset) { 643 ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = 644 (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) 645 (mode_info->atom_context->bios + data_offset + 646 le16_to_cpu(ext_hdr->usACPTableOffset) + 1); 647 ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; 648 u32 size = limits->numEntries * 649 sizeof(struct amdgpu_clock_voltage_dependency_entry); 650 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = 651 kzalloc(size, GFP_KERNEL); 652 if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { 653 amdgpu_free_extended_power_table(adev); 654 return -ENOMEM; 655 } 656 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = 657 limits->numEntries; 658 entry = &limits->entries[0]; 659 for (i = 0; i < limits->numEntries; i++) { 660 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = 661 le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); 662 adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = 663 le16_to_cpu(entry->usVoltage); 664 entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) 665 ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); 666 } 667 } 668 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && 669 ext_hdr->usPowerTuneTableOffset) { 670 u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + 671 le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 672 ATOM_PowerTune_Table *pt; 673 adev->pm.dpm.dyn_state.cac_tdp_table = 674 kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); 675 if (!adev->pm.dpm.dyn_state.cac_tdp_table) { 676 amdgpu_free_extended_power_table(adev); 677 return -ENOMEM; 678 } 679 if (rev > 0) { 680 ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) 681 
(mode_info->atom_context->bios + data_offset + 682 le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 683 adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 684 ppt->usMaximumPowerDeliveryLimit; 685 pt = &ppt->power_tune_table; 686 } else { 687 ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) 688 (mode_info->atom_context->bios + data_offset + 689 le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 690 adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; 691 pt = &ppt->power_tune_table; 692 } 693 adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); 694 adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = 695 le16_to_cpu(pt->usConfigurableTDP); 696 adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); 697 adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = 698 le16_to_cpu(pt->usBatteryPowerLimit); 699 adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = 700 le16_to_cpu(pt->usSmallPowerLimit); 701 adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = 702 le16_to_cpu(pt->usLowCACLeakage); 703 adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = 704 le16_to_cpu(pt->usHighCACLeakage); 705 } 706 if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && 707 ext_hdr->usSclkVddgfxTableOffset) { 708 dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) 709 (mode_info->atom_context->bios + data_offset + 710 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); 711 ret = amdgpu_parse_clk_voltage_dep_table( 712 &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, 713 dep_table); 714 if (ret) { 715 kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); 716 return ret; 717 } 718 } 719 } 720 721 return 0; 722 } 723 724 void amdgpu_free_extended_power_table(struct amdgpu_device *adev) 725 { 726 struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state; 727 728 kfree(dyn_state->vddc_dependency_on_sclk.entries); 729 kfree(dyn_state->vddci_dependency_on_mclk.entries); 730 kfree(dyn_state->vddc_dependency_on_mclk.entries); 731 kfree(dyn_state->mvdd_dependency_on_mclk.entries); 732 kfree(dyn_state->cac_leakage_table.entries); 733 kfree(dyn_state->phase_shedding_limits_table.entries); 734 kfree(dyn_state->ppm_table); 735 kfree(dyn_state->cac_tdp_table); 736 kfree(dyn_state->vce_clock_voltage_dependency_table.entries); 737 kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); 738 kfree(dyn_state->samu_clock_voltage_dependency_table.entries); 739 kfree(dyn_state->acp_clock_voltage_dependency_table.entries); 740 kfree(dyn_state->vddgfx_dependency_on_sclk.entries); 741 } 742 743 static const char *pp_lib_thermal_controller_names[] = { 744 "NONE", 745 "lm63", 746 "adm1032", 747 "adm1030", 748 "max6649", 749 "lm64", 750 "f75375", 751 "RV6xx", 752 "RV770", 753 "adt7473", 754 "NONE", 755 "External GPIO", 756 "Evergreen", 757 "emc2103", 758 "Sumo", 759 "Northern Islands", 760 "Southern Islands", 761 "lm96163", 762 "Sea Islands", 763 "Kaveri/Kabini", 764 }; 765 766 void amdgpu_add_thermal_controller(struct amdgpu_device *adev) 767 { 768 struct amdgpu_mode_info *mode_info = &adev->mode_info; 769 ATOM_PPLIB_POWERPLAYTABLE *power_table; 770 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); 771 ATOM_PPLIB_THERMALCONTROLLER *controller; 772 struct amdgpu_i2c_bus_rec i2c_bus; 773 u16 data_offset; 774 u8 frev, crev; 775 776 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, 777 &frev, &crev, &data_offset)) 778 return; 779 power_table = 
		(ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
#ifdef notyet
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
			}
#endif
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
		return 0;
	}

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 * INFO: task ocltst:2028 blocked for more than 120 seconds.
			 *       Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 * "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 * ocltst         D    0  2028   2026 0x00000000
			 * Call Trace:
			 *  __schedule+0x2c0/0x870
			 *  schedule+0x2c/0x70
			 *  schedule_preempt_disabled+0xe/0x10
			 *  __mutex_lock.isra.9+0x26d/0x4e0
			 *  __mutex_lock_slowpath+0x13/0x20
			 *  ? __mutex_lock_slowpath+0x13/0x20
			 *  mutex_lock+0x2f/0x40
			 *  amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 *  gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 *  gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 *  amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 *  pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 *  amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
	case AMD_IP_BLOCK_TYPE_JPEG:
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			ret = (pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		}
		break;
	default:
		break;
	}

	if (!ret)
		atomic_set(&adev->pm.pwr_state[block_type], pwr_state);

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* enter BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 1);

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	int ret = 0;

	if (!pp_funcs || !pp_funcs->set_asic_baco_state)
		return -ENOENT;

	/* exit BACO state */
	ret = pp_funcs->set_asic_baco_state(pp_handle, 0);

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (pp_funcs && pp_funcs->set_mp1_state) {
		ret = pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	bool baco_cap;

	if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
		return false;
	/* Don't use baco for reset in S3.
1051 * This is a workaround for some platforms 1052 * where entering BACO during suspend 1053 * seems to cause reboots or hangs. 1054 * This might be related to the fact that BACO controls 1055 * power to the whole GPU including devices like audio and USB. 1056 * Powering down/up everything may adversely affect these other 1057 * devices. Needs more investigation. 1058 */ 1059 if (adev->in_s3) 1060 return false; 1061 1062 if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap)) 1063 return false; 1064 1065 return baco_cap; 1066 } 1067 1068 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) 1069 { 1070 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1071 void *pp_handle = adev->powerplay.pp_handle; 1072 1073 if (!pp_funcs || !pp_funcs->asic_reset_mode_2) 1074 return -ENOENT; 1075 1076 return pp_funcs->asic_reset_mode_2(pp_handle); 1077 } 1078 1079 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) 1080 { 1081 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1082 void *pp_handle = adev->powerplay.pp_handle; 1083 int ret = 0; 1084 1085 if (!pp_funcs || !pp_funcs->set_asic_baco_state) 1086 return -ENOENT; 1087 1088 /* enter BACO state */ 1089 ret = pp_funcs->set_asic_baco_state(pp_handle, 1); 1090 if (ret) 1091 return ret; 1092 1093 /* exit BACO state */ 1094 ret = pp_funcs->set_asic_baco_state(pp_handle, 0); 1095 if (ret) 1096 return ret; 1097 1098 return 0; 1099 } 1100 1101 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) 1102 { 1103 struct smu_context *smu = &adev->smu; 1104 1105 if (is_support_sw_smu(adev)) 1106 return smu_mode1_reset_is_support(smu); 1107 1108 return false; 1109 } 1110 1111 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) 1112 { 1113 struct smu_context *smu = &adev->smu; 1114 1115 if (is_support_sw_smu(adev)) 1116 return smu_mode1_reset(smu); 1117 1118 return -EOPNOTSUPP; 1119 } 1120 1121 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, 1122 enum PP_SMC_POWER_PROFILE type, 1123 bool en) 1124 { 1125 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1126 int ret = 0; 1127 1128 if (amdgpu_sriov_vf(adev)) 1129 return 0; 1130 1131 if (pp_funcs && pp_funcs->switch_power_profile) 1132 ret = pp_funcs->switch_power_profile( 1133 adev->powerplay.pp_handle, type, en); 1134 1135 return ret; 1136 } 1137 1138 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, 1139 uint32_t pstate) 1140 { 1141 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1142 int ret = 0; 1143 1144 if (pp_funcs && pp_funcs->set_xgmi_pstate) 1145 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, 1146 pstate); 1147 1148 return ret; 1149 } 1150 1151 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, 1152 uint32_t cstate) 1153 { 1154 int ret = 0; 1155 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1156 void *pp_handle = adev->powerplay.pp_handle; 1157 1158 if (pp_funcs && pp_funcs->set_df_cstate) 1159 ret = pp_funcs->set_df_cstate(pp_handle, cstate); 1160 1161 return ret; 1162 } 1163 1164 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) 1165 { 1166 struct smu_context *smu = &adev->smu; 1167 1168 if (is_support_sw_smu(adev)) 1169 return smu_allow_xgmi_power_down(smu, en); 1170 1171 return 0; 1172 } 1173 1174 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) 1175 { 1176 void *pp_handle = adev->powerplay.pp_handle; 1177 const struct amd_pm_funcs *pp_funcs = 1178 adev->powerplay.pp_funcs; 1179 int ret = 0; 1180 1181 if (pp_funcs && 
pp_funcs->enable_mgpu_fan_boost) 1182 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); 1183 1184 return ret; 1185 } 1186 1187 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, 1188 uint32_t msg_id) 1189 { 1190 void *pp_handle = adev->powerplay.pp_handle; 1191 const struct amd_pm_funcs *pp_funcs = 1192 adev->powerplay.pp_funcs; 1193 int ret = 0; 1194 1195 if (pp_funcs && pp_funcs->set_clockgating_by_smu) 1196 ret = pp_funcs->set_clockgating_by_smu(pp_handle, 1197 msg_id); 1198 1199 return ret; 1200 } 1201 1202 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, 1203 bool acquire) 1204 { 1205 void *pp_handle = adev->powerplay.pp_handle; 1206 const struct amd_pm_funcs *pp_funcs = 1207 adev->powerplay.pp_funcs; 1208 int ret = -EOPNOTSUPP; 1209 1210 if (pp_funcs && pp_funcs->smu_i2c_bus_access) 1211 ret = pp_funcs->smu_i2c_bus_access(pp_handle, 1212 acquire); 1213 1214 return ret; 1215 } 1216 1217 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 1218 { 1219 if (adev->pm.dpm_enabled) { 1220 mutex_lock(&adev->pm.mutex); 1221 if (power_supply_is_system_supplied() > 0) 1222 adev->pm.ac_power = true; 1223 else 1224 adev->pm.ac_power = false; 1225 if (adev->powerplay.pp_funcs && 1226 adev->powerplay.pp_funcs->enable_bapm) 1227 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 1228 mutex_unlock(&adev->pm.mutex); 1229 1230 if (is_support_sw_smu(adev)) 1231 smu_set_ac_dc(&adev->smu); 1232 } 1233 } 1234 1235 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 1236 void *data, uint32_t *size) 1237 { 1238 const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; 1239 int ret = 0; 1240 1241 if (!data || !size) 1242 return -EINVAL; 1243 1244 if (pp_funcs && pp_funcs->read_sensor) 1245 ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle, 1246 sensor, data, size); 1247 else 1248 ret = -EINVAL; 1249 1250 return ret; 1251 } 1252 1253 void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 1254 { 1255 struct amdgpu_device *adev = 1256 container_of(work, struct amdgpu_device, 1257 pm.dpm.thermal.work); 1258 /* switch to the thermal state */ 1259 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 1260 int temp, size = sizeof(temp); 1261 1262 if (!adev->pm.dpm_enabled) 1263 return; 1264 1265 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, 1266 (void *)&temp, &size)) { 1267 if (temp < adev->pm.dpm.thermal.min_temp) 1268 /* switch back the user state */ 1269 dpm_state = adev->pm.dpm.user_state; 1270 } else { 1271 if (adev->pm.dpm.thermal.high_to_low) 1272 /* switch back the user state */ 1273 dpm_state = adev->pm.dpm.user_state; 1274 } 1275 mutex_lock(&adev->pm.mutex); 1276 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 1277 adev->pm.dpm.thermal_active = true; 1278 else 1279 adev->pm.dpm.thermal_active = false; 1280 adev->pm.dpm.state = dpm_state; 1281 mutex_unlock(&adev->pm.mutex); 1282 1283 amdgpu_pm_compute_clocks(adev); 1284 } 1285 1286 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, 1287 enum amd_pm_state_type dpm_state) 1288 { 1289 int i; 1290 struct amdgpu_ps *ps; 1291 u32 ui_class; 1292 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
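	/* no matching ACPI state: retry as battery, and ultimately as performance */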
case POWER_STATE_TYPE_INTERNAL_ACPI: 1406 dpm_state = POWER_STATE_TYPE_BATTERY; 1407 goto restart_search; 1408 case POWER_STATE_TYPE_BATTERY: 1409 case POWER_STATE_TYPE_BALANCED: 1410 case POWER_STATE_TYPE_INTERNAL_3DPERF: 1411 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 1412 goto restart_search; 1413 default: 1414 break; 1415 } 1416 1417 return NULL; 1418 } 1419 1420 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) 1421 { 1422 struct amdgpu_ps *ps; 1423 enum amd_pm_state_type dpm_state; 1424 int ret; 1425 bool equal = false; 1426 1427 /* if dpm init failed */ 1428 if (!adev->pm.dpm_enabled) 1429 return; 1430 1431 if (adev->pm.dpm.user_state != adev->pm.dpm.state) { 1432 /* add other state override checks here */ 1433 if ((!adev->pm.dpm.thermal_active) && 1434 (!adev->pm.dpm.uvd_active)) 1435 adev->pm.dpm.state = adev->pm.dpm.user_state; 1436 } 1437 dpm_state = adev->pm.dpm.state; 1438 1439 ps = amdgpu_dpm_pick_power_state(adev, dpm_state); 1440 if (ps) 1441 adev->pm.dpm.requested_ps = ps; 1442 else 1443 return; 1444 1445 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { 1446 printk("switching from power state:\n"); 1447 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); 1448 printk("switching to power state:\n"); 1449 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 1450 } 1451 1452 /* update whether vce is active */ 1453 ps->vce_active = adev->pm.dpm.vce_active; 1454 if (adev->powerplay.pp_funcs->display_configuration_changed) 1455 amdgpu_dpm_display_configuration_changed(adev); 1456 1457 ret = amdgpu_dpm_pre_set_power_state(adev); 1458 if (ret) 1459 return; 1460 1461 if (adev->powerplay.pp_funcs->check_state_equal) { 1462 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) 1463 equal = false; 1464 } 1465 1466 if (equal) 1467 return; 1468 1469 amdgpu_dpm_set_power_state(adev); 1470 amdgpu_dpm_post_set_power_state(adev); 1471 1472 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 1473 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 1474 1475 if (adev->powerplay.pp_funcs->force_performance_level) { 1476 if (adev->pm.dpm.thermal_active) { 1477 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; 1478 /* force low perf level for thermal */ 1479 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); 1480 /* save the user's level */ 1481 adev->pm.dpm.forced_level = level; 1482 } else { 1483 /* otherwise, user selected level */ 1484 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); 1485 } 1486 } 1487 } 1488 1489 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 1490 { 1491 int i = 0; 1492 1493 if (!adev->pm.dpm_enabled) 1494 return; 1495 1496 if (adev->mode_info.num_crtc) 1497 amdgpu_display_bandwidth_update(adev); 1498 1499 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1500 struct amdgpu_ring *ring = adev->rings[i]; 1501 if (ring && ring->sched.ready) 1502 amdgpu_fence_wait_empty(ring); 1503 } 1504 1505 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1506 if (!amdgpu_device_has_dc_support(adev)) { 1507 mutex_lock(&adev->pm.mutex); 1508 amdgpu_dpm_get_active_displays(adev); 1509 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 1510 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 1511 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 1512 /* we have issues with mclk switching with 1513 * refresh rates over 120 hz on the 
non-DC code. 1514 */ 1515 if (adev->pm.pm_display_cfg.vrefresh > 120) 1516 adev->pm.pm_display_cfg.min_vblank_time = 0; 1517 if (adev->powerplay.pp_funcs->display_configuration_change) 1518 adev->powerplay.pp_funcs->display_configuration_change( 1519 adev->powerplay.pp_handle, 1520 &adev->pm.pm_display_cfg); 1521 mutex_unlock(&adev->pm.mutex); 1522 } 1523 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); 1524 } else { 1525 mutex_lock(&adev->pm.mutex); 1526 amdgpu_dpm_get_active_displays(adev); 1527 amdgpu_dpm_change_power_state_locked(adev); 1528 mutex_unlock(&adev->pm.mutex); 1529 } 1530 } 1531 1532 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 1533 { 1534 int ret = 0; 1535 1536 if (adev->family == AMDGPU_FAMILY_SI) { 1537 mutex_lock(&adev->pm.mutex); 1538 if (enable) { 1539 adev->pm.dpm.uvd_active = true; 1540 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; 1541 } else { 1542 adev->pm.dpm.uvd_active = false; 1543 } 1544 mutex_unlock(&adev->pm.mutex); 1545 1546 amdgpu_pm_compute_clocks(adev); 1547 } else { 1548 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 1549 if (ret) 1550 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 1551 enable ? "enable" : "disable", ret); 1552 1553 /* enable/disable Low Memory PState for UVD (4k videos) */ 1554 if (adev->asic_type == CHIP_STONEY && 1555 adev->uvd.decode_image_width >= WIDTH_4K) { 1556 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 1557 1558 if (hwmgr && hwmgr->hwmgr_func && 1559 hwmgr->hwmgr_func->update_nbdpm_pstate) 1560 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, 1561 !enable, 1562 true); 1563 } 1564 } 1565 } 1566 1567 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 1568 { 1569 int ret = 0; 1570 1571 if (adev->family == AMDGPU_FAMILY_SI) { 1572 mutex_lock(&adev->pm.mutex); 1573 if (enable) { 1574 adev->pm.dpm.vce_active = true; 1575 /* XXX select vce level based on ring/task */ 1576 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; 1577 } else { 1578 adev->pm.dpm.vce_active = false; 1579 } 1580 mutex_unlock(&adev->pm.mutex); 1581 1582 amdgpu_pm_compute_clocks(adev); 1583 } else { 1584 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 1585 if (ret) 1586 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 1587 enable ? "enable" : "disable", ret); 1588 } 1589 } 1590 1591 void amdgpu_pm_print_power_states(struct amdgpu_device *adev) 1592 { 1593 int i; 1594 1595 if (adev->powerplay.pp_funcs->print_power_state == NULL) 1596 return; 1597 1598 for (i = 0; i < adev->pm.dpm.num_ps; i++) 1599 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); 1600 1601 } 1602 1603 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 1604 { 1605 int ret = 0; 1606 1607 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 1608 if (ret) 1609 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 1610 enable ? "enable" : "disable", ret); 1611 } 1612 1613 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 1614 { 1615 int r; 1616 1617 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { 1618 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); 1619 if (r) { 1620 pr_err("smu firmware loading failed\n"); 1621 return r; 1622 } 1623 1624 if (smu_version) 1625 *smu_version = adev->pm.fw_version; 1626 } 1627 1628 return 0; 1629 } 1630