/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>

#define WIDTH_4K 3840

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}

u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

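/*
 * Convert an ATOM BIOS clock/voltage dependency table into the driver's
 * internal amdgpu_clock_voltage_dependency_table representation.  The
 * entries array allocated here is released by
 * amdgpu_free_extended_power_table().
 */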
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) |
					(entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

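/*
 * Read the thermal controller entry from the PowerPlay table: record fan
 * availability and tachometer parameters, set adev->pm.int_thermal_type,
 * and, for external controllers, look up the i2c bus the fan-control chip
 * sits on.
 */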
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
#ifdef notyet
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
			}
#endif
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;
	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;

	} else {
		return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
	}
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;
	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL);
		if (ret)
			return 0;
		return clk_freq * 100;

	} else {
		return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
	}
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	bool swsmu = is_support_sw_smu(adev);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (swsmu) {
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		} else if (adev->powerplay.pp_funcs &&
			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 * INFO: task ocltst:2028 blocked for more than 120 seconds.
			 *       Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
			 * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 * ocltst D 0 2028 2026 0x00000000
			 * Call Trace:
			 * __schedule+0x2c0/0x870
			 * schedule+0x2c/0x70
			 * schedule_preempt_disabled+0xe/0x10
			 * __mutex_lock.isra.9+0x26d/0x4e0
			 * __mutex_lock_slowpath+0x13/0x20
			 * ? __mutex_lock_slowpath+0x13/0x20
			 * mutex_lock+0x2f/0x40
			 * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		else if (adev->powerplay.pp_funcs &&
			 adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	return ret;
}

int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	}

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_exit(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
	}

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_set_mp1_state(&adev->smu, mp1_state);
	} else if (adev->powerplay.pp_funcs &&
		   adev->powerplay.pp_funcs->set_mp1_state) {
		ret = adev->powerplay.pp_funcs->set_mp1_state(
			adev->powerplay.pp_handle,
			mp1_state);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	bool baco_cap;

	if (is_support_sw_smu(adev)) {
		return smu_baco_is_support(smu);
	} else {
		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
			return false;

		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
			return false;

		return baco_cap ? true : false;
	}
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev)) {
		return smu_mode2_reset(smu);
	} else {
		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
			return -ENOENT;

		return pp_funcs->asic_reset_mode_2(pp_handle);
	}
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;

		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		if (!pp_funcs
		    || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
		if (ret)
			return ret;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
		if (ret)
			return ret;
	}

	return 0;
}

bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_mode1_reset_is_support(smu);

	return false;
}

int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_mode1_reset(smu);

	return -EOPNOTSUPP;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_switch_power_profile(&adev->smu, type, en);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->switch_power_profile)
		ret = adev->powerplay.pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->set_xgmi_pstate)
		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
								pstate);

	return ret;
}

int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
			     uint32_t cstate)
{
	int ret = 0;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		ret = smu_set_df_cstate(smu, cstate);
	else if (pp_funcs &&
		 pp_funcs->set_df_cstate)
		ret = pp_funcs->set_df_cstate(pp_handle, cstate);

	return ret;
}

int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev))
		return smu_allow_xgmi_power_down(smu, en);

	return 0;
}

int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_enable_mgpu_fan_boost(smu);
	else if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
		ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);

	return ret;
}

int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
				      uint32_t msg_id)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = 0;

	if (pp_funcs && pp_funcs->set_clockgating_by_smu)
		ret = pp_funcs->set_clockgating_by_smu(pp_handle,
						       msg_id);

	return ret;
}

int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
				  bool acquire)
{
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs =
			adev->powerplay.pp_funcs;
	int ret = -EOPNOTSUPP;

	if (pp_funcs && pp_funcs->smu_i2c_bus_access)
		ret = pp_funcs->smu_i2c_bus_access(pp_handle,
						   acquire);

	return ret;
}

void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
		mutex_unlock(&adev->pm.mutex);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(&adev->smu);
	}
}

int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	if (is_support_sw_smu(adev))
		ret = smu_read_sensor(&adev->smu, sensor, data, size);
	else {
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
								    sensor, data, size);
		else
			ret = -EINVAL;
	}

	return ret;
}

void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}

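/*
 * Pick the best matching power state for the requested dpm_state,
 * preferring single-display states when fewer than two CRTCs are active
 * and falling back through related state types when no exact match is
 * found.
 */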
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}

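/*
 * Recompute display bandwidth and hand the current display configuration to
 * the active power-management backend (SW SMU, powerplay, or the legacy dpm
 * state machine) after waiting for the rings to go idle.
 */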
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (is_support_sw_smu(adev)) {
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
		smu_handle_task(&adev->smu,
				smu_dpm->dpm_level,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
				true);
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			if (!amdgpu_device_has_dc_support(adev)) {
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
								adev->powerplay.pp_handle,
								&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
}

void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.uvd_active = true;
			adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
		} else {
			adev->pm.dpm.uvd_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		if (ret)
			DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
				  enable ? "enable" : "disable", ret);

		/* enable/disable Low Memory PState for UVD (4k videos) */
		if (adev->asic_type == CHIP_STONEY &&
		    adev->uvd.decode_image_width >= WIDTH_4K) {
			struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

			if (hwmgr && hwmgr->hwmgr_func &&
			    hwmgr->hwmgr_func->update_nbdpm_pstate)
				hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
								       !enable,
								       true);
		}
	}
}

void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	if (adev->family == AMDGPU_FAMILY_SI) {
		mutex_lock(&adev->pm.mutex);
		if (enable) {
			adev->pm.dpm.vce_active = true;
			/* XXX select vce level based on ring/task */
			adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
		} else {
			adev->pm.dpm.vce_active = false;
		}
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	} else {
		ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
		if (ret)
			DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
				  enable ? "enable" : "disable", ret);
	}
}

void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);

}

void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
			  enable ? "enable" : "disable", ret);
}

int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	int r;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
		if (r) {
			pr_err("smu firmware loading failed\n");
			return r;
		}
		*smu_version = adev->pm.fw_version;
	}
	return 0;
}